Merge remote-tracking branch 'origin/master' into feature/pushover_support-1680

pull/1724/head
Jared Scheib 2017-07-17 14:12:07 -07:00
commit b2a69dd6ac
108 changed files with 5024 additions and 2721 deletions

16
Gopkg.lock generated
View File

@ -1,4 +1,5 @@
memo = "bac138180cd86a0ae604cd3aa7b6ba300673478c880882bd58a4bd7f8bff518d"
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/NYTimes/gziphandler"
@ -63,9 +64,9 @@ memo = "bac138180cd86a0ae604cd3aa7b6ba300673478c880882bd58a4bd7f8bff518d"
[[projects]]
name = "github.com/influxdata/kapacitor"
packages = ["client/v1","influxdb","models","pipeline","services/k8s/client","tick","tick/ast","tick/stateful","udf"]
revision = "5408057e5a3493d3b5bd38d5d535ea45b587f8ff"
version = "v1.2.0"
packages = ["client/v1","pipeline","services/k8s/client","tick","tick/ast","tick/stateful","udf/agent"]
revision = "3b5512f7276483326577907803167e4bb213c613"
version = "v1.3.1"
[[projects]]
name = "github.com/influxdata/usage-client"
@ -130,3 +131,10 @@ memo = "bac138180cd86a0ae604cd3aa7b6ba300673478c880882bd58a4bd7f8bff518d"
packages = ["internal","internal/base","internal/datastore","internal/log","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "bac138180cd86a0ae604cd3aa7b6ba300673478c880882bd58a4bd7f8bff518d"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -1,77 +1,77 @@
required = ["github.com/jteeuwen/go-bindata","github.com/gogo/protobuf/proto","github.com/gogo/protobuf/jsonpb","github.com/gogo/protobuf/protoc-gen-gogo","github.com/gogo/protobuf/gogoproto"]
[[dependencies]]
[[constraint]]
name = "github.com/NYTimes/gziphandler"
revision = "6710af535839f57c687b62c4c23d649f9545d885"
[[dependencies]]
[[constraint]]
name = "github.com/Sirupsen/logrus"
revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a"
[[dependencies]]
[[constraint]]
name = "github.com/boltdb/bolt"
revision = "5cc10bbbc5c141029940133bb33c9e969512a698"
[[dependencies]]
[[constraint]]
name = "github.com/bouk/httprouter"
revision = "ee8b3818a7f51fbc94cc709b5744b52c2c725e91"
[[dependencies]]
[[constraint]]
name = "github.com/dgrijalva/jwt-go"
revision = "24c63f56522a87ec5339cc3567883f1039378fdb"
[[dependencies]]
[[constraint]]
name = "github.com/elazarl/go-bindata-assetfs"
revision = "9a6736ed45b44bf3835afeebb3034b57ed329f3e"
[[dependencies]]
[[constraint]]
name = "github.com/gogo/protobuf"
revision = "6abcf94fd4c97dcb423fdafd42fe9f96ca7e421b"
[[dependencies]]
[[constraint]]
name = "github.com/google/go-github"
revision = "1bc362c7737e51014af7299e016444b654095ad9"
[[dependencies]]
[[constraint]]
name = "github.com/influxdata/influxdb"
revision = "af72d9b0e4ebe95be30e89b160f43eabaf0529ed"
[[dependencies]]
[[constraint]]
name = "github.com/influxdata/kapacitor"
version = "^1.2.0"
[[dependencies]]
[[constraint]]
name = "github.com/influxdata/usage-client"
revision = "6d3895376368aa52a3a81d2a16e90f0f52371967"
[[dependencies]]
[[constraint]]
name = "github.com/jessevdk/go-flags"
revision = "4cc2832a6e6d1d3b815e2b9d544b2a4dfb3ce8fa"
[[dependencies]]
[[constraint]]
name = "github.com/jteeuwen/go-bindata"
revision = "a0ff2567cfb70903282db057e799fd826784d41d"
[[dependencies]]
[[constraint]]
name = "github.com/satori/go.uuid"
revision = "b061729afc07e77a8aa4fad0a2fd840958f1942a"
[[dependencies]]
[[constraint]]
name = "github.com/sergi/go-diff"
revision = "1d28411638c1e67fe1930830df207bef72496ae9"
[[dependencies]]
[[constraint]]
name = "github.com/tylerb/graceful"
version = "^1.2.13"
[[dependencies]]
[[constraint]]
name = "golang.org/x/net"
revision = "749a502dd1eaf3e5bfd4f8956748c502357c0bbe"
[[dependencies]]
[[constraint]]
name = "golang.org/x/oauth2"
revision = "1e695b1c8febf17aad3bfa7bf0a819ef94b98ad5"
[[dependencies]]
[[constraint]]
name = "google.golang.org/api"
revision = "bc20c61134e1d25265dd60049f5735381e79b631"

View File

@ -169,7 +169,51 @@ func TestAlertServices(t *testing.T) {
},
},
},
wantErr: true,
want: `alert()
.post()
`,
},
{
name: "Test post with headers",
rule: chronograf.AlertRule{
AlertNodes: []chronograf.KapacitorNode{
{
Name: "post",
Args: []string{"http://myaddress"},
Properties: []chronograf.KapacitorProperty{
{
Name: "header",
Args: []string{"key", "value"},
},
},
},
},
},
want: `alert()
.post('http://myaddress')
.header('key', 'value')
`,
},
{
name: "Test post with headers",
rule: chronograf.AlertRule{
AlertNodes: []chronograf.KapacitorNode{
{
Name: "post",
Args: []string{"http://myaddress"},
Properties: []chronograf.KapacitorProperty{
{
Name: "endpoint",
Args: []string{"myendpoint"},
},
},
},
},
},
want: `alert()
.post('http://myaddress')
.endpoint('myendpoint')
`,
},
}
for _, tt := range tests {

View File

@ -607,11 +607,11 @@ func extractEmail(node *pipeline.AlertNode, rule *chronograf.AlertRule) {
}
func extractPost(node *pipeline.AlertNode, rule *chronograf.AlertRule) {
if node.PostHandlers == nil {
if node.HTTPPostHandlers == nil {
return
}
rule.Alerts = append(rule.Alerts, "http")
p := node.PostHandlers[0]
p := node.HTTPPostHandlers[0]
alert := chronograf.KapacitorNode{
Name: "http",
}
@ -620,6 +620,22 @@ func extractPost(node *pipeline.AlertNode, rule *chronograf.AlertRule) {
alert.Args = []string{p.URL}
}
if p.Endpoint != "" {
alert.Properties = append(alert.Properties, chronograf.KapacitorProperty{
Name: "endpoint",
Args: []string{p.Endpoint},
})
}
if len(p.Headers) > 0 {
for k, v := range p.Headers {
alert.Properties = append(alert.Properties, chronograf.KapacitorProperty{
Name: "header",
Args: []string{k, v},
})
}
}
rule.AlertNodes = append(rule.AlertNodes, alert)
}

View File

@ -60,12 +60,15 @@ func TestReverse(t *testing.T) {
.victorOps()
.email('howdy@howdy.com')
.log('/tmp/alerts.log')
.post('http://backin.tm')
.endpoint('myendpoint')
.header('key', 'value')
`),
want: chronograf.AlertRule{
Name: "name",
Trigger: "threshold",
Alerts: []string{"victorops", "smtp", "slack", "log"},
Alerts: []string{"victorops", "smtp", "http", "slack", "log"},
AlertNodes: []chronograf.KapacitorNode{
{
Name: "victorops",
@ -74,6 +77,20 @@ func TestReverse(t *testing.T) {
Name: "smtp",
Args: []string{"howdy@howdy.com"},
},
{
Name: "http",
Args: []string{"http://backin.tm"},
Properties: []chronograf.KapacitorProperty{
{
Name: "endpoint",
Args: []string{"myendpoint"},
},
{
Name: "header",
Args: []string{"key", "value"},
},
},
},
{
Name: "slack",
},

View File

@ -1,5 +1,227 @@
# Changelog
## Unreleased [unreleased]
### Features
### Bugfixes
## v1.3.1 [2017-06-02]
### Bugfixes
- [#1415](https://github.com/influxdata/kapacitor/pull/1415): Proxy from environment for HTTP request to slack
- [#1414](https://github.com/influxdata/kapacitor/pull/1414): Fix derivative node preserving fields from previous point in stream tasks.
## v1.3.0 [2017-05-22]
### Release Notes
The v1.3.0 release has two major features.
1. Addition of scraping and discovering for Prometheus style data collection.
2. Updates to the Alert Topic system
Here is a quick example of how to configure Kapacitor to scrape discovered targets.
First configure a discoverer, here we use the file-discovery discoverer.
Next configure a scraper to use that discoverer.
>NOTE: The scraping and discovering features are released under technical preview,
meaning that the configuration or API around the feature may change in a future release.
```
# Configure file discoverer
[[file-discovery]]
enabled = true
id = "discover_files"
refresh-interval = "10s"
##### This will look for prometheus json files
##### File format is here https://prometheus.io/docs/operating/configuration/#%3Cfile_sd_config%3E
files = ["/tmp/prom/*.json"]
# Configure scraper
[[scraper]]
enabled = true
name = "node_exporter"
discoverer-id = "discover_files"
discoverer-service = "file-discovery"
db = "prometheus"
rp = "autogen"
type = "prometheus"
scheme = "http"
metrics-path = "/metrics"
scrape-interval = "2s"
scrape-timeout = "10s"
```
Add the above snippet to your kapacitor.conf file.
Create the below snippet as the file `/tmp/prom/localhost.json`:
```
[{
"targets": ["localhost:9100"]
}]
```
Start the Prometheus node_exporter locally.
Now startup Kapacitor and it will discover the `localhost:9100` node_exporter target and begin scraping it for metrics.
For more details on the scraping and discovery systems see the full documentation [here](https://docs.influxdata.com/kapacitor/v1.3/scraping).
The second major feature with this release, are changes to the alert topic system.
The previous release introduce this new system as a technical preview, with this release the alerting service has been simplified.
Alert handlers now only ever have a single action and belong to a single topic.
The handler definition has been simplified as a result.
Here are some example alert handlers using the new structure:
```yaml
id: my_handler
kind: pagerDuty
options:
serviceKey: XXX
```
```yaml
id: aggregate_by_1m
kind: aggregate
options:
interval: 1m
topic: aggregated
```
```yaml
id: publish_to_system
kind: publish
options:
topics: [ system ]
```
To define a handler now you must specify which topic the handler belongs to.
For example to define the above aggregate handler on the system topic use this command:
```sh
kapacitor define-handler system aggregate_by_1m.yaml
```
For more details on the alerting system see the full documentation [here](https://docs.influxdata.com/kapacitor/v1.3/alerts).
### Bugfixes
- [#1396](https://github.com/influxdata/kapacitor/pull/1396): Fix broken ENV var config overrides for the kubernetes section.
- [#1397](https://github.com/influxdata/kapacitor/pull/1397): Update default configuration file to include sections for each discoverer service.
## v1.3.0-rc4 [2017-05-19]
### Bugfixes
- [#1379](https://github.com/influxdata/kapacitor/issues/1379): Copy batch points slice before modification, fixes potential panics and data corruption.
- [#1394](https://github.com/influxdata/kapacitor/pull/1394): Use the Prometheus metric name as the measurement name by default for scrape data.
- [#1392](https://github.com/influxdata/kapacitor/pull/1392): Fix possible deadlock for scraper configuration updating.
## v1.3.0-rc3 [2017-05-18]
### Bugfixes
- [#1369](https://github.com/influxdata/kapacitor/issues/1369): Fix panic with concurrent writes to same points in state tracking nodes.
- [#1387](https://github.com/influxdata/kapacitor/pull/1387): static-discovery configuration simplified
- [#1378](https://github.com/influxdata/kapacitor/issues/1378): Fix panic in InfluxQL node with missing field.
## v1.3.0-rc2 [2017-05-11]
### Bugfixes
- [#1370](https://github.com/influxdata/kapacitor/issues/1370): Fix missing working_cardinality stats on stateDuration and stateCount nodes.
## v1.3.0-rc1 [2017-05-08]
### Features
- [#1299](https://github.com/influxdata/kapacitor/pull/1299): Allowing sensu handler to be specified
- [#1284](https://github.com/influxdata/kapacitor/pull/1284): Add type signatures to Kapacitor functions.
- [#1203](https://github.com/influxdata/kapacitor/issues/1203): Add `isPresent` operator for verifying whether a value is present (part of [#1284](https://github.com/influxdata/kapacitor/pull/1284)).
- [#1354](https://github.com/influxdata/kapacitor/pull/1354): Add Kubernetes scraping support.
- [#1359](https://github.com/influxdata/kapacitor/pull/1359): Add groupBy exclude and Add dropOriginalFieldName to flatten.
- [#1360](https://github.com/influxdata/kapacitor/pull/1360): Add KapacitorLoopback node to be able to send data from a task back into Kapacitor.
### Bugfixes
- [#1329](https://github.com/influxdata/kapacitor/issues/1329): BREAKING: A bug was fixed around missing fields in the derivative node.
The behavior of the node changes slightly in order to provide a consistent fix to the bug.
The breaking change is that now, the time of the points returned are from the right hand or current point time, instead of the left hand or previous point time.
- [#1353](https://github.com/influxdata/kapacitor/issues/1353): Fix panic in scraping TargetManager.
- [#1238](https://github.com/influxdata/kapacitor/pull/1238): Use ProxyFromEnvironment for all outgoing HTTP traffic.
## v1.3.0-beta2 [2017-05-01]
### Features
- [#117](https://github.com/influxdata/kapacitor/issues/117): Add headers to alert POST requests.
### Bugfixes
- [#1294](https://github.com/influxdata/kapacitor/issues/1294): Fix bug where batch queries would be missing all fields after the first nil field.
- [#1343](https://github.com/influxdata/kapacitor/issues/1343): BREAKING: The UDF agent Go API has changed, the changes now make it so that the agent package is self contained.
## v1.3.0-beta1 [2017-04-29]
### Features
- [#1322](https://github.com/influxdata/kapacitor/pull/1322): TLS configuration in Slack service for Mattermost compatibility
- [#1330](https://github.com/influxdata/kapacitor/issues/1330): Generic HTTP Post node
- [#1159](https://github.com/influxdata/kapacitor/pulls/1159): Go version 1.7.4 -> 1.7.5
- [#1175](https://github.com/influxdata/kapacitor/pull/1175): BREAKING: Add generic error counters to every node type.
Renamed `query_errors` to `errors` in batch node.
Renamed `eval_errors` to `errors` in eval node.
- [#922](https://github.com/influxdata/kapacitor/issues/922): Expose server specific information in alert templates.
- [#1162](https://github.com/influxdata/kapacitor/pulls/1162): Add Pushover integration.
- [#1221](https://github.com/influxdata/kapacitor/pull/1221): Add `working_cardinality` stat to each node type that tracks the number of groups per node.
- [#1211](https://github.com/influxdata/kapacitor/issues/1211): Add StateDuration node.
- [#1209](https://github.com/influxdata/kapacitor/issues/1209): BREAKING: Refactor the Alerting service.
The change is completely breaking for the technical preview alerting service, a.k.a. the new alert topic handler features.
The change boils down to simplifying how you define and interact with topics.
Alert handlers now only ever have a single action and belong to a single topic.
An automatic migration from old to new handler definitions will be performed during startup.
See the updated API docs.
- [#1286](https://github.com/influxdata/kapacitor/issues/1286): Default HipChat URL should be blank
- [#507](https://github.com/influxdata/kapacitor/issues/507): Add API endpoint for performing Kapacitor database backups.
- [#1132](https://github.com/influxdata/kapacitor/issues/1132): Adding source for sensu alert as parameter
- [#1346](https://github.com/influxdata/kapacitor/pull/1346): Add discovery and scraping services.
### Bugfixes
- [#1133](https://github.com/influxdata/kapacitor/issues/1133): Fix case-sensitivity for Telegram `parseMode` value.
- [#1147](https://github.com/influxdata/kapacitor/issues/1147): Fix pprof debug endpoint
- [#1164](https://github.com/influxdata/kapacitor/pull/1164): Fix hang in config API to update a config section.
Now if the service update process takes too long the request will timeout and return an error.
Previously the request would block forever.
- [#1165](https://github.com/influxdata/kapacitor/issues/1165): Make the alerta auth token prefix configurable and default it to Bearer.
- [#1184](https://github.com/influxdata/kapacitor/pull/1184): Fix logrotate file to correctly rotate error log.
- [#1200](https://github.com/influxdata/kapacitor/pull/1200): Fix bug with alert duration being incorrect after restoring alert state.
- [#1199](https://github.com/influxdata/kapacitor/pull/1199): BREAKING: Fix inconsistency with JSON data from alerts.
The alert handlers Alerta, Log, OpsGenie, PagerDuty, Post and VictorOps allow extra opaque data to be attached to alert notifications.
That opaque data was inconsistent and this change fixes that.
Depending on how that data was consumed this could result in a breaking change, since the original behavior was inconsistent
we decided it would be best to fix the issue now and make it consistent for all future builds.
Specifically in the JSON result data the old key `Series` is always `series`, and the old key `Err` is now always `error` instead of for only some of the outputs.
- [#1181](https://github.com/influxdata/kapacitor/pull/1181): Fix bug parsing dbrp values with quotes.
- [#1228](https://github.com/influxdata/kapacitor/pull/1228): Fix panic on loading replay files without a file extension.
- [#1192](https://github.com/influxdata/kapacitor/issues/1192): Fix bug in Default Node not updating batch tags and groupID.
Also empty string on a tag value is now a sufficient condition for the default conditions to be applied.
See [#1233](https://github.com/influxdata/kapacitor/pull/1233) for more information.
- [#1068](https://github.com/influxdata/kapacitor/issues/1068): Fix dot view syntax to use xlabels and not create invalid quotes.
- [#1295](https://github.com/influxdata/kapacitor/issues/1295): Fix corruption of recordings list after deleting all recordings.
- [#1237](https://github.com/influxdata/kapacitor/issues/1237): Fix missing "vars" key when listing tasks.
- [#1271](https://github.com/influxdata/kapacitor/issues/1271): Fix bug where aggregates would not be able to change type.
- [#1261](https://github.com/influxdata/kapacitor/issues/1261): Fix panic when the process cannot stat the data dir.
## v1.2.1 [2017-04-13]
### Bugfixes
- [#1323](https://github.com/influxdata/kapacitor/pull/1323): Fix issue where credentials to InfluxDB could not be updated dynamically.
## v1.2.0 [2017-01-23]
### Release Notes
@ -60,24 +282,24 @@ See the [API docs on technical preview](https://docs.influxdata.com/kapacitor/v1
### Features
- [#327](https://github.com/influxdata/kapacitor/issues/327): You can now window based on count in addition to time.
- [#1110](https://github.com/influxdata/kapacitor/pull/1110): Add new query property for aligning group by intervals to start times.
- [#1095](https://github.com/influxdata/kapacitor/pull/1095): Add new alert API, with support for configuring handlers and topics.
- [#1052](https://github.com/influxdata/kapacitor/issues/1052): Move alerta api token to header and add option to skip TLS verification.
- [#929](https://github.com/influxdata/kapacitor/pull/929): Add SNMP trap service for alerting.
- [#913](https://github.com/influxdata/kapacitor/issues/913): Add fillPeriod option to Window node, so that the first emit waits till the period has elapsed before emitting.
- [#898](https://github.com/influxdata/kapacitor/issues/898): Now when the Window node every value is zero, the window will be emitted immediately for each new point.
- [#1052](https://github.com/influxdata/kapacitor/issues/1052): Move alerta api token to header and add option to skip TLS verification.
- [#251](https://github.com/influxdata/kapacitor/issues/251): Enable markdown in slack attachments.
- [#1095](https://github.com/influxdata/kapacitor/pull/1095): Add new alert API, with support for configuring handlers and topics.
- [#929](https://github.com/influxdata/kapacitor/pull/929): Add SNMP trap service for alerting
- [#1110](https://github.com/influxdata/kapacitor/pull/1110): Add new query property for aligning group by intervals to start times.
- [#744](https://github.com/influxdata/kapacitor/issues/744): Preserve alert state across restarts and disable/enable actions.
- [#327](https://github.com/influxdata/kapacitor/issues/327): You can now window based on count in addition to time.
- [#251](https://github.com/influxdata/kapacitor/issues/251): Enable markdown in slack attachments.
### Bugfixes
- [#1045](https://github.com/influxdata/kapacitor/issues/1045): Fix panic during replays.
- [#1043](https://github.com/influxdata/kapacitor/issues/1043): logrotate.d ignores kapacitor configuration due to bad file mode
- [#1100](https://github.com/influxdata/kapacitor/issues/1100): Fix issue with the Union node buffering more points than necessary.
- [#872](https://github.com/influxdata/kapacitor/issues/872): Fix panic during failed aggregate results.
- [#1087](https://github.com/influxdata/kapacitor/issues/1087): Fix panic during close of failed startup when connecting to InfluxDB.
- [#1045](https://github.com/influxdata/kapacitor/issues/1045): Fix panic during replays.
- [#1043](https://github.com/influxdata/kapacitor/issues/1043): logrotate.d ignores kapacitor configuration due to bad file mode.
- [#872](https://github.com/influxdata/kapacitor/issues/872): Fix panic during failed aggregate results.
## v1.1.1 [2016-12-02]

View File

@ -64,18 +64,11 @@ If you are going to be contributing back to Kapacitor please take a second to si
Installing Go
-------------
Kapacitor requires Go 1.6 or greater.
Kapacitor typically requires the latest version of Go.
To install go see https://golang.org/dl/
Revision Control Systems
------------------------
Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system.
Currently the project only depends on `git` and `mercurial`.
* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
Getting the source
------
Setup the project structure and fetch the repo like so:
@ -114,8 +107,6 @@ go fmt ./...
go vet ./...
```
NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above.
For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).
Build and Test
@ -140,57 +131,18 @@ Dependencies
------------
Kapacitor vendors all dependencies.
Kapacitor does not use a dependency manager tool, but rather uses git subrepos to place
dependencies in the vendor directory.
This gives complete control over how dependencies are managed and keeps the workflow simple.
A few helper scripts are provided to make this process fast and easy.
Kapacitor uses the golang [dep](https://github.com/golang/dep) tool.
To manage the subrepo you must first install [git-subrepo](https://github.com/ingydotnet/git-subrepo#installation).
First list all dependencies, including dependencies of dependencies.
Install the dep tool:
```
./list-deps
go get -u github.com/golang/dep
```
To add a new dependency add a new entry to the `vendor.list` file, of the form:
See the dep help for usage and documentation.
```
<package> [branch]
```
The `branch` column is optional.
If `branch` is left empty the default branch will be used.
For example, to add the `github.com/influxdata/foo/bar` dependency add this line to the `vendor.list`.
```
github.com/influxdata/foo https://github.com/influxdata/foo.git
```
Notice that the `bar` part of the path was left off since it's a subdirectory of the repo.
Commit this change then run:
```
./vendor.sh github.com/influxdata/foo
```
This will add the subrepo for the git repo under `vendor/github.com/influxdata/foo`.
Later to update the dependency use the same command.
```
./vendor.sh github.com/influxdata/foo
```
Or to update all dependencies at once use
```
./vendor.sh
```
These scripts are really simple, we may formalize them later but currently simplicity is key.
Kapacitor commits vendored deps into the repo, as a result always run `dep prune` after any `dep ensure` operation.
This helps keep the amount of code committed to a minimum.
Generating Code
@ -206,16 +158,13 @@ Go provides a consistent command for generating all necessary code:
go generate ./...
```
For the generate command to succeed you will need a few dependencies installed on your system:
For the generate command to succeed you will need a few dependencies installed on your system.
These dependencies are already vendored in the code and can be installed from there.
* tmpl -- A utility used to generate code from templates. Install via `go get github.com/benbjohnson/tmpl`
* tmpl -- A utility used to generate code from templates. Install via `go install ./vendor/github.com/benbjohnson/tmpl`
* protoc + protoc-gen-go -- A protobuf compiler plus the protoc-gen-go extension.
You need version 3.0.0-beta-2 of protoc.
To install the go plugin run `go get github.com/golang/protobuf/protoc-gen-go`
NOTE: Since installing dependencies can often be painful we have provided a docker container that comes with all of these dependencies installed.
See the section below about the build script and docker.
You need version 3.0.0 of protoc.
To install the go plugin run `go install ./vendor/github.com/golang/protobuf/protoc-gen-go`
The Build Script
----------------

View File

@ -33,7 +33,7 @@ RUN wget https://github.com/google/protobuf/releases/download/v${PROTO_VERSION}/
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.7.4
ENV GO_VERSION 1.7.5
ENV GO_ARCH 386
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \

View File

@ -43,7 +43,7 @@ RUN wget https://github.com/google/protobuf/releases/download/v${PROTO_VERSION}/
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.7.4
ENV GO_VERSION 1.7.5
ENV GO_ARCH amd64
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \

529
vendor/github.com/influxdata/kapacitor/Gopkg.lock generated vendored Normal file
View File

@ -0,0 +1,529 @@
memo = "92770b8e4eb3071b7b03e0fdb5be20a0fecb16d6b3b3ca0f68a74789f43b3e07"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","internal"]
revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c"
version = "v0.7.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["arm/compute","arm/network"]
revision = "bd73d950fa4440dae889bd9917bff7cef539f86e"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/azure","autorest/date","autorest/to","autorest/validation"]
revision = "a2fdd780c9a50455cecd249b00bdc3eb73a78e31"
version = "v7.3.1"
[[projects]]
name = "github.com/BurntSushi/toml"
packages = ["."]
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
version = "v0.3.0"
[[projects]]
name = "github.com/PuerkitoBio/purell"
packages = ["."]
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
revision = "bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5"
[[projects]]
name = "github.com/Sirupsen/logrus"
packages = ["."]
revision = "ba1b36c82c5e05c4f912a88eab0dcd91a171688f"
version = "v0.11.5"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","private/protocol","private/protocol/ec2query","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/xml/xmlutil","service/ec2","service/sts"]
revision = "f6ea558f30e0a983d529b32c741e4caed17c7df0"
version = "v1.8.16"
[[projects]]
branch = "master"
name = "github.com/benbjohnson/tmpl"
packages = ["."]
revision = "8e77bc5fc07968736bb74f4b40b4c577028a61b6"
[[projects]]
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
[[projects]]
name = "github.com/blang/semver"
packages = ["."]
revision = "b38d23b8782a487059e8fc8773e9a5b228a77cb6"
version = "v3.5.0"
[[projects]]
name = "github.com/boltdb/bolt"
packages = ["."]
revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9"
version = "v1.3.0"
[[projects]]
name = "github.com/cenkalti/backoff"
packages = ["."]
revision = "32cd0c5b3aef12c76ed64aaf678f6c79736be7dc"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/coreos/go-oidc"
packages = ["http","jose","key","oauth2","oidc"]
revision = "be73733bb8cc830d0205609b95d125215f8e9c70"
[[projects]]
name = "github.com/coreos/pkg"
packages = ["health","httputil","timeutil"]
revision = "3ac0863d7acf3bc44daf49afef8919af12f704ef"
version = "v3"
[[projects]]
branch = "master"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
version = "v3.0.0"
[[projects]]
name = "github.com/docker/distribution"
packages = ["digest","reference"]
revision = "a25b9ef0c9fe242ac04bb20d3a028442b7d266b6"
version = "v2.6.1"
[[projects]]
branch = "master"
name = "github.com/dustin/go-humanize"
packages = ["."]
revision = "259d2a102b871d17f30e3cd9881a642961a1e486"
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [".","log","swagger"]
revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594"
version = "v1.2"
[[projects]]
branch = "master"
name = "github.com/evanphx/json-patch"
packages = ["."]
revision = "30afec6a1650c11c861dc1fb58e100cd5219e490"
[[projects]]
branch = "master"
name = "github.com/geoffgarside/ber"
packages = ["."]
revision = "854377f11dfb81f04121879829bc53487e377739"
[[projects]]
branch = "master"
name = "github.com/ghodss/yaml"
packages = ["."]
revision = "04f313413ffd65ce25f2541bfd2b2ceec5c0908c"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "e7fea39b01aea8d5671f6858f0532f56e8bff3a5"
version = "v1.27.0"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
[[projects]]
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "e51c28f07047ad90caff03f6450908720d337e0c"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "24ebf76d720bab64f62824d76bced3184a65490d"
[[projects]]
name = "github.com/gogo/protobuf"
packages = ["proto","sortkeys"]
revision = "100ba4e885062801d56799d78530b73b178a78f3"
version = "v0.4"
[[projects]]
branch = "master"
name = "github.com/golang/glog"
packages = ["."]
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go","protoc-gen-go/descriptor","protoc-gen-go/generator","protoc-gen-go/grpc","protoc-gen-go/plugin"]
revision = "2bba0603135d7d7f5cb73b2125beeda19c09f4ef"
[[projects]]
branch = "master"
name = "github.com/golang/snappy"
packages = ["."]
revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
branch = "master"
name = "github.com/google/gofuzz"
packages = ["."]
revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c"
[[projects]]
branch = "master"
name = "github.com/google/uuid"
packages = ["."]
revision = "6a5e28554805e78ea6141142aba763936c4761c0"
[[projects]]
branch = "master"
name = "github.com/googleapis/gax-go"
packages = ["."]
revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"
[[projects]]
name = "github.com/gorhill/cronexpr"
packages = ["."]
revision = "a557574d6c024ed6e36acc8b610f5f211c91568a"
version = "1.0.0"
[[projects]]
name = "github.com/hashicorp/consul"
packages = ["api"]
revision = "e9ca44d0a1757ac9aecc6785904a701936c10e4a"
version = "v0.8.1"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
revision = "3573b8b52aa7b37b9358d966a898feb387f62437"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-rootcerts"
packages = ["."]
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
[[projects]]
name = "github.com/hashicorp/serf"
packages = ["coordinate"]
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
version = "v0.8.1"
[[projects]]
name = "github.com/influxdata/influxdb"
packages = [".","client","client/v2","influxql","influxql/internal","influxql/neldermead","models","monitor/diagnostics","pkg/escape","pkg/limiter","services/collectd","services/graphite","services/meta","services/meta/internal","services/opentsdb","services/udp","toml","tsdb","tsdb/internal","uuid"]
revision = "e4628bb69266dbd624dc27d674b52705ce0dcbf2"
version = "v1.1.4"
[[projects]]
branch = "master"
name = "github.com/influxdata/usage-client"
packages = ["v1"]
revision = "6d3895376368aa52a3a81d2a16e90f0f52371967"
[[projects]]
branch = "master"
name = "github.com/influxdata/wlog"
packages = ["."]
revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
version = "0.2.2"
[[projects]]
name = "github.com/jonboulle/clockwork"
packages = ["."]
revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "acf38b000a03e4ab89e40f20f1e548f4e6ac7f72"
[[projects]]
name = "github.com/k-sone/snmpgo"
packages = ["."]
revision = "de09377ff34857b08afdc16ea8c7c2929eb1fc6e"
version = "v3.2.0"
[[projects]]
name = "github.com/kimor79/gollectd"
packages = ["."]
revision = "b5dddb1667dcc1e6355b9305e2c1608a2db6983c"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"]
revision = "9e74fe5423dca819cf8fd940d9a5d5307ac7aa10"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/miekg/dns"
packages = ["."]
revision = "6ebcb714d36901126ee2807031543b38c56de963"
[[projects]]
branch = "master"
name = "github.com/mitchellh/copystructure"
packages = ["."]
revision = "f81071c9d77b7931f78c90b416a074ecdc50e959"
[[projects]]
branch = "master"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
[[projects]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "5a0325d7fafaac12dda6e7fb8bd222ec1b69875e"
[[projects]]
branch = "master"
name = "github.com/mitchellh/reflectwalk"
packages = ["."]
revision = "417edcfd99a4d472c262e58f22b4bfe97580f03e"
[[projects]]
name = "github.com/pborman/uuid"
packages = ["."]
revision = "a97ce2ca70fa5a848076093f05e639a89ca34d06"
version = "v1.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = ["prometheus"]
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","log","model","version"]
revision = "9e0844febd9e2856f839c9cb974fbd676d1755a8"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [".","xfs"]
revision = "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad"
[[projects]]
branch = "master"
name = "github.com/prometheus/prometheus"
packages = ["config","discovery","discovery/azure","discovery/consul","discovery/dns","discovery/ec2","discovery/file","discovery/gce","discovery/kubernetes","discovery/marathon","discovery/triton","discovery/zookeeper","relabel","retrieval","storage","storage/local","storage/local/chunk","storage/local/codable","storage/local/index","storage/metric","util/flock","util/httputil","util/strutil","util/testutil","util/treecache"]
revision = "58298e738211f46cdab48c404e5514a544774579"
source = "github.com/goller/prometheus"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "0b647d0506a698cca42caca173e55559b12a69f2"
version = "v1.4"
[[projects]]
branch = "master"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
revision = "1d7be4effb13d2d908342d349d71a284a7542693"
[[projects]]
branch = "master"
name = "github.com/serenize/snaker"
packages = ["."]
revision = "543781d2b79bd95c51ffe70e70a55c946ca211ff"
[[projects]]
branch = "master"
name = "github.com/shurcooL/go"
packages = ["indentwriter"]
revision = "20b4b0a352116a106a505a8c528b6513e7e0d5c2"
[[projects]]
branch = "master"
name = "github.com/shurcooL/markdownfmt"
packages = ["markdown"]
revision = "10aae0a270abfb5d929ae6ca59c4b0ac0fa8f237"
[[projects]]
branch = "master"
name = "github.com/shurcooL/sanitized_anchor_name"
packages = ["."]
revision = "1dba4b3954bc059efc3991ec364f9f9a35f597d2"
[[projects]]
branch = "master"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert"]
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
version = "v1.1.4"
[[projects]]
branch = "master"
name = "github.com/syndtr/goleveldb"
packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"]
revision = "8c81ea47d4c41a385645e133e15510fc6a2a74b4"
[[projects]]
branch = "master"
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "708a42d246822952f38190a8d8c4e6b16a0e600c"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["bcrypt","blowfish"]
revision = "0242f07995e684be54f2a2776327141acf1cef91"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "d212a1ef2de2f5d441c327b8f26cf3ea3ea9f265"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows","windows/registry","windows/svc/eventlog"]
revision = "f3918c30c5c2cb527c0b071a27c35120a6c0719a"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
revision = "a9a820217f98f7c8a207ec1e45a874e1fe12c478"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = ["compute/v1","gensupport","googleapi","googleapi/internal/uritemplates"]
revision = "fbbaff1827317122a8a0e1b24de25df8417ce87b"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","codes","credentials","grpclog","internal","keepalive","metadata","naming","peer","stats","tap","transport"]
revision = "8050b9cbc271307e5a716a9d782803d09b0d6f2d"
version = "v1.2.1"
[[projects]]
branch = "v3"
name = "gopkg.in/alexcesaro/quotedprintable.v3"
packages = ["."]
revision = "2caba252f4dc53eaf6b553000885530023f54623"
[[projects]]
name = "gopkg.in/fsnotify.v1"
packages = ["."]
revision = "629574ca2a5df945712d3079857300b5e4da0236"
version = "v1.4.2"
[[projects]]
name = "gopkg.in/gomail.v2"
packages = ["."]
revision = "41f3572897373c5538c50a2402db15db079fa4fd"
version = "2.0.0"
[[projects]]
name = "gopkg.in/inf.v0"
packages = ["."]
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "a5b47d31c556af34a302ce5d659e6fea44d90de0"
[[projects]]
name = "k8s.io/client-go"
packages = ["1.5/discovery","1.5/kubernetes","1.5/kubernetes/typed/apps/v1alpha1","1.5/kubernetes/typed/authentication/v1beta1","1.5/kubernetes/typed/authorization/v1beta1","1.5/kubernetes/typed/autoscaling/v1","1.5/kubernetes/typed/batch/v1","1.5/kubernetes/typed/certificates/v1alpha1","1.5/kubernetes/typed/core/v1","1.5/kubernetes/typed/extensions/v1beta1","1.5/kubernetes/typed/policy/v1alpha1","1.5/kubernetes/typed/rbac/v1alpha1","1.5/kubernetes/typed/storage/v1beta1","1.5/pkg/api","1.5/pkg/api/errors","1.5/pkg/api/install","1.5/pkg/api/meta","1.5/pkg/api/meta/metatypes","1.5/pkg/api/resource","1.5/pkg/api/unversioned","1.5/pkg/api/v1","1.5/pkg/api/validation/path","1.5/pkg/apimachinery","1.5/pkg/apimachinery/announced","1.5/pkg/apimachinery/registered","1.5/pkg/apis/apps","1.5/pkg/apis/apps/install","1.5/pkg/apis/apps/v1alpha1","1.5/pkg/apis/authentication","1.5/pkg/apis/authentication/install","1.5/pkg/apis/authentication/v1beta1","1.5/pkg/apis/authorization","1.5/pkg/apis/authorization/install","1.5/pkg/apis/authorization/v1beta1","1.5/pkg/apis/autoscaling","1.5/pkg/apis/autoscaling/install","1.5/pkg/apis/autoscaling/v1","1.5/pkg/apis/batch","1.5/pkg/apis/batch/install","1.5/pkg/apis/batch/v1","1.5/pkg/apis/batch/v2alpha1","1.5/pkg/apis/certificates","1.5/pkg/apis/certificates/install","1.5/pkg/apis/certificates/v1alpha1","1.5/pkg/apis/extensions","1.5/pkg/apis/extensions/install","1.5/pkg/apis/extensions/v1beta1","1.5/pkg/apis/policy","1.5/pkg/apis/policy/install","1.5/pkg/apis/policy/v1alpha1","1.5/pkg/apis/rbac","1.5/pkg/apis/rbac/install","1.5/pkg/apis/rbac/v1alpha1","1.5/pkg/apis/storage","1.5/pkg/apis/storage/install","1.5/pkg/apis/storage/v1beta1","1.5/pkg/auth/user","1.5/pkg/conversion","1.5/pkg/conversion/queryparams","1.5/pkg/fields","1.5/pkg/genericapiserver/openapi/common","1.5/pkg/labels","1.5/pkg/runtime","1.5/pkg/runtime/serializer","1.5/pkg/runtime/serializer/json","1.5/pkg/runtime/serializer/protobuf","1.5/pkg/runtime/serializer/recognizer","1
.5/pkg/runtime/serializer/streaming","1.5/pkg/runtime/serializer/versioning","1.5/pkg/selection","1.5/pkg/third_party/forked/golang/reflect","1.5/pkg/types","1.5/pkg/util","1.5/pkg/util/cert","1.5/pkg/util/clock","1.5/pkg/util/errors","1.5/pkg/util/flowcontrol","1.5/pkg/util/framer","1.5/pkg/util/integer","1.5/pkg/util/intstr","1.5/pkg/util/json","1.5/pkg/util/labels","1.5/pkg/util/net","1.5/pkg/util/parsers","1.5/pkg/util/rand","1.5/pkg/util/runtime","1.5/pkg/util/sets","1.5/pkg/util/uuid","1.5/pkg/util/validation","1.5/pkg/util/validation/field","1.5/pkg/util/wait","1.5/pkg/util/yaml","1.5/pkg/version","1.5/pkg/watch","1.5/pkg/watch/versioned","1.5/plugin/pkg/client/auth","1.5/plugin/pkg/client/auth/gcp","1.5/plugin/pkg/client/auth/oidc","1.5/rest","1.5/tools/cache","1.5/tools/clientcmd/api","1.5/tools/metrics","1.5/transport"]
revision = "1195e3a8ee1a529d53eed7c624527a68555ddf1f"
version = "v1.5.1"

42
vendor/github.com/influxdata/kapacitor/Gopkg.toml generated vendored Normal file
View File

@ -0,0 +1,42 @@
required = ["github.com/benbjohnson/tmpl","github.com/golang/protobuf/protoc-gen-go"]
[[dependencies]]
name = "github.com/Azure/azure-sdk-for-go"
revision = "bd73d950fa4440dae889bd9917bff7cef539f86e"
[[dependencies]]
branch = "master"
name = "github.com/davecgh/go-spew"
[[dependencies]]
branch = "master"
name = "github.com/evanphx/json-patch"
[[dependencies]]
branch = "master"
name = "github.com/ghodss/yaml"
[[dependencies]]
branch = "master"
name = "github.com/google/uuid"
[[dependencies]]
name = "github.com/influxdata/influxdb"
version = "~1.1.0"
[[dependencies]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
[[dependencies]]
branch = "logger-targetmanager-wait"
name = "github.com/prometheus/prometheus"
source = "github.com/goller/prometheus"
[[dependencies]]
branch = "master"
name = "github.com/shurcooL/markdownfmt"
[[overrides]]
name = "github.com/Azure/azure-sdk-for-go"
revision = "bd73d950fa4440dae889bd9917bff7cef539f86e"

View File

@ -7,6 +7,7 @@ Dependencies
* github.com/dgrijalva/jwt-go [MIT](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
* github.com/dustin/go-humanize [MIT](https://github.com/dustin/go-humanize/blob/master/LICENSE)
* github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
* github.com/google/uuid [BSD](https://github.com/google/uuid/blob/master/LICENSE)
* github.com/gorhill/cronexpr [APLv2](https://github.com/gorhill/cronexpr/blob/master/APLv2)
* github.com/k-sone/snmpgo [MIT](https://github.com/k-sone/snmpgo/blob/master/LICENSE)
* github.com/kimor79/gollectd [BSD](https://github.com/kimor79/gollectd/blob/master/LICENSE)
@ -20,5 +21,4 @@ Dependencies
* github.com/shurcooL/markdownfmt [MIT](https://github.com/shurcooL/markdownfmt/blob/master/README.md)
* github.com/shurcooL/sanitized\_anchor\_name [MIT](https://github.com/shurcooL/sanitized_anchor_name/blob/master/LICENSE)
* github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENSE)
* github.com/twinj/uuid [MIT](https://github.com/twinj/uuid/blob/master/LICENSE)
* gopkg.in/gomail.v2 [MIT](https://github.com/go-gomail/gomail/blob/v2/LICENSE)

View File

@ -11,22 +11,25 @@ import (
text "text/template"
"time"
"github.com/influxdata/influxdb/influxql"
imodels "github.com/influxdata/influxdb/models"
"github.com/influxdata/kapacitor/alert"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
alertservice "github.com/influxdata/kapacitor/services/alert"
"github.com/influxdata/kapacitor/services/hipchat"
"github.com/influxdata/kapacitor/services/httppost"
"github.com/influxdata/kapacitor/services/opsgenie"
"github.com/influxdata/kapacitor/services/pagerduty"
"github.com/influxdata/kapacitor/services/pushover"
"github.com/influxdata/kapacitor/services/sensu"
"github.com/influxdata/kapacitor/services/slack"
"github.com/influxdata/kapacitor/services/smtp"
"github.com/influxdata/kapacitor/services/snmptrap"
"github.com/influxdata/kapacitor/services/telegram"
"github.com/influxdata/kapacitor/services/victorops"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
"github.com/influxdata/kapacitor/vars"
"github.com/pkg/errors"
)
@ -58,6 +61,8 @@ type AlertNode struct {
messageTmpl *text.Template
detailsTmpl *html.Template
statesMu sync.RWMutex
alertsTriggered *expvar.Int
oksTriggered *expvar.Int
infosTriggered *expvar.Int
@ -69,6 +74,8 @@ type AlertNode struct {
levelResets []stateful.Expression
lrScopePools []stateful.ScopePool
serverInfo serverInfo
}
// Create a new AlertNode which caches the most recent item and exposes it over the HTTP API.
@ -76,6 +83,11 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
an = &AlertNode{
node: node{Node: n, et: et, logger: l},
a: n,
serverInfo: serverInfo{
Hostname: vars.HostVar.StringValue(),
ClusterID: vars.ClusterIDVar.StringValue(),
ServerID: vars.ServerIDVar.StringValue(),
},
}
an.node.runF = an.runAlert
@ -119,15 +131,6 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
return nil, err
}
// Construct alert handlers
for _, post := range n.PostHandlers {
c := alertservice.PostHandlerConfig{
URL: post.URL,
}
h := alertservice.NewPostHandler(c, l)
an.handlers = append(an.handlers, h)
}
for _, tcp := range n.TcpHandlers {
c := alertservice.TCPHandlerConfig{
Address: tcp.Address,
@ -204,8 +207,15 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
an.handlers = append(an.handlers, h)
}
for range n.SensuHandlers {
h := et.tm.SensuService.Handler(l)
for _, s := range n.SensuHandlers {
c := sensu.HandlerConfig{
Source: s.Source,
Handlers: s.HandlersList,
}
h, err := et.tm.SensuService.Handler(c, l)
if err != nil {
return nil, errors.Wrap(err, "failed to create sensu alert handler")
}
an.handlers = append(an.handlers, h)
}
@ -325,6 +335,37 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
an.handlers = append(an.handlers, h)
}
for _, p := range n.PushoverHandlers {
c := pushover.HandlerConfig{}
if p.Device != "" {
c.Device = p.Device
}
if p.Title != "" {
c.Title = p.Title
}
if p.URL != "" {
c.URL = p.URL
}
if p.URLTitle != "" {
c.URLTitle = p.URLTitle
}
if p.Sound != "" {
c.Sound = p.Sound
}
h := et.tm.PushoverService.Handler(c, l)
an.handlers = append(an.handlers, h)
}
for _, p := range n.HTTPPostHandlers {
c := httppost.HandlerConfig{
URL: p.URL,
Endpoint: p.Endpoint,
Headers: p.Headers,
}
h := et.tm.HTTPPostService.Handler(c, l)
an.handlers = append(an.handlers, h)
}
for _, og := range n.OpsGenieHandlers {
c := opsgenie.HandlerConfig{
TeamsList: og.TeamsList,
@ -358,14 +399,14 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
}
an.levels[alert.Info] = statefulExpression
an.scopePools[alert.Info] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Info.Expression))
an.scopePools[alert.Info] = stateful.NewScopePool(ast.FindReferenceVariables(n.Info.Expression))
if n.InfoReset != nil {
lstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.InfoReset.Expression)
if lexpressionCompileError != nil {
return nil, fmt.Errorf("Failed to compile stateful expression for infoReset: %s", lexpressionCompileError)
}
an.levelResets[alert.Info] = lstatefulExpression
an.lrScopePools[alert.Info] = stateful.NewScopePool(stateful.FindReferenceVariables(n.InfoReset.Expression))
an.lrScopePools[alert.Info] = stateful.NewScopePool(ast.FindReferenceVariables(n.InfoReset.Expression))
}
}
@ -375,14 +416,14 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
return nil, fmt.Errorf("Failed to compile stateful expression for warn: %s", expressionCompileError)
}
an.levels[alert.Warning] = statefulExpression
an.scopePools[alert.Warning] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Warn.Expression))
an.scopePools[alert.Warning] = stateful.NewScopePool(ast.FindReferenceVariables(n.Warn.Expression))
if n.WarnReset != nil {
lstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.WarnReset.Expression)
if lexpressionCompileError != nil {
return nil, fmt.Errorf("Failed to compile stateful expression for warnReset: %s", lexpressionCompileError)
}
an.levelResets[alert.Warning] = lstatefulExpression
an.lrScopePools[alert.Warning] = stateful.NewScopePool(stateful.FindReferenceVariables(n.WarnReset.Expression))
an.lrScopePools[alert.Warning] = stateful.NewScopePool(ast.FindReferenceVariables(n.WarnReset.Expression))
}
}
@ -392,14 +433,14 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
return nil, fmt.Errorf("Failed to compile stateful expression for crit: %s", expressionCompileError)
}
an.levels[alert.Critical] = statefulExpression
an.scopePools[alert.Critical] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Crit.Expression))
an.scopePools[alert.Critical] = stateful.NewScopePool(ast.FindReferenceVariables(n.Crit.Expression))
if n.CritReset != nil {
lstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.CritReset.Expression)
if lexpressionCompileError != nil {
return nil, fmt.Errorf("Failed to compile stateful expression for critReset: %s", lexpressionCompileError)
}
an.levelResets[alert.Critical] = lstatefulExpression
an.lrScopePools[alert.Critical] = stateful.NewScopePool(stateful.FindReferenceVariables(n.CritReset.Expression))
an.lrScopePools[alert.Critical] = stateful.NewScopePool(ast.FindReferenceVariables(n.CritReset.Expression))
}
}
@ -420,13 +461,21 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *
}
func (a *AlertNode) runAlert([]byte) error {
valueF := func() int64 {
a.statesMu.RLock()
l := len(a.states)
a.statesMu.RUnlock()
return int64(l)
}
a.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
// Register delete hook
if a.hasAnonTopic() {
a.et.tm.registerDeleteHookForTask(a.et.Task.ID, deleteAlertHook(a.anonTopic))
// Register Handlers on topic
for _, h := range a.handlers {
a.et.tm.AlertService.RegisterHandler([]string{a.anonTopic}, h)
a.et.tm.AlertService.RegisterAnonHandler(a.anonTopic, h)
}
// Restore anonTopic
a.et.tm.AlertService.RestoreTopic(a.anonTopic)
@ -460,14 +509,16 @@ func (a *AlertNode) runAlert([]byte) error {
return err
}
var currentLevel alert.Level
if state, ok := a.states[p.Group]; ok {
if state, ok := a.getAlertState(p.Group); ok {
currentLevel = state.currentLevel()
} else {
// Check for previous state
currentLevel = a.restoreEventState(id)
var triggered time.Time
currentLevel, triggered = a.restoreEventState(id)
if currentLevel != alert.OK {
// Update the state with the restored state
a.updateState(p.Time, currentLevel, p.Group)
state = a.updateState(p.Time, currentLevel, p.Group)
state.triggered(triggered)
}
}
l := a.determineLevel(p.Time, p.Fields, p.Tags, currentLevel)
@ -550,14 +601,16 @@ func (a *AlertNode) runAlert([]byte) error {
var highestPoint *models.BatchPoint
var currentLevel alert.Level
if state, ok := a.states[b.Group]; ok {
if state, ok := a.getAlertState(b.Group); ok {
currentLevel = state.currentLevel()
} else {
// Check for previous state
currentLevel = a.restoreEventState(id)
var triggered time.Time
currentLevel, triggered = a.restoreEventState(id)
if currentLevel != alert.OK {
// Update the state with the restored state
a.updateState(b.TMax, currentLevel, b.Group)
state = a.updateState(b.TMax, currentLevel, b.Group)
state.triggered(triggered)
}
}
for i, p := range b.Points {
@ -613,6 +666,7 @@ func (a *AlertNode) runAlert([]byte) error {
a.a.IdField != "" ||
a.a.DurationField != "" ||
a.a.MessageField != "" {
b.Points = b.ShallowCopyPoints()
for i := range b.Points {
if a.a.LevelTag != "" || a.a.IdTag != "" {
b.Points[i].Tags = b.Points[i].Tags.Copy()
@ -665,7 +719,7 @@ func (a *AlertNode) runAlert([]byte) error {
a.et.tm.AlertService.CloseTopic(a.anonTopic)
// Deregister Handlers on topic
for _, h := range a.handlers {
a.et.tm.AlertService.DeregisterHandler([]string{a.anonTopic}, h)
a.et.tm.AlertService.DeregisterAnonHandler(a.anonTopic, h)
}
return nil
}
@ -683,19 +737,25 @@ func (a *AlertNode) hasTopic() bool {
return a.topic != ""
}
func (a *AlertNode) restoreEventState(id string) alert.Level {
func (a *AlertNode) restoreEventState(id string) (alert.Level, time.Time) {
var topicState, anonTopicState alert.EventState
var anonFound, topicFound bool
// Check for previous state on anonTopic
if a.hasAnonTopic() {
if state, ok := a.et.tm.AlertService.EventState(a.anonTopic, id); ok {
if state, ok, err := a.et.tm.AlertService.EventState(a.anonTopic, id); err != nil {
a.incrementErrorCount()
a.logger.Printf("E! failed to get event state for anonymous topic %s, event %s: %v", a.anonTopic, id, err)
} else if ok {
anonTopicState = state
anonFound = true
}
}
// Check for previous state on topic.
if a.hasTopic() {
if state, ok := a.et.tm.AlertService.EventState(a.topic, id); ok {
if state, ok, err := a.et.tm.AlertService.EventState(a.topic, id); err != nil {
a.incrementErrorCount()
a.logger.Printf("E! failed to get event state for topic %s, event %s: %v", a.topic, id, err)
} else if ok {
topicState = state
topicFound = true
}
@ -704,19 +764,21 @@ func (a *AlertNode) restoreEventState(id string) alert.Level {
if anonFound && topicFound {
// Anon topic takes precedence
if err := a.et.tm.AlertService.UpdateEvent(a.topic, anonTopicState); err != nil {
a.incrementErrorCount()
a.logger.Printf("E! failed to update topic %q event state for event %q", a.topic, id)
}
} else if topicFound && a.hasAnonTopic() {
// Update event state for topic
if err := a.et.tm.AlertService.UpdateEvent(a.anonTopic, topicState); err != nil {
a.incrementErrorCount()
a.logger.Printf("E! failed to update topic %q event state for event %q", a.topic, id)
}
} // else nothing was found, nothing to do
}
if anonFound {
return anonTopicState.Level
return anonTopicState.Level, anonTopicState.Time
}
return topicState.Level
return topicState.Level, topicState.Time
}
func (a *AlertNode) handleEvent(event alert.Event) {
@ -739,6 +801,7 @@ func (a *AlertNode) handleEvent(event alert.Event) {
err := a.et.tm.AlertService.Collect(event)
if err != nil {
a.eventsDropped.Add(1)
a.incrementErrorCount()
a.logger.Println("E!", err)
}
}
@ -749,6 +812,7 @@ func (a *AlertNode) handleEvent(event alert.Event) {
err := a.et.tm.AlertService.Collect(event)
if err != nil {
a.eventsDropped.Add(1)
a.incrementErrorCount()
a.logger.Println("E!", err)
}
}
@ -760,6 +824,7 @@ func (a *AlertNode) determineLevel(now time.Time, fields models.Fields, tags map
}
if rse := a.levelResets[currentLevel]; rse != nil {
if pass, err := EvalPredicate(rse, a.lrScopePools[currentLevel], now, fields, tags); err != nil {
a.incrementErrorCount()
a.logger.Printf("E! error evaluating reset expression for current level %v: %s", currentLevel, err)
} else if !pass {
return currentLevel
@ -781,6 +846,7 @@ func (a *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, now
continue
}
if pass, err := EvalPredicate(se, a.scopePools[l], now, fields, tags); err != nil {
a.incrementErrorCount()
a.logger.Printf("E! error evaluating expression for level %v: %s", alert.Level(l), err)
continue
} else if pass {
@ -790,14 +856,6 @@ func (a *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, now
return alert.OK, false
}
func (a *AlertNode) batchToResult(b models.Batch) influxql.Result {
row := models.BatchToRow(b)
r := influxql.Result{
Series: imodels.Rows{row},
}
return r
}
func (a *AlertNode) event(
id, name string,
group models.GroupID,
@ -828,7 +886,7 @@ func (a *AlertNode) event(
Group: string(group),
Tags: tags,
Fields: fields,
Result: a.batchToResult(b),
Result: models.BatchToResult(b),
},
}
return event, nil
@ -904,12 +962,14 @@ func (a *alertState) percentChange() float64 {
}
func (a *AlertNode) updateState(t time.Time, level alert.Level, group models.GroupID) *alertState {
state, ok := a.states[group]
state, ok := a.getAlertState(group)
if !ok {
state = &alertState{
history: make([]alert.Level, a.a.History),
}
a.statesMu.Lock()
a.states[group] = state
a.statesMu.Unlock()
}
state.addEvent(level)
@ -925,6 +985,12 @@ func (a *AlertNode) updateState(t time.Time, level alert.Level, group models.Gro
return state
}
type serverInfo struct {
Hostname string
ClusterID string
ServerID string
}
// Type containing information available to ID template.
type idInfo struct {
// Measurement name
@ -939,6 +1005,8 @@ type idInfo struct {
// Map of tags
Tags map[string]string
ServerInfo serverInfo
}
type messageInfo struct {
@ -969,10 +1037,11 @@ func (a *AlertNode) renderID(name string, group models.GroupID, tags models.Tags
g = "nil"
}
info := idInfo{
Name: name,
TaskName: a.et.Task.ID,
Group: g,
Tags: tags,
Name: name,
TaskName: a.et.Task.ID,
Group: g,
Tags: tags,
ServerInfo: a.serverInfo,
}
id := a.bufPool.Get().(*bytes.Buffer)
defer func() {
@ -994,10 +1063,11 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group
}
minfo := messageInfo{
idInfo: idInfo{
Name: name,
TaskName: a.et.Task.ID,
Group: g,
Tags: tags,
Name: name,
TaskName: a.et.Task.ID,
Group: g,
Tags: tags,
ServerInfo: a.serverInfo,
},
ID: id,
Fields: fields,
@ -1034,3 +1104,10 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group
details := tmpBuffer.String()
return msg, details, nil
}
// getAlertState looks up the stored alert state for the given group while
// holding the read lock, reporting whether a state exists for that group.
func (a *AlertNode) getAlertState(id models.GroupID) (state *alertState, ok bool) {
	a.statesMu.RLock()
	defer a.statesMu.RUnlock()
	state, ok = a.states[id]
	return state, ok
}

View File

@ -17,7 +17,6 @@ import (
)
const (
statsQueryErrors = "query_errors"
statsBatchesQueried = "batches_queried"
statsPointsQueried = "points_queried"
)
@ -137,7 +136,6 @@ type QueryNode struct {
closing chan struct{}
aborting chan struct{}
queryErrors *expvar.Int
batchesQueried *expvar.Int
pointsQueried *expvar.Int
byName bool
@ -266,11 +264,9 @@ func (b *QueryNode) Queries(start, stop time.Time) ([]*Query, error) {
// Query InfluxDB and collect batches on batch collector.
func (b *QueryNode) doQuery() error {
defer b.ins[0].Close()
b.queryErrors = &expvar.Int{}
b.batchesQueried = &expvar.Int{}
b.pointsQueried = &expvar.Int{}
b.statMap.Set(statsQueryErrors, b.queryErrors)
b.statMap.Set(statsBatchesQueried, b.batchesQueried)
b.statMap.Set(statsPointsQueried, b.pointsQueried)
@ -305,7 +301,7 @@ func (b *QueryNode) doQuery() error {
}
resp, err := con.Query(q)
if err != nil {
b.queryErrors.Add(1)
b.incrementErrorCount()
b.logger.Println("E!", err)
b.timer.Stop()
break
@ -315,8 +311,8 @@ func (b *QueryNode) doQuery() error {
for _, res := range resp.Results {
batches, err := models.ResultToBatches(res, b.byName)
if err != nil {
b.incrementErrorCount()
b.logger.Println("E! failed to understand query result:", err)
b.queryErrors.Add(1)
continue
}
for _, bch := range batches {

View File

@ -24,6 +24,6 @@ docker run --rm \
-e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
-e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
${VOLUME_OPTIONS} \
-v $DIR:/root/go/src/github.com/influxdata/kapacitor \
-v $DIR:/root/go/src/github.com/influxdata/kapacitor:Z \
influxdata/kapacitor-builder \
"$@"

View File

@ -8,6 +8,7 @@
* [Replays](#replays)
* [Alerts](#alerts)
* [Configuration](#configuration)
* [Storage](#storage)
* [Testing Services](#testing-services)
* [Miscellaneous](#miscellaneous)
@ -1372,12 +1373,29 @@ Meaning that they are subject to change in the future until the technical previe
As such the URL for the endpoints uses the base path `/kapacitor/v1preview`.
Once the technical preview is deemed complete the endpoint paths will be promoted to use the v1 `/kapacitor/v1` base path.
### Topics
Alerts are grouped into topics.
An alert handler "listens" on a topic for any new events.
You can either specify the alert topic in the TICKscript or one will be generated for you.
### Creating and Removing Topics
Topics are created dynamically when they are referenced in TICKscripts or in handlers.
To delete a topic make a `DELETE` request to `/kapacitor/v1preview/alerts/topics/<topic id>`.
This will delete all known events and state for the topic.
>NOTE: Since topics are dynamically created, a topic may return after having been deleted, if a new event is created for the topic.
#### Example
```
DELETE /kapacitor/v1preview/alerts/topics/system
```
### List Topics
To query the list of available topics make a GET requests to `/kapacitor/v1preview/alerts/topics`.
| Query Parameter | Default | Purpose |
@ -1438,9 +1456,9 @@ GET /kapacitor/v1preview/alerts/topics?min-level=WARNING
}
```
### Topic Status
### Topic State
To query the status of a topic make a GET request to `/kapacitor/v1preview/alerts/topics/<topic id>`.
To query the state of a topic make a GET request to `/kapacitor/v1preview/alerts/topics/<topic id>`.
#### Example
@ -1458,7 +1476,7 @@ GET /kapacitor/v1preview/alerts/topics/system
}
```
### All Topic Events
### List Topic Events
To query all the events within a topic make a GET request to `/kapacitor/v1preview/alerts/topics/<topic id>/events`.
@ -1501,7 +1519,7 @@ GET /kapacitor/v1preview/alerts/topics/system/events
}
```
### Specific Topic Event
### Topic Event
You can query a specific event within a topic by making a GET request to `/kapacitor/v1preview/alerts/topics/<topic id>/events/<event id>`.
@ -1524,11 +1542,15 @@ GET /kapacitor/v1preview/alerts/topics/system/events/cpu
}
```
### Topic Handlers
### List Topic Handlers
Handlers are created independent of a topic but are associated with a topic.
Handlers are created within a topic.
You can get a list of handlers configured for a topic by making a GET request to `/kapacitor/v1preview/alerts/topics/<topic id>/handlers`.
| Query Parameter | Default | Purpose |
| --------------- | ------- | ------- |
| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the service name, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. |
>NOTE: Anonymous handlers (created automatically from TICKscripts) will not be listed under their associated anonymous topic as they are not configured via the API.
#### Example
@ -1546,9 +1568,8 @@ GET /kapacitor/v1preview/alerts/topics/system/handlers
"topic": "system",
"handlers": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options":{
@ -1557,9 +1578,8 @@ GET /kapacitor/v1preview/alerts/topics/system/handlers
}]
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"},
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/smtp"},
"id":"smtp",
"topics": ["system", "app"],
"actions": [{
"kind":"smtp"
}]
@ -1583,78 +1603,20 @@ GET /kapacitor/v1preview/alerts/topics/main:alert_cpu:alert5/handlers
}
```
### Creating and Removing Topics
Topics are created dynamically for you when they are referenced in TICKscripts or in handlers.
To delete a topic make a `DELETE` request to `/kapacitor/v1preview/alerts/topics/<topic id>`.
This will delete all known events and state for the topic.
>NOTE: Since topics are dynamically created, a topic may return after having been deleted, if a new event is created for the topic.
#### Example
```
DELETE /kapacitor/v1preview/alerts/topics/system
```
### List Handlers
To query information about all handlers independent of a given topic make a GET request to `/kapacitor/v1preview/alerts/handlers`.
| Query Parameter | Default | Purpose |
| --------------- | ------- | ------- |
| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the service name, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. |
#### Example
```
GET /kapacitor/v1preview/alerts/handlers
```
```
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers"},
"handlers": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
"channel":"#alerts"
}
}]
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"},
"id":"smtp",
"topics": ["system", "app"],
"actions": [{
"kind":"smtp"
}]
}
]
}
```
### Get a Handler
To query information about a specific handler make a GET request to `/kapacitor/v1preview/alerts/handlers/<handler id>`.
To query information about a specific handler make a GET request to `/kapacitor/v1preview/alerts/topics/<topic id>/handlers/<handler id>`.
#### Example
```
GET /kapacitor/v1preview/alerts/handlers/slack
GET /kapacitor/v1preview/alerts/topics/system/handlers/slack
```
```
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
@ -1666,13 +1628,12 @@ GET /kapacitor/v1preview/alerts/handlers/slack
### Create a Handler
To create a new handler make a POST request to `/kapacitor/v1preview/alerts/handlers`.
To create a new handler make a POST request to `/kapacitor/v1preview/alerts/topics/system/handlers`.
```
POST /kapacitor/v1preview/alerts/handlers
POST /kapacitor/v1preview/alerts/topics/system/handlers
{
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
@ -1685,9 +1646,8 @@ POST /kapacitor/v1preview/alerts/handlers
```
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id": "slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
@ -1699,7 +1659,7 @@ POST /kapacitor/v1preview/alerts/handlers
### Update a Handler
To update an existing handler you can either make a PUT or PATCH request to `/kapacitor/v1preview/alerts/handlers/<handler id>`.
To update an existing handler you can either make a PUT or PATCH request to `/kapacitor/v1preview/alerts/topics/system/handlers/<handler id>`.
Using PUT will replace the entire handler, by using PATCH specific parts of the handler can be modified.
@ -1710,7 +1670,7 @@ PATCH will apply JSON patch object to the existing handler, see [rfc6902](https:
Update the topics and actions for a handler using the PATCH method.
```
PATCH /kapacitor/v1preview/alerts/handlers/slack
PATCH /kapacitor/v1preview/alerts/topics/system/handlers/slack
[
{"op":"replace", "path":"/topics", "value":["system", "test"]},
{"op":"replace", "path":"/actions/0/options/channel", "value":"#testing_alerts"}
@ -1719,9 +1679,8 @@ PATCH /kapacitor/v1preview/alerts/handlers/slack
```
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id": "slack",
"topics": ["system", "test"],
"actions": [
{
"kind":"slack",
@ -1736,10 +1695,9 @@ PATCH /kapacitor/v1preview/alerts/handlers/slack
Replace an entire handler using the PUT method.
```
PUT /kapacitor/v1preview/alerts/handlers/slack
PUT /kapacitor/v1preview/alerts/topics/system/handlers/slack
{
"id": "slack",
"topics": ["system", "test"],
"actions": [
{
"kind":"slack",
@ -1753,9 +1711,8 @@ PUT /kapacitor/v1preview/alerts/handlers/slack
```
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id": "slack",
"topics": ["system", "test"],
"actions": [
{
"kind":"slack",
@ -1769,12 +1726,13 @@ PUT /kapacitor/v1preview/alerts/handlers/slack
### Remove a Handler
To remove an existing handler make a DELETE request to `/kapacitor/v1preview/alerts/handlers/<handler id>`.
To remove an existing handler make a DELETE request to `/kapacitor/v1preview/alerts/topics/system/handlers/<handler id>`.
```
DELETE /kapacitor/v1preview/alerts/handlers/<handler id>
DELETE /kapacitor/v1preview/alerts/topics/system/handlers/<handler id>
```
## Configuration
You can set configuration overrides via the API for certain sections of the config.
@ -1786,7 +1744,6 @@ The intent of the API is to allow for dynamic configuration of sensitive credent
As such, it is recommended to use either the configuration file or the API to manage these configuration sections, but not both.
This will help to eliminate any confusion that may arise as to the source of a given configuration option.
### Enabling/Disabling Configuration Overrides
By default the ability to override the configuration is enabled.
@ -2160,6 +2117,62 @@ POST /kapacitor/v1/config/influxdb/remote
| 403 | Config override service not enabled |
| 404 | The specified configuration section/option does not exist |
## Storage
Kapacitor exposes some operations that can be performed on the underlying storage.
>WARNING: Every storage operation directly manipulates the underlying storage database.
Always make a backup of the database before performing any of these operations.
### Backing up the Storage
Making a GET request to `/kapacitor/v1/storage/backup` will return a dump of the Kapacitor database.
To restore from a backup replace the `kapacitor.db` file with the contents of the backup request.
```
# Create a backup.
curl http://localhost:9092/kapacitor/v1/storage/backup > kapacitor.db
```
```
# Restore a backup.
# The destination path is dependent on your configuration.
cp kapacitor.db ~/.kapacitor/kapacitor.db
```
### Stores
Kapacitor's underlying storage system is organized into different stores.
Various actions can be performed on each individual store.
>WARNING: Every storage operation directly manipulates the underlying storage database.
Always make a backup of the database before performing any of these operations.
Available actions:
| Action | Description |
| ------ | ----------- |
| rebuild | Rebuild all indexes in a store, this operation can be very expensive. |
To perform an action make a POST request to the `/kapacitor/v1/storage/stores/<name of store>` endpoint.
#### Example
```
POST /kapacitor/v1/storage/stores/tasks
{
"action" : "rebuild"
}
```
#### Response
| Code | Meaning |
| ---- | ------- |
| 204 | Success |
| 400 | Unknown action |
| 404 | The specified store does not exist |
## Testing Services
Kapacitor makes use of various service integrations.

View File

@ -6,6 +6,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
@ -43,10 +44,12 @@ const (
configPath = basePath + "/config"
serviceTestsPath = basePath + "/service-tests"
alertsPath = basePreviewPath + "/alerts"
handlersPath = alertsPath + "/handlers"
topicsPath = alertsPath + "/topics"
topicEventsPath = "events"
topicHandlersPath = "handlers"
storagePath = basePath + "/storage"
storesPath = storagePath + "/stores"
backupPath = storagePath + "/backup"
)
// HTTP configuration for connecting to Kapacitor
@ -146,6 +149,7 @@ func New(conf Config) (*Client, error) {
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: conf.InsecureSkipVerify,
},
@ -589,10 +593,7 @@ func (c *Client) BaseURL() url.URL {
return *c.url
}
// Perform the request.
// If result is not nil the response body is JSON decoded into result.
// Codes is a list of valid response codes.
func (c *Client) Do(req *http.Request, result interface{}, codes ...int) (*http.Response, error) {
func (c *Client) prepRequest(req *http.Request) error {
req.Header.Set("User-Agent", c.userAgent)
if c.credentials != nil {
switch c.credentials.Method {
@ -601,9 +602,37 @@ func (c *Client) Do(req *http.Request, result interface{}, codes ...int) (*http.
case BearerAuthentication:
req.Header.Set("Authorization", "Bearer "+c.credentials.Token)
default:
return nil, errors.New("unknown authentication method set")
return errors.New("unknown authentication method set")
}
}
return nil
}
// decodeError turns a non-success HTTP response into a Go error.
// If the body contains a JSON object with an "error" field, that message is
// returned; otherwise a generic error with the status code and raw body is used.
func (c *Client) decodeError(resp *http.Response) error {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	var rp struct {
		Error string `json:"error"`
	}
	// Best-effort decode: on malformed JSON we fall through to the generic error.
	json.Unmarshal(body, &rp)
	if rp.Error != "" {
		return errors.New(rp.Error)
	}
	return fmt.Errorf("invalid response: code %d: body: %s", resp.StatusCode, string(body))
}
// Perform the request.
// If result is not nil the response body is JSON decoded into result.
// Codes is a list of valid response codes.
func (c *Client) Do(req *http.Request, result interface{}, codes ...int) (*http.Response, error) {
err := c.prepRequest(req)
if err != nil {
return nil, err
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
@ -618,20 +647,7 @@ func (c *Client) Do(req *http.Request, result interface{}, codes ...int) (*http.
}
}
if !valid {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
type errResp struct {
Error string `json:"error"`
}
d := json.NewDecoder(bytes.NewReader(body))
rp := errResp{}
d.Decode(&rp)
if rp.Error != "" {
return nil, errors.New(rp.Error)
}
return nil, fmt.Errorf("invalid response: code %d: body: %s", resp.StatusCode, string(body))
return nil, c.decodeError(resp)
}
if result != nil {
d := json.NewDecoder(resp.Body)
@ -687,6 +703,10 @@ func (c *Client) ServiceTestLink(service string) Link {
return Link{Relation: Self, Href: path.Join(serviceTestsPath, service)}
}
// TopicLink returns the self link for the alert topic with the given id,
// rooted at the topics base path.
func (c *Client) TopicLink(id string) Link {
	return Link{Relation: Self, Href: path.Join(topicsPath, id)}
}
// TopicEventsLink returns the self link for the events collection of the
// given alert topic.
func (c *Client) TopicEventsLink(topic string) Link {
	return Link{Relation: Self, Href: path.Join(topicsPath, topic, topicEventsPath)}
}
@ -697,12 +717,11 @@ func (c *Client) TopicEventLink(topic, event string) Link {
// TopicHandlersLink returns the self link for the handlers collection of the
// given alert topic.
func (c *Client) TopicHandlersLink(topic string) Link {
	return Link{Relation: Self, Href: path.Join(topicsPath, topic, topicHandlersPath)}
}
func (c *Client) HandlerLink(id string) Link {
return Link{Relation: Self, Href: path.Join(handlersPath, id)}
func (c *Client) TopicHandlerLink(topic, id string) Link {
return Link{Relation: Self, Href: path.Join(topicsPath, topic, topicHandlersPath, id)}
}
func (c *Client) TopicLink(id string) Link {
return Link{Relation: Self, Href: path.Join(topicsPath, id)}
func (c *Client) StorageLink(name string) Link {
return Link{Relation: Self, Href: path.Join(storesPath, name)}
}
type CreateTaskOptions struct {
@ -1910,51 +1929,23 @@ func (c *Client) ListTopicEvents(link Link, opt *ListTopicEventsOptions) (TopicE
}
type TopicHandlers struct {
Link Link `json:"link"`
Topic string `json:"topic"`
Handlers []Handler `json:"handlers"`
Link Link `json:"link"`
Topic string `json:"topic"`
Handlers []TopicHandler `json:"handlers"`
}
// TopicHandlers returns the current state for events within a topic.
func (c *Client) ListTopicHandlers(link Link) (TopicHandlers, error) {
t := TopicHandlers{}
if link.Href == "" {
return t, fmt.Errorf("invalid link %v", link)
}
u := *c.url
u.Path = link.Href
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return t, err
}
_, err = c.Do(req, &t, http.StatusOK)
return t, err
type TopicHandler struct {
Link Link `json:"link"`
ID string `json:"id"`
Kind string `json:"kind"`
Options map[string]interface{} `json:"options"`
Match string `json:"match"`
}
type Handlers struct {
Link Link `json:"link"`
Handlers []Handler `json:"handlers"`
}
type Handler struct {
Link Link `json:"link"`
ID string `json:"id"`
Topics []string `json:"topics"`
Actions []HandlerAction `json:"actions"`
}
type HandlerAction struct {
Kind string `json:"kind" yaml:"kind"`
Options map[string]interface{} `json:"options" yaml:"options"`
}
// Handler retrieves an alert handler.
// TopicHandler retrieves an alert handler.
// Errors if no handler exists.
func (c *Client) Handler(link Link) (Handler, error) {
h := Handler{}
func (c *Client) TopicHandler(link Link) (TopicHandler, error) {
h := TopicHandler{}
if link.Href == "" {
return h, fmt.Errorf("invalid link %v", link)
}
@ -1971,39 +1962,40 @@ func (c *Client) Handler(link Link) (Handler, error) {
return h, err
}
type HandlerOptions struct {
ID string `json:"id" yaml:"id"`
Topics []string `json:"topics" yaml:"topics"`
Actions []HandlerAction `json:"actions" yaml:"actions"`
type TopicHandlerOptions struct {
ID string `json:"id" yaml:"id"`
Kind string `json:"kind" yaml:"kind"`
Options map[string]interface{} `json:"options" yaml:"options"`
Match string `json:"match" yaml:"match"`
}
// CreateHandler creates a new alert handler.
// CreateTopicHandler creates a new alert handler.
// Errors if the handler already exists.
func (c *Client) CreateHandler(opt HandlerOptions) (Handler, error) {
func (c *Client) CreateTopicHandler(link Link, opt TopicHandlerOptions) (TopicHandler, error) {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
err := enc.Encode(opt)
if err != nil {
return Handler{}, err
return TopicHandler{}, err
}
u := *c.url
u.Path = handlersPath
u.Path = link.Href
req, err := http.NewRequest("POST", u.String(), &buf)
if err != nil {
return Handler{}, err
return TopicHandler{}, err
}
req.Header.Set("Content-Type", "application/json")
h := Handler{}
h := TopicHandler{}
_, err = c.Do(req, &h, http.StatusOK)
return h, err
}
// PatchHandler applies a patch operation to an existing handler.
func (c *Client) PatchHandler(link Link, patch JSONPatch) (Handler, error) {
h := Handler{}
// PatchTopicHandler applies a patch operation to an existing handler.
func (c *Client) PatchTopicHandler(link Link, patch JSONPatch) (TopicHandler, error) {
h := TopicHandler{}
if link.Href == "" {
return h, fmt.Errorf("invalid link %v", link)
}
@ -2027,9 +2019,9 @@ func (c *Client) PatchHandler(link Link, patch JSONPatch) (Handler, error) {
return h, err
}
// ReplaceHandler replaces an existing handler, with the new definition.
func (c *Client) ReplaceHandler(link Link, opt HandlerOptions) (Handler, error) {
h := Handler{}
// ReplaceTopicHandler replaces an existing handler, with the new definition.
func (c *Client) ReplaceTopicHandler(link Link, opt TopicHandlerOptions) (TopicHandler, error) {
h := TopicHandler{}
if link.Href == "" {
return h, fmt.Errorf("invalid link %v", link)
}
@ -2053,8 +2045,8 @@ func (c *Client) ReplaceHandler(link Link, opt HandlerOptions) (Handler, error)
return h, err
}
// DeleteHandler deletes a handler.
func (c *Client) DeleteHandler(link Link) error {
// DeleteTopicHandler deletes a handler.
func (c *Client) DeleteTopicHandler(link Link) error {
if link.Href == "" {
return fmt.Errorf("invalid link %v", link)
}
@ -2070,27 +2062,27 @@ func (c *Client) DeleteHandler(link Link) error {
return err
}
type ListHandlersOptions struct {
type ListTopicHandlersOptions struct {
Pattern string
}
func (o *ListHandlersOptions) Default() {}
func (o *ListTopicHandlersOptions) Default() {}
func (o *ListHandlersOptions) Values() *url.Values {
func (o *ListTopicHandlersOptions) Values() *url.Values {
v := &url.Values{}
v.Set("pattern", o.Pattern)
return v
}
func (c *Client) ListHandlers(opt *ListHandlersOptions) (Handlers, error) {
handlers := Handlers{}
func (c *Client) ListTopicHandlers(link Link, opt *ListTopicHandlersOptions) (TopicHandlers, error) {
handlers := TopicHandlers{}
if opt == nil {
opt = new(ListHandlersOptions)
opt = new(ListTopicHandlersOptions)
}
opt.Default()
u := *c.url
u.Path = handlersPath
u.Path = link.Href
u.RawQuery = opt.Values().Encode()
req, err := http.NewRequest("GET", u.String(), nil)
@ -2105,6 +2097,116 @@ func (c *Client) ListHandlers(opt *ListHandlersOptions) (Handlers, error) {
return handlers, nil
}
// StorageList is the response body returned by ListStorage: the collection
// link plus one entry per storage store.
type StorageList struct {
	Link    Link      `json:"link"`
	Storage []Storage `json:"storage"`
}

// Storage identifies a single named storage store and its self link.
type Storage struct {
	Link Link   `json:"link"`
	Name string `json:"name"`
}
// StorageAction is an operation that can be performed on a storage store.
type StorageAction int

const (
	_ StorageAction = iota
	// StorageRebuild requests that all indexes in a store be rebuilt.
	StorageRebuild
)

// MarshalText renders the action as its API text form ("rebuild").
func (sa StorageAction) MarshalText() ([]byte, error) {
	if sa == StorageRebuild {
		return []byte("rebuild"), nil
	}
	return nil, fmt.Errorf("unknown StorageAction %d", sa)
}

// UnmarshalText parses the API text form of an action.
func (sa *StorageAction) UnmarshalText(text []byte) error {
	s := string(text)
	if s != "rebuild" {
		return fmt.Errorf("unknown StorageAction %s", s)
	}
	*sa = StorageRebuild
	return nil
}

// String returns the text form of the action, or the error message for
// values with no text form.
func (sa StorageAction) String() string {
	text, err := sa.MarshalText()
	if err != nil {
		return err.Error()
	}
	return string(text)
}
// StorageActionOptions is the request body sent by DoStorageAction,
// naming the action to perform on a store.
type StorageActionOptions struct {
	Action StorageAction `json:"action"`
}
// ListStorage retrieves the list of storage stores from the stores endpoint.
func (c *Client) ListStorage() (StorageList, error) {
	u := *c.url
	u.Path = storesPath

	list := StorageList{}
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return list, err
	}
	_, err = c.Do(req, &list, http.StatusOK)
	return list, err
}
// DoStorageAction POSTs the given action options to the storage link l,
// expecting a 204 No Content on success.
func (c *Client) DoStorageAction(l Link, opt StorageActionOptions) error {
	var body bytes.Buffer
	if err := json.NewEncoder(&body).Encode(opt); err != nil {
		return err
	}

	u := *c.url
	u.Path = l.Href
	req, err := http.NewRequest("POST", u.String(), &body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")

	_, err = c.Do(req, nil, http.StatusNoContent)
	return err
}
// Backup requests a backup of all storage from Kapacitor.
// A short read is possible; to verify that the backup was successful,
// check that the number of bytes read matches the returned size
// (resp.ContentLength as reported by the server).
func (c *Client) Backup() (int64, io.ReadCloser, error) {
	u := *c.url
	u.Path = backupPath

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return 0, nil, err
	}
	// Bypass Do so the body can be streamed to the caller instead of decoded.
	if err := c.prepRequest(req); err != nil {
		return 0, nil, err
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return 0, nil, err
	}
	return resp.ContentLength, resp.Body, nil
}
// LogLevelOptions specifies a logging level by name.
// NOTE(review): presumably the request body for a log-level endpoint — the
// consumer is not visible here; confirm against callers.
type LogLevelOptions struct {
	Level string `json:"level"`
}

View File

@ -285,52 +285,45 @@ func Test_ReportsErrors(t *testing.T) {
return err
},
},
{
name: "ListTopicHandlers",
fnc: func(c *client.Client) error {
_, err := c.ListTopicHandlers(c.TopicHandlersLink(""))
return err
},
},
{
name: "Handler",
fnc: func(c *client.Client) error {
_, err := c.Handler(c.HandlerLink(""))
_, err := c.TopicHandler(c.TopicHandlerLink("", ""))
return err
},
},
{
name: "CreateHandler",
fnc: func(c *client.Client) error {
_, err := c.CreateHandler(client.HandlerOptions{})
_, err := c.CreateTopicHandler(c.TopicHandlersLink(""), client.TopicHandlerOptions{})
return err
},
},
{
name: "PatchHandler",
fnc: func(c *client.Client) error {
_, err := c.PatchHandler(c.HandlerLink(""), nil)
_, err := c.PatchTopicHandler(c.TopicHandlerLink("", ""), nil)
return err
},
},
{
name: "ReplaceHandler",
fnc: func(c *client.Client) error {
_, err := c.ReplaceHandler(c.HandlerLink(""), client.HandlerOptions{})
_, err := c.ReplaceTopicHandler(c.TopicHandlerLink("", ""), client.TopicHandlerOptions{})
return err
},
},
{
name: "DeleteHandler",
fnc: func(c *client.Client) error {
err := c.DeleteHandler(c.HandlerLink(""))
err := c.DeleteTopicHandler(c.TopicHandlerLink("", ""))
return err
},
},
{
name: "ListHandlers",
fnc: func(c *client.Client) error {
_, err := c.ListHandlers(nil)
_, err := c.ListTopicHandlers(c.TopicHandlersLink(""), nil)
return err
},
},
@ -410,7 +403,7 @@ func Test_Task(t *testing.T) {
"id": "t1",
"type":"stream",
"dbrps":[{"db":"db","rp":"rp"}],
"script":"stream\n |from()\n .measurement('cpu')\n",
"script":"stream\n\t|from()\n\t\t.measurement('cpu')\n",
"dot": "digraph t1 {}",
"status" : "enabled",
"executing" : false,
@ -439,8 +432,8 @@ func Test_Task(t *testing.T) {
RetentionPolicy: "rp",
}},
TICKscript: `stream
|from()
.measurement('cpu')
|from()
.measurement('cpu')
`,
Dot: "digraph t1 {}",
Status: client.Enabled,
@ -464,8 +457,8 @@ func Test_Task_Labels(t *testing.T) {
"id": "t1",
"type":"stream",
"dbrps":[{"db":"db","rp":"rp"}],
"script":"stream\n |from()\n .measurement('cpu')\n",
"dot": "digraph t1 {\n}",
"script":"stream|from().measurement('cpu')",
"dot": "digraph t1 {}",
"status" : "enabled",
"executing" : false,
"error": ""
@ -492,11 +485,8 @@ func Test_Task_Labels(t *testing.T) {
Database: "db",
RetentionPolicy: "rp",
}},
TICKscript: `stream
|from()
.measurement('cpu')
`,
Dot: "digraph t1 {\n}",
TICKscript: "stream|from().measurement('cpu')",
Dot: "digraph t1 {}",
Status: client.Enabled,
Executing: false,
Error: "",
@ -966,25 +956,25 @@ func Test_TaskOutput(t *testing.T) {
if r.URL.Path == "/kapacitor/v1/tasks/taskname/cpu" && r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"series": [
{
"name": "cpu",
"columns": [
"time",
"value"
],
"values": [
[
"2015-01-29T21:55:43.702900257Z",
55
],
[
"2015-01-29T21:56:43.702900257Z",
42
]
]
}
]
"series": [
{
"name": "cpu",
"columns": [
"time",
"value"
],
"values": [
[
"2015-01-29T21:55:43.702900257Z",
55
],
[
"2015-01-29T21:56:43.702900257Z",
42
]
]
}
]
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -1030,8 +1020,8 @@ func Test_Template(t *testing.T) {
fmt.Fprintf(w, `{
"link": {"rel":"self", "href":"/kapacitor/v1/templates/t1"},
"type":"stream",
"script":"var x = 5\nstream\n |from()\n .measurement('cpu')\n",
"vars": {"x":{"value": 5, "type":"int"}},
"script":"var x = 5 stream|from().measurement('cpu')",
"vars": {"x":{"value": 5, "type":"int"}},
"dot": "digraph t1 {}",
"error": ""
}`)
@ -1050,15 +1040,11 @@ func Test_Template(t *testing.T) {
t.Fatal(err)
}
exp := client.Template{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/templates/t1"},
Type: client.StreamTask,
TICKscript: `var x = 5
stream
|from()
.measurement('cpu')
`,
Dot: "digraph t1 {}",
Error: "",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/templates/t1"},
Type: client.StreamTask,
TICKscript: "var x = 5 stream|from().measurement('cpu')",
Dot: "digraph t1 {}",
Error: "",
Vars: client.Vars{
"x": {
Type: client.VarInt,
@ -2484,11 +2470,11 @@ func Test_Topic(t *testing.T) {
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"},
"events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"},
"handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"},
"id": "system",
"level":"CRITICAL",
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"},
"events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"},
"handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"},
"id": "system",
"level":"CRITICAL",
"collected": 5
}`)
} else {
@ -2524,23 +2510,23 @@ func Test_ListTopics(t *testing.T) {
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics"},
"topics": [
{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"},
"events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"},
"handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"},
"id": "system",
"level":"CRITICAL"
},
{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/app"},
"events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/app/events"},
"handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/app/handlers"},
"id": "app",
"level":"WARNING"
}
]
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics"},
"topics": [
{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"},
"events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"},
"handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"},
"id": "system",
"level":"CRITICAL"
},
{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/app"},
"events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/app/events"},
"handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/app/handlers"},
"id": "app",
"level":"WARNING"
}
]
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2610,14 +2596,14 @@ func Test_TopicEvent(t *testing.T) {
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"},
"id": "cpu",
"state": {
"level": "WARNING",
"message": "cpu is WARNING",
"time": "2016-12-01T00:00:00Z",
"duration": "5m"
}
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"},
"id": "cpu",
"state": {
"level": "WARNING",
"message": "cpu is WARNING",
"time": "2016-12-01T00:00:00Z",
"duration": "5m"
}
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2654,30 +2640,30 @@ func Test_ListTopicEvents(t *testing.T) {
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events?min-level=OK"},
"topic": "system",
"events": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"},
"id": "cpu",
"state": {
"level": "WARNING",
"message": "cpu is WARNING",
"time": "2016-12-01T00:00:00Z",
"duration": "5m"
}
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/mem"},
"id": "mem",
"state": {
"level": "CRITICAL",
"message": "mem is CRITICAL",
"time": "2016-12-01T00:10:00Z",
"duration": "1m"
}
}
]
"link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events?min-level=OK"},
"topic": "system",
"events": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"},
"id": "cpu",
"state": {
"level": "WARNING",
"message": "cpu is WARNING",
"time": "2016-12-01T00:00:00Z",
"duration": "5m"
}
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/mem"},
"id": "mem",
"state": {
"level": "CRITICAL",
"message": "mem is CRITICAL",
"time": "2016-12-01T00:10:00Z",
"duration": "1m"
}
}
]
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2725,33 +2711,27 @@ func Test_ListTopicEvents(t *testing.T) {
}
func Test_ListTopicHandlers(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers" &&
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers?pattern=" &&
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers"},
"topic": "system",
"handlers": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options":{
"channel":"#alerts"
}
}]
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"},
"id":"smtp",
"topics": ["system", "app"],
"actions": [{
"kind":"smtp"
}]
}
]
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers?pattern="},
"topic": "system",
"handlers": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id":"slack",
"kind":"slack",
"options":{
"channel":"#alerts"
}
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/smtp"},
"id":"smtp",
"kind":"smtp"
}
]
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2763,54 +2743,46 @@ func Test_ListTopicHandlers(t *testing.T) {
}
defer s.Close()
topicHandlers, err := c.ListTopicHandlers(c.TopicHandlersLink("system"))
topicHandlers, err := c.ListTopicHandlers(c.TopicHandlersLink("system"), nil)
if err != nil {
t.Fatal(err)
}
exp := client.TopicHandlers{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers"},
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers?pattern="},
Topic: "system",
Handlers: []client.Handler{
Handlers: []client.TopicHandler{
{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"},
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}},
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
},
{
ID: "smtp",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/smtp"},
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "smtp",
}},
ID: "smtp",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/smtp"},
Kind: "smtp",
},
},
}
if !reflect.DeepEqual(exp, topicHandlers) {
t.Errorf("unexpected topic handlers result:\ngot:\n%v\nexp:\n%v", topicHandlers, exp)
t.Errorf("unexpected topic handlers result:\ngot:\n%v\nexp:\n%v", topicHandlers, exp)
}
}
func Test_Handler(t *testing.T) {
func Test_TopicHandler(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" &&
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers/slack" &&
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
"channel":"#alerts"
}
}]
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id":"slack",
"topic": "system",
"kind":"slack",
"options": {
"channel":"#alerts"
}
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2822,53 +2794,44 @@ func Test_Handler(t *testing.T) {
}
defer s.Close()
h, err := c.Handler(c.HandlerLink("slack"))
h, err := c.TopicHandler(c.TopicHandlerLink("system", "slack"))
if err != nil {
t.Fatal(err)
}
exp := client.Handler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"},
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}},
exp := client.TopicHandler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}
if !reflect.DeepEqual(exp, h) {
t.Errorf("unexpected handler result:\ngot:\n%v\nexp:\n%v", h, exp)
}
}
func Test_CreateHandler(t *testing.T) {
func Test_CreateTopicHandler(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
options := client.HandlerOptions{}
options := client.TopicHandlerOptions{}
json.NewDecoder(r.Body).Decode(&options)
expOptions := client.HandlerOptions{
ID: "slack",
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}},
expOptions := client.TopicHandlerOptions{
ID: "slack",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}
if r.URL.String() == "/kapacitor/v1preview/alerts/handlers" &&
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers" &&
r.Method == "POST" &&
reflect.DeepEqual(expOptions, options) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id": "slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
"channel":"#alerts"
}
}]
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id": "slack",
"kind":"slack",
"options": {
"channel":"#alerts"
}
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2880,35 +2843,29 @@ func Test_CreateHandler(t *testing.T) {
}
defer s.Close()
h, err := c.CreateHandler(client.HandlerOptions{
ID: "slack",
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}},
h, err := c.CreateTopicHandler(c.TopicHandlersLink("system"), client.TopicHandlerOptions{
ID: "slack",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
})
if err != nil {
t.Fatal(err)
}
exp := client.Handler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"},
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}},
exp := client.TopicHandler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}
if !reflect.DeepEqual(exp, h) {
t.Errorf("unexpected create handler result:\ngot:\n%v\nexp:\n%v", h, exp)
}
}
func Test_PatchHandler(t *testing.T) {
func Test_PatchTopicHandler(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var patch client.JSONPatch
json.NewDecoder(r.Body).Decode(&patch)
@ -2920,24 +2877,21 @@ func Test_PatchHandler(t *testing.T) {
},
client.JSONOperation{
Operation: "replace",
Path: "/actions/0/options/channel",
Path: "/options/channel",
Value: "#testing_alerts",
},
}
if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" &&
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers/slack" &&
r.Method == "PATCH" &&
reflect.DeepEqual(expPatch, patch) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id": "slack",
"topics": ["system", "test"],
"actions": [{
"kind":"slack",
"options": {
"channel":"#testing_alerts"
}
}]
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id": "slack",
"kind":"slack",
"options": {
"channel":"#testing_alerts"
}
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -2949,7 +2903,7 @@ func Test_PatchHandler(t *testing.T) {
}
defer s.Close()
h, err := c.PatchHandler(c.HandlerLink("slack"), client.JSONPatch{
h, err := c.PatchTopicHandler(c.TopicHandlerLink("system", "slack"), client.JSONPatch{
client.JSONOperation{
Operation: "replace",
Path: "/topics",
@ -2957,56 +2911,47 @@ func Test_PatchHandler(t *testing.T) {
},
client.JSONOperation{
Operation: "replace",
Path: "/actions/0/options/channel",
Path: "/options/channel",
Value: "#testing_alerts",
},
})
if err != nil {
t.Fatal(err)
}
exp := client.Handler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"},
Topics: []string{"system", "test"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}},
exp := client.TopicHandler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}
if !reflect.DeepEqual(exp, h) {
t.Errorf("unexpected replace handler result:\ngot:\n%v\nexp:\n%v", h, exp)
}
}
func Test_ReplaceHandler(t *testing.T) {
func Test_ReplaceTopicHandler(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
options := client.HandlerOptions{}
options := client.TopicHandlerOptions{}
json.NewDecoder(r.Body).Decode(&options)
expOptions := client.HandlerOptions{
ID: "slack",
Topics: []string{"system", "test"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}},
expOptions := client.TopicHandlerOptions{
ID: "slack",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}
if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" &&
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers/slack" &&
r.Method == "PUT" &&
reflect.DeepEqual(expOptions, options) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id": "slack",
"topics": ["system", "test"],
"actions": [{
"kind":"slack",
"options": {
"channel":"#testing_alerts"
}
}]
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
"id": "slack",
"kind":"slack",
"options": {
"channel":"#testing_alerts"
}
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
@ -3018,37 +2963,31 @@ func Test_ReplaceHandler(t *testing.T) {
}
defer s.Close()
h, err := c.ReplaceHandler(c.HandlerLink("slack"), client.HandlerOptions{
ID: "slack",
Topics: []string{"system", "test"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}},
h, err := c.ReplaceTopicHandler(c.TopicHandlerLink("system", "slack"), client.TopicHandlerOptions{
ID: "slack",
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
})
if err != nil {
t.Fatal(err)
}
exp := client.Handler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"},
Topics: []string{"system", "test"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}},
exp := client.TopicHandler{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers/slack"},
Kind: "slack",
Options: map[string]interface{}{
"channel": "#testing_alerts",
},
}
if !reflect.DeepEqual(exp, h) {
t.Errorf("unexpected replace handler result:\ngot:\n%v\nexp:\n%v", h, exp)
}
}
func Test_DeleteHandler(t *testing.T) {
func Test_DeleteTopicHandler(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" &&
if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers/slack" &&
r.Method == "DELETE" {
w.WriteHeader(http.StatusNoContent)
} else {
@ -3061,86 +3000,12 @@ func Test_DeleteHandler(t *testing.T) {
}
defer s.Close()
err = c.DeleteHandler(c.HandlerLink("slack"))
err = c.DeleteTopicHandler(c.TopicHandlerLink("system", "slack"))
if err != nil {
t.Fatal(err)
}
}
func Test_ListHandlers(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/kapacitor/v1preview/alerts/handlers?pattern=%2A" &&
r.Method == "GET" {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers"},
"handlers": [
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"},
"id":"slack",
"topics": ["system", "app"],
"actions": [{
"kind":"slack",
"options": {
"channel":"#alerts"
}
}]
},
{
"link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"},
"id":"smtp",
"topics": ["system", "app"],
"actions": [{
"kind":"smtp"
}]
}
]
}`)
} else {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "request: %v", r)
}
}))
if err != nil {
t.Fatal(err)
}
defer s.Close()
handlers, err := c.ListHandlers(&client.ListHandlersOptions{
Pattern: "*",
})
if err != nil {
t.Fatal(err)
}
exp := client.Handlers{
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers"},
Handlers: []client.Handler{
{
ID: "slack",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"},
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "slack",
Options: map[string]interface{}{
"channel": "#alerts",
},
}},
},
{
ID: "smtp",
Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/smtp"},
Topics: []string{"system", "app"},
Actions: []client.HandlerAction{{
Kind: "smtp",
}},
},
},
}
if !reflect.DeepEqual(exp, handlers) {
t.Errorf("unexpected list handlers result:\ngot:\n%v\nexp:\n%v", handlers, exp)
}
}
func Test_LogLevel(t *testing.T) {
s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var opts client.LogLevelOptions

View File

@ -4,10 +4,13 @@ import (
"fmt"
"log"
"sort"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
)
@ -19,6 +22,8 @@ type CombineNode struct {
expressionsByGroup map[models.GroupID][]stateful.Expression
scopePools []stateful.ScopePool
expressionsByGroupMu sync.RWMutex
combination combination
}
@ -30,6 +35,7 @@ func newCombineNode(et *ExecutingTask, n *pipeline.CombineNode, l *log.Logger) (
expressionsByGroup: make(map[models.GroupID][]stateful.Expression),
combination: combination{max: n.Max},
}
// Create stateful expressions
cn.expressions = make([]stateful.Expression, len(n.Lambdas))
cn.scopePools = make([]stateful.ScopePool, len(n.Lambdas))
@ -39,7 +45,7 @@ func newCombineNode(et *ExecutingTask, n *pipeline.CombineNode, l *log.Logger) (
return nil, fmt.Errorf("Failed to compile %v expression: %v", i, err)
}
cn.expressions[i] = statefulExpr
cn.scopePools[i] = stateful.NewScopePool(stateful.FindReferenceVariables(lambda.Expression))
cn.scopePools[i] = stateful.NewScopePool(ast.FindReferenceVariables(lambda.Expression))
}
cn.node.runF = cn.runCombine
return cn, nil
@ -60,6 +66,14 @@ func (t timeList) Less(i, j int) bool { return t[i].Before(t[j]) }
func (t timeList) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (n *CombineNode) runCombine([]byte) error {
valueF := func() int64 {
n.expressionsByGroupMu.RLock()
l := len(n.expressionsByGroup)
n.expressionsByGroupMu.RUnlock()
return int64(l)
}
n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
switch n.Wants() {
case pipeline.StreamEdge:
buffers := make(map[models.GroupID]*buffer)
@ -162,13 +176,17 @@ func (n *CombineNode) combineBuffer(buf *buffer) error {
return nil
}
l := len(n.expressions)
n.expressionsByGroupMu.RLock()
expressions, ok := n.expressionsByGroup[buf.Group]
n.expressionsByGroupMu.RUnlock()
if !ok {
expressions = make([]stateful.Expression, l)
for i, expr := range n.expressions {
expressions[i] = expr.CopyReset()
}
n.expressionsByGroupMu.Lock()
n.expressionsByGroup[buf.Group] = expressions
n.expressionsByGroupMu.Unlock()
}
// Compute matching result for all points
@ -180,6 +198,7 @@ func (n *CombineNode) combineBuffer(buf *buffer) error {
for i := range expressions {
matched, err := EvalPredicate(expressions[i], n.scopePools[i], p.Time, p.Fields, p.Tags)
if err != nil {
n.incrementErrorCount()
n.logger.Println("E! evaluating lambda expression:", err)
}
matches[i][idx] = matched

View File

@ -42,6 +42,7 @@ func (e *DefaultNode) runDefault(snapshot []byte) error {
for p, ok := e.ins[0].NextPoint(); ok; p, ok = e.ins[0].NextPoint() {
e.timer.Start()
p.Fields, p.Tags = e.setDefaults(p.Fields, p.Tags)
p.UpdateGroup()
e.timer.Stop()
for _, child := range e.outs {
err := child.CollectPoint(p)
@ -53,6 +54,9 @@ func (e *DefaultNode) runDefault(snapshot []byte) error {
case pipeline.BatchEdge:
for b, ok := e.ins[0].NextBatch(); ok; b, ok = e.ins[0].NextBatch() {
e.timer.Start()
b.Points = b.ShallowCopyPoints()
_, b.Tags = e.setDefaults(nil, b.Tags)
b.UpdateGroup()
for i := range b.Points {
b.Points[i].Fields, b.Points[i].Tags = e.setDefaults(b.Points[i].Fields, b.Points[i].Tags)
}
@ -84,7 +88,7 @@ func (d *DefaultNode) setDefaults(fields models.Fields, tags models.Tags) (model
newTags := tags
tagsCopied := false
for tag, value := range d.d.Tags {
if _, ok := tags[tag]; !ok {
if v := tags[tag]; v == "" {
if !tagsCopied {
newTags = newTags.Copy()
tagsCopied = true

View File

@ -77,6 +77,7 @@ func (e *DeleteNode) runDelete(snapshot []byte) error {
case pipeline.BatchEdge:
for b, ok := e.ins[0].NextBatch(); ok; b, ok = e.ins[0].NextBatch() {
e.timer.Start()
b.Points = b.ShallowCopyPoints()
for i := range b.Points {
b.Points[i].Fields, b.Points[i].Tags = e.doDeletes(b.Points[i].Fields, b.Points[i].Tags)
}

View File

@ -2,8 +2,10 @@ package kapacitor
import (
"log"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
)
@ -27,53 +29,62 @@ func newDerivativeNode(et *ExecutingTask, n *pipeline.DerivativeNode, l *log.Log
func (d *DerivativeNode) runDerivative([]byte) error {
switch d.Provides() {
case pipeline.StreamEdge:
var mu sync.RWMutex
previous := make(map[models.GroupID]models.Point)
valueF := func() int64 {
mu.RLock()
l := len(previous)
mu.RUnlock()
return int64(l)
}
d.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
for p, ok := d.ins[0].NextPoint(); ok; p, ok = d.ins[0].NextPoint() {
d.timer.Start()
pr, ok := previous[p.Group]
if !ok {
previous[p.Group] = p
d.timer.Stop()
continue
}
mu.RLock()
pr := previous[p.Group]
mu.RUnlock()
value, ok := d.derivative(pr.Fields, p.Fields, pr.Time, p.Time)
if ok {
fields := pr.Fields.Copy()
value, store, emit := d.derivative(pr.Fields, p.Fields, pr.Time, p.Time)
if store {
mu.Lock()
previous[p.Group] = p
mu.Unlock()
}
if emit {
fields := p.Fields.Copy()
fields[d.d.As] = value
pr.Fields = fields
p.Fields = fields
d.timer.Pause()
for _, child := range d.outs {
err := child.CollectPoint(pr)
err := child.CollectPoint(p)
if err != nil {
return err
}
}
d.timer.Resume()
}
previous[p.Group] = p
d.timer.Stop()
}
case pipeline.BatchEdge:
for b, ok := d.ins[0].NextBatch(); ok; b, ok = d.ins[0].NextBatch() {
d.timer.Start()
if len(b.Points) > 0 {
pr := b.Points[0]
var p models.BatchPoint
for i := 1; i < len(b.Points); i++ {
p = b.Points[i]
value, ok := d.derivative(pr.Fields, p.Fields, pr.Time, p.Time)
if ok {
fields := pr.Fields.Copy()
fields[d.d.As] = value
b.Points[i-1].Fields = fields
} else {
b.Points = append(b.Points[:i-1], b.Points[i:]...)
i--
}
b.Points = b.ShallowCopyPoints()
var pr, p models.BatchPoint
for i := 0; i < len(b.Points); i++ {
p = b.Points[i]
value, store, emit := d.derivative(pr.Fields, p.Fields, pr.Time, p.Time)
if store {
pr = p
}
b.Points = b.Points[:len(b.Points)-1]
if emit {
fields := p.Fields.Copy()
fields[d.d.As] = value
b.Points[i].Fields = fields
} else {
b.Points = append(b.Points[:i], b.Points[i+1:]...)
i--
}
}
d.timer.Stop()
for _, child := range d.outs {
@ -87,32 +98,39 @@ func (d *DerivativeNode) runDerivative([]byte) error {
return nil
}
func (d *DerivativeNode) derivative(prev, curr models.Fields, prevTime, currTime time.Time) (float64, bool) {
f0, ok := numToFloat(prev[d.d.Field])
if !ok {
d.logger.Printf("E! cannot apply derivative to type %T", prev[d.d.Field])
return 0, false
}
// derivative calculates the derivative between prev and cur.
// Return is the resulting derivative, whether the current point should be
// stored as previous, and whether the point result should be emitted.
func (d *DerivativeNode) derivative(prev, curr models.Fields, prevTime, currTime time.Time) (float64, bool, bool) {
f1, ok := numToFloat(curr[d.d.Field])
if !ok {
d.incrementErrorCount()
d.logger.Printf("E! cannot apply derivative to type %T", curr[d.d.Field])
return 0, false
return 0, false, false
}
f0, ok := numToFloat(prev[d.d.Field])
if !ok {
// The only time this will fail to parse is if there is no previous.
// Because we only return `store=true` if current parses successfully, we will
// never get a previous which doesn't parse.
return 0, true, false
}
elapsed := float64(currTime.Sub(prevTime))
if elapsed == 0 {
d.incrementErrorCount()
d.logger.Printf("E! cannot perform derivative elapsed time was 0")
return 0, false
return 0, true, false
}
diff := f1 - f0
// Drop negative values for non-negative derivatives
if d.d.NonNegativeFlag && diff < 0 {
return 0, false
return 0, true, false
}
value := float64(diff) / (elapsed / float64(d.d.Unit))
return value, true
return value, true, true
}
func numToFloat(num interface{}) (float64, bool) {

View File

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"log"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
@ -13,10 +14,6 @@ import (
"github.com/influxdata/kapacitor/tick/stateful"
)
const (
statsEvalErrors = "eval_errors"
)
type EvalNode struct {
node
e *pipeline.EvalNode
@ -26,6 +23,8 @@ type EvalNode struct {
scopePool stateful.ScopePool
tags map[string]bool
expressionsByGroupMu sync.RWMutex
evalErrors *expvar.Int
}
@ -39,6 +38,7 @@ func newEvalNode(et *ExecutingTask, n *pipeline.EvalNode, l *log.Logger) (*EvalN
e: n,
expressionsByGroup: make(map[models.GroupID][]stateful.Expression),
}
// Create stateful expressions
en.expressions = make([]stateful.Expression, len(n.Lambdas))
en.refVarList = make([][]string, len(n.Lambdas))
@ -50,11 +50,11 @@ func newEvalNode(et *ExecutingTask, n *pipeline.EvalNode, l *log.Logger) (*EvalN
return nil, fmt.Errorf("Failed to compile %v expression: %v", i, err)
}
en.expressions[i] = statefulExpr
refVars := stateful.FindReferenceVariables(lambda.Expression)
refVars := ast.FindReferenceVariables(lambda.Expression)
en.refVarList[i] = refVars
}
// Create a single pool for the combination of all expressions
en.scopePool = stateful.NewScopePool(stateful.FindReferenceVariables(expressions...))
en.scopePool = stateful.NewScopePool(ast.FindReferenceVariables(expressions...))
// Create map of tags
if l := len(n.TagsList); l > 0 {
@ -69,8 +69,14 @@ func newEvalNode(et *ExecutingTask, n *pipeline.EvalNode, l *log.Logger) (*EvalN
}
func (e *EvalNode) runEval(snapshot []byte) error {
e.evalErrors = &expvar.Int{}
e.statMap.Set(statsEvalErrors, e.evalErrors)
valueF := func() int64 {
e.expressionsByGroupMu.RLock()
l := len(e.expressionsByGroup)
e.expressionsByGroupMu.RUnlock()
return int64(l)
}
e.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
switch e.Provides() {
case pipeline.StreamEdge:
var err error
@ -78,8 +84,8 @@ func (e *EvalNode) runEval(snapshot []byte) error {
e.timer.Start()
p.Fields, p.Tags, err = e.eval(p.Time, p.Group, p.Fields, p.Tags)
if err != nil {
e.evalErrors.Add(1)
if !e.e.QuiteFlag {
e.incrementErrorCount()
if !e.e.QuietFlag {
e.logger.Println("E!", err)
}
e.timer.Stop()
@ -98,12 +104,13 @@ func (e *EvalNode) runEval(snapshot []byte) error {
var err error
for b, ok := e.ins[0].NextBatch(); ok; b, ok = e.ins[0].NextBatch() {
e.timer.Start()
b.Points = b.ShallowCopyPoints()
for i := 0; i < len(b.Points); {
p := b.Points[i]
b.Points[i].Fields, b.Points[i].Tags, err = e.eval(p.Time, b.Group, p.Fields, p.Tags)
if err != nil {
e.evalErrors.Add(1)
if !e.e.QuiteFlag {
e.incrementErrorCount()
if !e.e.QuietFlag {
e.logger.Println("E!", err)
}
// Remove bad point
@ -127,13 +134,17 @@ func (e *EvalNode) runEval(snapshot []byte) error {
func (e *EvalNode) eval(now time.Time, group models.GroupID, fields models.Fields, tags models.Tags) (models.Fields, models.Tags, error) {
vars := e.scopePool.Get()
defer e.scopePool.Put(vars)
e.expressionsByGroupMu.RLock()
expressions, ok := e.expressionsByGroup[group]
e.expressionsByGroupMu.RUnlock()
if !ok {
expressions = make([]stateful.Expression, len(e.expressions))
for i, exp := range e.expressions {
expressions[i] = exp.CopyReset()
}
e.expressionsByGroupMu.Lock()
e.expressionsByGroup[group] = expressions
e.expressionsByGroupMu.Unlock()
}
for i, expr := range expressions {
err := fillScope(vars, e.refVarList[i], now, fields, tags)

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
)
@ -17,11 +18,12 @@ func EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, now tim
return false, err
}
b, err := se.EvalBool(vars)
if err != nil {
// for function signature check
if _, err := se.Type(vars); err != nil {
return false, err
}
return b, nil
return se.EvalBool(vars)
}
// fillScope - given a scope and reference variables, we fill the exact variables from the now, fields and tags.
@ -50,8 +52,9 @@ func fillScope(vars *stateful.Scope, referenceVariables []string, now time.Time,
}
if !isFieldExists && !isTagExists {
if !vars.Has(refVariableName) {
return fmt.Errorf("no field or tag exists for %s", refVariableName)
vars.Set(refVariableName, ast.MissingValue)
}
}
}

View File

@ -7,6 +7,7 @@ import (
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
)
@ -49,13 +50,24 @@ type flattenBatchBuffer struct {
}
func (n *FlattenNode) runFlatten([]byte) error {
var mu sync.RWMutex
switch n.Wants() {
case pipeline.StreamEdge:
flattenBuffers := make(map[models.GroupID]*flattenStreamBuffer)
valueF := func() int64 {
mu.RLock()
l := len(flattenBuffers)
mu.RUnlock()
return int64(l)
}
n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
for p, ok := n.ins[0].NextPoint(); ok; p, ok = n.ins[0].NextPoint() {
n.timer.Start()
t := p.Time.Round(n.f.Tolerance)
mu.RLock()
currentBuf, ok := flattenBuffers[p.Group]
mu.RUnlock()
if !ok {
currentBuf = &flattenStreamBuffer{
Time: t,
@ -64,7 +76,9 @@ func (n *FlattenNode) runFlatten([]byte) error {
Dimensions: p.Dimensions,
Tags: p.PointTags(),
}
mu.Lock()
flattenBuffers[p.Group] = currentBuf
mu.Unlock()
}
rp := models.RawPoint{
Time: t,
@ -104,10 +118,20 @@ func (n *FlattenNode) runFlatten([]byte) error {
}
case pipeline.BatchEdge:
allBuffers := make(map[models.GroupID]*flattenBatchBuffer)
valueF := func() int64 {
mu.RLock()
l := len(allBuffers)
mu.RUnlock()
return int64(l)
}
n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() {
n.timer.Start()
t := b.TMax.Round(n.f.Tolerance)
mu.RLock()
currentBuf, ok := allBuffers[b.Group]
mu.RUnlock()
if !ok {
currentBuf = &flattenBatchBuffer{
Time: t,
@ -116,7 +140,9 @@ func (n *FlattenNode) runFlatten([]byte) error {
Tags: b.Tags,
Points: make(map[time.Time][]models.RawPoint),
}
mu.Lock()
allBuffers[b.Group] = currentBuf
mu.Unlock()
}
if !t.Equal(currentBuf.Time) {
// Flatten/Emit old buffer
@ -179,18 +205,26 @@ func (n *FlattenNode) flatten(points []models.RawPoint) (models.Fields, error) {
defer n.bufPool.Put(fieldPrefix)
POINTS:
for _, p := range points {
for _, tag := range n.f.Dimensions {
for i, tag := range n.f.Dimensions {
if v, ok := p.Tags[tag]; ok {
if i > 0 {
fieldPrefix.WriteString(n.f.Delimiter)
}
fieldPrefix.WriteString(v)
fieldPrefix.WriteString(n.f.Delimiter)
} else {
n.incrementErrorCount()
n.logger.Printf("E! point missing tag %q for flatten operation", tag)
continue POINTS
}
}
l := fieldPrefix.Len()
for fname, value := range p.Fields {
fieldPrefix.WriteString(fname)
if !n.f.DropOriginalFieldNameFlag {
if l > 0 {
fieldPrefix.WriteString(n.f.Delimiter)
}
fieldPrefix.WriteString(fname)
}
fields[fieldPrefix.String()] = value
fieldPrefix.Truncate(l)
}

View File

@ -3,8 +3,10 @@ package kapacitor
import (
"log"
"sort"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/tick/ast"
@ -40,7 +42,7 @@ func (g *GroupByNode) runGroupBy([]byte) error {
dims.TagNames = g.dimensions
for pt, ok := g.ins[0].NextPoint(); ok; pt, ok = g.ins[0].NextPoint() {
g.timer.Start()
pt = setGroupOnPoint(pt, g.allDimensions, dims)
pt = setGroupOnPoint(pt, g.allDimensions, dims, g.g.ExcludedDimensions)
g.timer.Stop()
for _, child := range g.outs {
err := child.CollectPoint(pt)
@ -50,13 +52,23 @@ func (g *GroupByNode) runGroupBy([]byte) error {
}
}
default:
var mu sync.RWMutex
var lastTime time.Time
groups := make(map[models.GroupID]*models.Batch)
valueF := func() int64 {
mu.RLock()
l := len(groups)
mu.RUnlock()
return int64(l)
}
g.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
for b, ok := g.ins[0].NextBatch(); ok; b, ok = g.ins[0].NextBatch() {
g.timer.Start()
if !b.TMax.Equal(lastTime) {
lastTime = b.TMax
// Emit all groups
mu.RLock()
for id, group := range groups {
for _, child := range g.outs {
err := child.CollectBatch(*group)
@ -64,18 +76,25 @@ func (g *GroupByNode) runGroupBy([]byte) error {
return err
}
}
mu.RUnlock()
mu.Lock()
// Remove from groups
delete(groups, id)
mu.Unlock()
mu.RLock()
}
mu.RUnlock()
}
for _, p := range b.Points {
if g.allDimensions {
dims.TagNames = models.SortedKeys(p.Tags)
dims.TagNames = filterExcludedDimensions(p.Tags, dims, g.g.ExcludedDimensions)
} else {
dims.TagNames = g.dimensions
}
groupID := models.ToGroupID(b.Name, p.Tags, dims)
mu.RLock()
group, ok := groups[groupID]
mu.RUnlock()
if !ok {
tags := make(map[string]string, len(dims.TagNames))
for _, dim := range dims.TagNames {
@ -88,7 +107,9 @@ func (g *GroupByNode) runGroupBy([]byte) error {
ByName: b.ByName,
Tags: tags,
}
mu.Lock()
groups[groupID] = group
mu.Unlock()
}
group.Points = append(group.Points, p)
}
@ -111,9 +132,27 @@ func determineDimensions(dimensions []interface{}) (allDimensions bool, realDime
return
}
func setGroupOnPoint(p models.Point, allDimensions bool, dimensions models.Dimensions) models.Point {
// filterExcludedDimensions returns the sorted keys of tags with every name
// listed in excluded removed. The result reuses the backing array of the
// sorted key slice (in-place filter), so no extra allocation is made for
// the filtered list.
func filterExcludedDimensions(tags models.Tags, dimensions models.Dimensions, excluded []string) []string {
	dimensions.TagNames = models.SortedKeys(tags)
	kept := dimensions.TagNames[:0]
	for _, name := range dimensions.TagNames {
		drop := false
		for _, ex := range excluded {
			if ex == name {
				drop = true
				break
			}
		}
		if !drop {
			kept = append(kept, name)
		}
	}
	return kept
}
func setGroupOnPoint(p models.Point, allDimensions bool, dimensions models.Dimensions, excluded []string) models.Point {
if allDimensions {
dimensions.TagNames = models.SortedKeys(p.Tags)
dimensions.TagNames = filterExcludedDimensions(p.Tags, dimensions, excluded)
}
p.Group = models.ToGroupID(p.Name, p.Tags, dimensions)
p.Dimensions = dimensions

View File

@ -7,8 +7,7 @@ import (
"path"
"sync"
"github.com/influxdata/influxdb/influxql"
imodels "github.com/influxdata/influxdb/models"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/services/httpd"
@ -17,7 +16,7 @@ import (
type HTTPOutNode struct {
node
c *pipeline.HTTPOutNode
result *influxql.Result
result *models.Result
groupSeriesIdx map[models.GroupID]int
endpoint string
routes []httpd.Route
@ -30,7 +29,7 @@ func newHTTPOutNode(et *ExecutingTask, n *pipeline.HTTPOutNode, l *log.Logger) (
node: node{Node: n, et: et, logger: l},
c: n,
groupSeriesIdx: make(map[models.GroupID]int),
result: new(influxql.Result),
result: new(models.Result),
}
et.registerOutput(hn.c.Endpoint, hn)
hn.node.runF = hn.runOut
@ -43,6 +42,13 @@ func (h *HTTPOutNode) Endpoint() string {
}
func (h *HTTPOutNode) runOut([]byte) error {
valueF := func() int64 {
h.mu.RLock()
l := len(h.groupSeriesIdx)
h.mu.RUnlock()
return int64(l)
}
h.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
hndl := func(w http.ResponseWriter, req *http.Request) {
h.mu.RLock()
@ -112,7 +118,7 @@ func (h *HTTPOutNode) runOut([]byte) error {
}
// Update the result structure with a row.
func (h *HTTPOutNode) updateResultWithRow(group models.GroupID, row *imodels.Row) {
func (h *HTTPOutNode) updateResultWithRow(group models.GroupID, row *models.Row) {
h.mu.Lock()
defer h.mu.Unlock()
idx, ok := h.groupSeriesIdx[group]

117
vendor/github.com/influxdata/kapacitor/http_post.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
package kapacitor
import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"sync"

	"github.com/influxdata/kapacitor/bufpool"
	"github.com/influxdata/kapacitor/models"
	"github.com/influxdata/kapacitor/pipeline"
	"github.com/influxdata/kapacitor/services/httppost"
)
// HTTPPostNode POSTs the rows produced by its parent node to an HTTP
// endpoint as JSON-encoded results, forwarding the data unchanged to its
// children.
type HTTPPostNode struct {
	node
	// c is the pipeline configuration for this node.
	c *pipeline.HTTPPostNode
	// endpoint is the POST target (URL plus headers/auth); populated from
	// either c.URLs or a named endpoint in the HTTPPostService.
	endpoint *httppost.Endpoint
	mu       sync.RWMutex
	// bp supplies reusable buffers for JSON-encoding request bodies.
	bp *bufpool.Pool
}
// newHTTPPostNode creates a new HTTPPostNode which submits received items
// via POST to an HTTP endpoint. The target comes either from n.URLs (a raw
// URL) or n.Endpoints (a named endpoint registered with the
// HTTPPostService); pipeline validation guarantees at most one entry in
// each list.
func newHTTPPostNode(et *ExecutingTask, n *pipeline.HTTPPostNode, l *log.Logger) (*HTTPPostNode, error) {
	hn := &HTTPPostNode{
		node: node{Node: n, et: et, logger: l},
		c:    n,
		bp:   bufpool.New(),
	}
	// Should only ever be 0 or 1 from validation of n
	if len(n.URLs) == 1 {
		e := httppost.NewEndpoint(n.URLs[0], nil, httppost.BasicAuth{})
		hn.endpoint = e
	}
	// Should only ever be 0 or 1 from validation of n
	if len(n.Endpoints) == 1 {
		endpointName := n.Endpoints[0]
		e, ok := et.tm.HTTPPostService.Endpoint(endpointName)
		if !ok {
			return nil, fmt.Errorf("endpoint '%s' does not exist", endpointName)
		}
		hn.endpoint = e
	}
	// NOTE(review): if both lists are empty, hn.endpoint stays nil and
	// postRow would panic — presumably pipeline validation prevents that;
	// confirm against pipeline.HTTPPostNode validation.
	hn.node.runF = hn.runPost
	return hn, nil
}
// runPost consumes the node's input edge, POSTs each point or batch as a
// single row, and then forwards the original data unchanged to all child
// nodes.
func (h *HTTPPostNode) runPost([]byte) error {
	switch h.Wants() {
	case pipeline.StreamEdge:
		for pt, more := h.ins[0].NextPoint(); more; pt, more = h.ins[0].NextPoint() {
			h.timer.Start()
			h.postRow(pt.Group, models.PointToRow(pt))
			h.timer.Stop()
			for _, out := range h.outs {
				if err := out.CollectPoint(pt); err != nil {
					return err
				}
			}
		}
	case pipeline.BatchEdge:
		for batch, more := h.ins[0].NextBatch(); more; batch, more = h.ins[0].NextBatch() {
			h.timer.Start()
			h.postRow(batch.Group, models.BatchToRow(batch))
			h.timer.Stop()
			for _, out := range h.outs {
				if err := out.CollectBatch(batch); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// postRow encodes row (wrapped in a single-series Result) as JSON and POSTs
// it to the configured endpoint, applying any user-defined headers on top
// of Content-Type. Failures are counted and logged; they never stop the
// node.
func (h *HTTPPostNode) postRow(group models.GroupID, row *models.Row) {
	result := new(models.Result)
	result.Series = []*models.Row{row}

	body := h.bp.Get()
	defer h.bp.Put(body)
	err := json.NewEncoder(body).Encode(result)
	if err != nil {
		h.incrementErrorCount()
		h.logger.Printf("E! failed to marshal row data json: %v", err)
		return
	}

	req, err := h.endpoint.NewHTTPRequest(body)
	if err != nil {
		h.incrementErrorCount()
		// BUG FIX: this branch previously reused the copy-pasted
		// "failed to marshal row data json" message from above.
		h.logger.Printf("E! failed to create HTTP request: %v", err)
		return
	}

	req.Header.Set("Content-Type", "application/json")
	for k, v := range h.c.Headers {
		req.Header.Set(k, v)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		h.incrementErrorCount()
		h.logger.Printf("E! failed to POST row data: %v", err)
		return
	}
	// Drain the body before closing so the transport can reuse the
	// underlying connection.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
}

View File

@ -1,545 +0,0 @@
package influxdb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"sync"
"sync/atomic"
"time"
imodels "github.com/influxdata/influxdb/models"
"github.com/pkg/errors"
)
// Client is an interface for writing to and querying from an InfluxDB instance.
type Client interface {
	// Ping checks that status of cluster
	// The provided context can be used to cancel the request.
	Ping(ctx context.Context) (time.Duration, string, error)

	// Write takes a BatchPoints object and writes all Points to InfluxDB.
	Write(bp BatchPoints) error

	// Query makes an InfluxDB Query on the database.
	// The response is checked for an error and the error is returned
	// if it exists.
	Query(q Query) (*Response, error)
}

// ClientUpdater is a Client whose connection configuration can be replaced
// at runtime via Update and which must be released with Close.
type ClientUpdater interface {
	Client
	Update(new Config) error
	Close() error
}
// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct
type BatchPointsConfig struct {
// Precision is the write precision of the points, defaults to "ns"
Precision string
// Database is the database to write points to
Database string
// RetentionPolicy is the retention policy of the points
RetentionPolicy string
// Write consistency is the number of servers required to confirm write
WriteConsistency string
}
// Query defines a query to send to the server
type Query struct {
Command string
Database string
Precision string
}
// Config is the config data needed to create an HTTP Client.
// (The comment previously referred to this type by a stale "HTTPConfig" name.)
type Config struct {
	// The URL of the InfluxDB server.
	URLs []string

	// Optional credentials for authenticating with the server.
	Credentials Credentials

	// UserAgent is the http User Agent, defaults to "KapacitorInfluxDBClient"
	UserAgent string

	// Timeout for requests, defaults to no timeout.
	Timeout time.Duration

	// Transport is the HTTP transport to use for requests
	// If nil, a default transport will be used.
	Transport *http.Transport
}
// AuthenticationMethod defines the type of authentication used.
type AuthenticationMethod int
// Supported authentication methods.
const (
NoAuthentication AuthenticationMethod = iota
UserAuthentication
BearerAuthentication
)
// Set of credentials depending on the authentication method
type Credentials struct {
Method AuthenticationMethod
// UserAuthentication fields
Username string
Password string
// BearerAuthentication fields
Token string
}
// HTTPClient is safe for concurrent use.
type HTTPClient struct {
	mu     sync.RWMutex // guards config, urls, and client below
	config Config
	urls   []url.URL
	client *http.Client
	index  int32 // round-robin cursor into urls
}
// NewHTTPClient returns a new Client from the provided config.
// Client is safe for concurrent use by multiple goroutines.
// An empty UserAgent defaults to "KapacitorInfluxDBClient" and a nil
// Transport defaults to a fresh http.Transport.
func NewHTTPClient(conf Config) (*HTTPClient, error) {
	if conf.UserAgent == "" {
		conf.UserAgent = "KapacitorInfluxDBClient"
	}
	urls, err := parseURLs(conf.URLs)
	if err != nil {
		return nil, errors.Wrap(err, "invalid URLs")
	}
	transport := conf.Transport
	if transport == nil {
		transport = &http.Transport{}
	}
	conf.Transport = transport
	return &HTTPClient{
		config: conf,
		urls:   urls,
		client: &http.Client{
			Timeout:   conf.Timeout,
			Transport: transport,
		},
	}, nil
}
// parseURLs parses each raw URL string and verifies that its scheme is
// http or https, returning the parsed values or the first error hit.
func parseURLs(urlStrs []string) ([]url.URL, error) {
	parsed := make([]url.URL, 0, len(urlStrs))
	for _, raw := range urlStrs {
		u, err := url.Parse(raw)
		if err != nil {
			return nil, err
		}
		switch u.Scheme {
		case "http", "https":
			parsed = append(parsed, *u)
		default:
			return nil, fmt.Errorf(
				"Unsupported protocol scheme: %s, your address must start with http:// or https://",
				u.Scheme,
			)
		}
	}
	return parsed, nil
}
// loadConfig returns a copy of the current config under the read lock.
func (c *HTTPClient) loadConfig() Config {
	c.mu.RLock()
	config := c.config
	c.mu.RUnlock()
	return config
}

// loadURLs returns the current URL list under the read lock. The slice
// header is copied but the backing array is shared; Update replaces the
// slice wholesale rather than mutating it, so readers are safe.
func (c *HTTPClient) loadURLs() []url.URL {
	c.mu.RLock()
	urls := c.urls
	c.mu.RUnlock()
	return urls
}

// loadHTTPClient returns the current underlying *http.Client under the
// read lock.
func (c *HTTPClient) loadHTTPClient() *http.Client {
	c.mu.RLock()
	client := c.client
	c.mu.RUnlock()
	return client
}

// Close is a no-op; it exists to satisfy the ClientUpdater interface.
func (c *HTTPClient) Close() error {
	return nil
}
// Update replaces the client's configuration: URLs are re-parsed, and the
// underlying *http.Client is rebuilt only when the timeout or transport
// changed (a nil new Transport keeps the old one).
func (c *HTTPClient) Update(new Config) error {
	if new.UserAgent == "" {
		new.UserAgent = "KapacitorInfluxDBClient"
	}
	// BUG FIX: parse the URLs before touching any state. Previously
	// c.config was replaced first, so a config with bad URLs left the
	// client half-updated (new config, old URLs).
	urls, err := parseURLs(new.URLs)
	if err != nil {
		return err
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	old := c.config
	c.config = new
	c.urls = urls
	if old.Timeout != new.Timeout || old.Transport != new.Transport {
		// Replace the client.
		tr := new.Transport
		if tr == nil {
			tr = old.Transport
		}
		c.client = &http.Client{
			Timeout:   new.Timeout,
			Transport: tr,
		}
	}
	return nil
}
// url returns the next URL in round-robin order.
// BUG FIX: the previous load-modify-store sequence (LoadInt32 then
// StoreInt32) was not atomic, so concurrent callers could lose increments
// and repeatedly pick the same URL; a single atomic add avoids that.
func (c *HTTPClient) url() url.URL {
	urls := c.loadURLs()
	i := atomic.AddInt32(&c.index, 1)
	// The uint32 conversion keeps the index non-negative even after the
	// int32 counter wraps around.
	return urls[int(uint32(i))%len(urls)]
}
// do executes req with the client's current credentials and User-Agent,
// verifies the response status is one of codes, and, when result is
// non-nil, decodes the JSON response body into it. On an unexpected status
// it surfaces the server-provided {"error": ...} message when present,
// otherwise the raw body.
func (c *HTTPClient) do(req *http.Request, result interface{}, codes ...int) (*http.Response, error) {
	// Get current config
	config := c.loadConfig()
	// Set auth credentials
	cred := config.Credentials
	switch cred.Method {
	case NoAuthentication:
	case UserAuthentication:
		req.SetBasicAuth(cred.Username, cred.Password)
	case BearerAuthentication:
		req.Header.Set("Authorization", "Bearer "+cred.Token)
	default:
		return nil, errors.New("unknown authentication method set")
	}
	// Set user agent
	req.Header.Set("User-Agent", config.UserAgent)
	// Get client
	client := c.loadHTTPClient()
	// Do request
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	valid := false
	for _, code := range codes {
		if resp.StatusCode == code {
			valid = true
			break
		}
	}
	if !valid {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, err
		}
		// NOTE(review): a non-JSON error body makes this Decode fail and
		// returns the decode error instead of the body text — presumably
		// acceptable since InfluxDB replies with JSON; confirm.
		d := json.NewDecoder(bytes.NewReader(body))
		rp := struct {
			Error string `json:"error"`
		}{}
		if err := d.Decode(&rp); err != nil {
			return nil, err
		}
		if rp.Error != "" {
			return nil, errors.New(rp.Error)
		}
		return nil, fmt.Errorf("invalid response: code %d: body: %s", resp.StatusCode, string(body))
	}
	if result != nil {
		// UseNumber avoids lossy float64 conversion of large integers.
		d := json.NewDecoder(resp.Body)
		d.UseNumber()
		err := d.Decode(result)
		if err != nil {
			return nil, errors.Wrap(err, "failed to decode JSON")
		}
	}
	return resp, nil
}
// Ping will check to see if the server is up with an optional timeout on waiting for leader.
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *HTTPClient) Ping(ctx context.Context) (time.Duration, string, error) {
	now := time.Now()
	u := c.url()
	u.Path = "ping"
	if ctx != nil {
		if dl, ok := ctx.Deadline(); ok {
			v := url.Values{}
			// BUG FIX: this previously computed time.Now().Sub(dl), which
			// is negative for any future deadline; wait_for_leader must be
			// the time remaining until the deadline.
			v.Set("wait_for_leader", fmt.Sprintf("%.0fs", time.Until(dl).Seconds()))
			u.RawQuery = v.Encode()
		}
	}
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return 0, "", err
	}
	if ctx != nil {
		req = req.WithContext(ctx)
	}
	resp, err := c.do(req, nil, http.StatusNoContent)
	if err != nil {
		return 0, "", err
	}
	version := resp.Header.Get("X-Influxdb-Version")
	return time.Since(now), version, nil
}
// Write encodes every point in bp as line protocol (one point per line)
// and POSTs the batch to the /write endpoint with the batch's database,
// retention policy, precision, and consistency settings.
func (c *HTTPClient) Write(bp BatchPoints) error {
	precision := bp.Precision()
	var buf bytes.Buffer
	for _, p := range bp.Points() {
		if _, err := buf.Write(p.Bytes(precision)); err != nil {
			return err
		}
		if err := buf.WriteByte('\n'); err != nil {
			return err
		}
	}

	u := c.url()
	u.Path = "write"
	params := url.Values{}
	params.Set("db", bp.Database())
	params.Set("rp", bp.RetentionPolicy())
	params.Set("precision", precision)
	params.Set("consistency", bp.WriteConsistency())
	u.RawQuery = params.Encode()

	req, err := http.NewRequest("POST", u.String(), &buf)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	_, err = c.do(req, nil, http.StatusNoContent, http.StatusOK)
	return err
}
// Response represents a list of statement results.
type Response struct {
Results []Result
Err string `json:"error,omitempty"`
}
// Error returns the first error from any statement.
// Returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
	if r.Err != "" {
		// BUG FIX: was fmt.Errorf(r.Err) — the server message is not a
		// format string, and a stray '%' in it would be mangled (and is
		// flagged by go vet).
		return errors.New(r.Err)
	}
	for _, result := range r.Results {
		if result.Err != "" {
			return errors.New(result.Err)
		}
	}
	return nil
}
// Message represents a user message.
type Message struct {
Level string
Text string
}
// Result represents a resultset returned from a single statement.
type Result struct {
Series []imodels.Row
Messages []*Message
Err string `json:"error,omitempty"`
}
// Query sends a command to the server and returns the Response
func (c *HTTPClient) Query(q Query) (*Response, error) {
u := c.url()
u.Path = "query"
v := url.Values{}
v.Set("q", q.Command)
v.Set("db", q.Database)
if q.Precision != "" {
v.Set("epoch", q.Precision)
}
u.RawQuery = v.Encode()
req, err := http.NewRequest("POST", u.String(), nil)
if err != nil {
return nil, err
}
response := &Response{}
_, err = c.do(req, response, http.StatusOK)
if err != nil {
return nil, err
}
if err := response.Error(); err != nil {
return nil, err
}
return response, nil
}
// BatchPoints is an interface into a batched grouping of points to write into
// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate
// batch for each goroutine.
type BatchPoints interface {
// AddPoint adds the given point to the Batch of points
AddPoint(p Point)
// AddPoints adds the given points to the Batch of points
AddPoints(ps []Point)
// Points lists the points in the Batch
Points() []Point
// Precision returns the currently set precision of this Batch
Precision() string
// SetPrecision sets the precision of this batch.
SetPrecision(s string) error
// Database returns the currently set database of this Batch
Database() string
// SetDatabase sets the database of this Batch
SetDatabase(s string)
// WriteConsistency returns the currently set write consistency of this Batch
WriteConsistency() string
// SetWriteConsistency sets the write consistency of this Batch
SetWriteConsistency(s string)
// RetentionPolicy returns the currently set retention policy of this Batch
RetentionPolicy() string
// SetRetentionPolicy sets the retention policy of this Batch
SetRetentionPolicy(s string)
}
// NewBatchPoints returns a BatchPoints interface based on the given config.
// An empty Precision defaults to "ns"; any precision must be a valid
// time.Duration unit suffix (validated via time.ParseDuration).
func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {
	precision := conf.Precision
	if precision == "" {
		precision = "ns"
	}
	if _, err := time.ParseDuration("1" + precision); err != nil {
		return nil, err
	}
	return &batchpoints{
		database:         conf.Database,
		precision:        precision,
		retentionPolicy:  conf.RetentionPolicy,
		writeConsistency: conf.WriteConsistency,
	}, nil
}
// batchpoints is the default BatchPoints implementation: a plain
// accumulator of points plus write metadata. Like the interface it
// implements, it is NOT safe for concurrent use.
type batchpoints struct {
	points           []Point
	database         string
	precision        string
	retentionPolicy  string
	writeConsistency string
}

// AddPoint appends a single point to the batch.
func (bp *batchpoints) AddPoint(p Point) {
	bp.points = append(bp.points, p)
}

// AddPoints appends all given points to the batch.
func (bp *batchpoints) AddPoints(ps []Point) {
	bp.points = append(bp.points, ps...)
}

// Points returns the accumulated points.
func (bp *batchpoints) Points() []Point {
	return bp.points
}

// Precision returns the batch's write precision.
func (bp *batchpoints) Precision() string {
	return bp.precision
}

// Database returns the target database.
func (bp *batchpoints) Database() string {
	return bp.database
}

// WriteConsistency returns the batch's write consistency level.
func (bp *batchpoints) WriteConsistency() string {
	return bp.writeConsistency
}

// RetentionPolicy returns the target retention policy.
func (bp *batchpoints) RetentionPolicy() string {
	return bp.retentionPolicy
}

// SetPrecision validates p as a time.Duration unit suffix before storing it.
func (bp *batchpoints) SetPrecision(p string) error {
	if _, err := time.ParseDuration("1" + p); err != nil {
		return err
	}
	bp.precision = p
	return nil
}

// SetDatabase sets the target database.
func (bp *batchpoints) SetDatabase(db string) {
	bp.database = db
}

// SetWriteConsistency sets the write consistency level.
func (bp *batchpoints) SetWriteConsistency(wc string) {
	bp.writeConsistency = wc
}

// SetRetentionPolicy sets the target retention policy.
func (bp *batchpoints) SetRetentionPolicy(rp string) {
	bp.retentionPolicy = rp
}
// Point represents a single InfluxDB sample: a measurement name, its tag
// set, field values, and an optional timestamp (a zero Time omits the
// timestamp so the server assigns one).
type Point struct {
	Name   string
	Tags   map[string]string
	Fields map[string]interface{}
	Time   time.Time
}
// Bytes returns the line-protocol representation of the point at the given
// precision: "<key> <fields>[ <timestamp>]". A zero Time omits the
// timestamp portion entirely.
func (p Point) Bytes(precision string) []byte {
	key := imodels.MakeKey([]byte(p.Name), imodels.NewTags(p.Tags))
	fields := imodels.Fields(p.Fields).MarshalBinary()

	// Pre-size for key + space + fields plus room for " <timestamp>".
	buf := make([]byte, 0, len(key)+1+len(fields)+21)
	buf = append(buf, key...)
	buf = append(buf, ' ')
	buf = append(buf, fields...)
	if !p.Time.IsZero() {
		buf = append(buf, ' ')
		buf = strconv.AppendInt(buf, p.Time.UnixNano()/imodels.GetPrecisionMultiplier(precision), 10)
	}
	return buf
}
// ClientCreator is a simple factory type for creating
// github.com/influxdata/kapacitor/influxdb clients.
type ClientCreator struct{}

// Create builds a new HTTPClient from config, satisfying the
// ClientUpdater factory contract.
func (ClientCreator) Create(config Config) (ClientUpdater, error) {
	return NewHTTPClient(config)
}

View File

@ -1,284 +0,0 @@
package influxdb
import (
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"sync"
"testing"
)
func TestClient_Query(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := Config{URLs: []string{ts.URL}}
c, _ := NewHTTPClient(config)
query := Query{}
_, err := c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_BasicAuth(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u, p, ok := r.BasicAuth()
if !ok {
t.Errorf("basic auth error")
}
if u != "username" {
t.Errorf("unexpected username, expected %q, actual %q", "username", u)
}
if p != "password" {
t.Errorf("unexpected password, expected %q, actual %q", "password", p)
}
var data Response
w.WriteHeader(http.StatusOK)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := Config{URLs: []string{ts.URL}, Credentials: Credentials{Method: UserAuthentication, Username: "username", Password: "password"}}
c, _ := NewHTTPClient(config)
query := Query{}
_, err := c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_Ping(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := Config{URLs: []string{ts.URL}}
c, _ := NewHTTPClient(config)
_, _, err := c.Ping(nil)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_Update(t *testing.T) {
ts0 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts0.Close()
config := Config{URLs: []string{ts0.URL}}
c, _ := NewHTTPClient(config)
_, _, err := c.Ping(nil)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
ts1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts1.Close()
config.URLs = []string{ts1.URL}
c.Update(config)
_, _, err = c.Ping(nil)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
// TestClient_Concurrent_Use exercises Write, Query, and Ping from three
// goroutines simultaneously to surface data races (run with -race).
func TestClient_Concurrent_Use(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(`{}`))
	}))
	defer ts.Close()

	config := Config{URLs: []string{ts.URL}}
	c, _ := NewHTTPClient(config)

	var wg sync.WaitGroup
	wg.Add(3)
	n := 1000

	go func() {
		defer wg.Done()
		bp, err := NewBatchPoints(BatchPointsConfig{})
		if err != nil {
			t.Errorf("got error %v", err)
		}
		for i := 0; i < n; i++ {
			if err = c.Write(bp); err != nil {
				// BUG FIX: t.Fatalf (FailNow) must only be called from the
				// goroutine running the test function; use Errorf + return
				// in spawned goroutines.
				t.Errorf("got error %v", err)
				return
			}
		}
	}()

	go func() {
		defer wg.Done()
		var q Query
		for i := 0; i < n; i++ {
			if _, err := c.Query(q); err != nil {
				t.Errorf("got error %v", err)
				return
			}
		}
	}()

	go func() {
		defer wg.Done()
		for i := 0; i < n; i++ {
			c.Ping(nil)
		}
	}()
	wg.Wait()
}
func TestClient_Write(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var data Response
w.WriteHeader(http.StatusNoContent)
_ = json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
config := Config{URLs: []string{ts.URL}}
c, _ := NewHTTPClient(config)
bp, err := NewBatchPoints(BatchPointsConfig{})
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
err = c.Write(bp)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
}
func TestClient_UserAgent(t *testing.T) {
receivedUserAgent := ""
var code int
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
receivedUserAgent = r.UserAgent()
var data Response
w.WriteHeader(code)
json.NewEncoder(w).Encode(data)
}))
defer ts.Close()
tests := []struct {
name string
userAgent string
expected string
}{
{
name: "Empty user agent",
userAgent: "",
expected: "KapacitorInfluxDBClient",
},
{
name: "Custom user agent",
userAgent: "Test Influx Client",
expected: "Test Influx Client",
},
}
for _, test := range tests {
var err error
config := Config{URLs: []string{ts.URL}, UserAgent: test.userAgent}
c, _ := NewHTTPClient(config)
receivedUserAgent = ""
code = http.StatusOK
query := Query{}
_, err = c.Query(query)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Errorf("Unexpected user agent for query request. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
code = http.StatusNoContent
bp, _ := NewBatchPoints(BatchPointsConfig{})
err = c.Write(bp)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if !strings.HasPrefix(receivedUserAgent, test.expected) {
t.Errorf("Unexpected user agent for write request. expected %v, actual %v", test.expected, receivedUserAgent)
}
receivedUserAgent = ""
code = http.StatusNoContent
_, _, err = c.Ping(nil)
if err != nil {
t.Errorf("unexpected error. expected %v, actual %v", nil, err)
}
if receivedUserAgent != test.expected {
t.Errorf("Unexpected user agent for ping request. expected %v, actual %v", test.expected, receivedUserAgent)
}
}
}
func TestBatchPoints_SettersGetters(t *testing.T) {
bp, _ := NewBatchPoints(BatchPointsConfig{
Precision: "ns",
Database: "db",
RetentionPolicy: "rp",
WriteConsistency: "wc",
})
if bp.Precision() != "ns" {
t.Errorf("Expected: %s, got %s", bp.Precision(), "ns")
}
if bp.Database() != "db" {
t.Errorf("Expected: %s, got %s", bp.Database(), "db")
}
if bp.RetentionPolicy() != "rp" {
t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp")
}
if bp.WriteConsistency() != "wc" {
t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc")
}
bp.SetDatabase("db2")
bp.SetRetentionPolicy("rp2")
bp.SetWriteConsistency("wc2")
err := bp.SetPrecision("s")
if err != nil {
t.Errorf("Did not expect error: %s", err.Error())
}
if bp.Precision() != "s" {
t.Errorf("Expected: %s, got %s", bp.Precision(), "s")
}
if bp.Database() != "db2" {
t.Errorf("Expected: %s, got %s", bp.Database(), "db2")
}
if bp.RetentionPolicy() != "rp2" {
t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2")
}
if bp.WriteConsistency() != "wc2" {
t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2")
}
}

View File

@ -1,82 +0,0 @@
package influxdb_test
import (
"testing"
"time"
"github.com/influxdata/kapacitor/influxdb"
)
func TestPoint_Bytes(t *testing.T) {
fields := map[string]interface{}{"value": float64(1), "another": int64(42)}
tags := map[string]string{"host": "serverA", "dc": "nyc"}
tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z")
tests := []struct {
name string
precision string
exp string
t time.Time
}{
{
name: "zero time",
precision: "",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1",
},
{
name: "no precision",
precision: "",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 946730096789012345",
t: tm,
},
{
name: "nanosecond precision",
precision: "ns",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 946730096789012345",
t: tm,
},
{
name: "microsecond precision",
precision: "u",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 946730096789012",
t: tm,
},
{
name: "millisecond precision",
precision: "ms",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 946730096789",
t: tm,
},
{
name: "second precision",
precision: "s",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 946730096",
t: tm,
},
{
name: "minute precision",
precision: "m",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 15778834",
t: tm,
},
{
name: "hour precision",
precision: "h",
exp: "cpu,dc=nyc,host=serverA another=42i,value=1 262980",
t: tm,
},
}
for _, test := range tests {
p := influxdb.Point{
Name: "cpu",
Tags: tags,
Fields: fields,
Time: test.t,
}
if got, exp := string(p.Bytes(test.precision)), test.exp; got != exp {
t.Errorf("%s: Bytes() mismatch:\n actual: %v\n exp: %v",
test.name, got, exp)
}
}
}

View File

@ -78,6 +78,7 @@ func (i *InfluxDBOutNode) runOut([]byte) error {
return nil
}()
if err != nil {
i.incrementErrorCount()
i.logger.Printf("E! failed to create database %q on cluster %q: %v", i.i.Database, i.i.Cluster, err)
}
}
@ -234,6 +235,7 @@ func (w *writeBuffer) run() {
if !ok {
bp, err = influxdb.NewBatchPoints(qe.bpc)
if err != nil {
w.i.incrementErrorCount()
w.i.logger.Println("E! failed to write points to InfluxDB:", err)
break
}
@ -244,6 +246,7 @@ func (w *writeBuffer) run() {
if len(bp.Points()) >= w.size {
err = w.write(bp)
if err != nil {
w.i.incrementErrorCount()
w.i.logger.Println("E! failed to write points to InfluxDB:", err)
}
delete(w.buffer, qe.bpc)
@ -265,6 +268,7 @@ func (w *writeBuffer) writeAll() {
for bpc, bp := range w.buffer {
err := w.write(bp)
if err != nil {
w.i.incrementErrorCount()
w.i.logger.Println("E! failed to write points to InfluxDB:", err)
}
delete(w.buffer, bpc)

View File

@ -8,6 +8,7 @@ package kapacitor
import (
"fmt"
"reflect"
"time"
"github.com/influxdata/influxdb/influxql"
@ -1151,10 +1152,10 @@ type booleanBulkReduceContext struct {
booleanPointEmitter
}
func determineReduceContextCreateFn(method string, value interface{}, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) {
switch value.(type) {
func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) {
switch kind {
case float64:
case reflect.Float64:
switch {
case rc.CreateFloatReducer != nil:
@ -1301,7 +1302,7 @@ func determineReduceContextCreateFn(method string, value interface{}, rc pipelin
err = fmt.Errorf("cannot apply %s to float64 field", method)
}
case int64:
case reflect.Int64:
switch {
case rc.CreateIntegerFloatReducer != nil:
@ -1448,7 +1449,7 @@ func determineReduceContextCreateFn(method string, value interface{}, rc pipelin
err = fmt.Errorf("cannot apply %s to int64 field", method)
}
case string:
case reflect.String:
switch {
case rc.CreateStringFloatReducer != nil:
@ -1595,7 +1596,7 @@ func determineReduceContextCreateFn(method string, value interface{}, rc pipelin
err = fmt.Errorf("cannot apply %s to string field", method)
}
case bool:
case reflect.Bool:
switch {
case rc.CreateBooleanFloatReducer != nil:
@ -1743,7 +1744,7 @@ func determineReduceContextCreateFn(method string, value interface{}, rc pipelin
}
default:
err = fmt.Errorf("invalid field type: %T", value)
err = fmt.Errorf("invalid field kind: %v", kind)
}
return
}

View File

@ -4,6 +4,7 @@ package kapacitor
import (
"fmt"
"time"
"reflect"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/kapacitor/models"
@ -274,10 +275,10 @@ type {{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext str
{{/* Define switch cases for reduceContext contruction */}}
func determineReduceContextCreateFn(method string, value interface{}, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) {
switch value.(type) {
func determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) {
switch kind {
{{range $a := $types}}
case {{.Type}}:
case {{.Kind}}:
switch {
{{range $e := $types}}
case rc.Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer != nil:
@ -320,7 +321,7 @@ func determineReduceContextCreateFn(method string, value interface{}, rc pipelin
}
{{end}}
default:
err = fmt.Errorf("invalid field type: %T", value)
err = fmt.Errorf("invalid field kind: %v", kind)
}
return
}

View File

@ -3,15 +3,18 @@ package kapacitor
import (
"fmt"
"log"
"reflect"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/pkg/errors"
)
// tmpl -- go get github.com/benbjohnson/tmpl
//go:generate tmpl -data=@tmpldata influxql.gen.go.tmpl
//go:generate tmpl -data=@tmpldata.json influxql.gen.go.tmpl
type createReduceContextFunc func(c baseReduceContext) reduceContext
@ -70,11 +73,23 @@ func (c *baseReduceContext) Time() time.Time {
}
func (n *InfluxQLNode) runStreamInfluxQL() error {
var mu sync.RWMutex
contexts := make(map[models.GroupID]reduceContext)
valueF := func() int64 {
mu.RLock()
l := len(contexts)
mu.RUnlock()
return int64(l)
}
n.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
var kind reflect.Kind
for p, ok := n.ins[0].NextPoint(); ok; {
n.timer.Start()
mu.RLock()
context := contexts[p.Group]
// Fisrt point in window
mu.RUnlock()
// First point in window
if context == nil {
// Create new context
c := baseReduceContext{
@ -88,30 +103,48 @@ func (n *InfluxQLNode) runStreamInfluxQL() error {
pointTimes: n.n.PointTimes || n.isStreamTransformation,
}
createFn, err := n.getCreateFn(p.Fields[c.field])
f, exists := p.Fields[c.field]
if !exists {
n.incrementErrorCount()
n.logger.Printf("E! field %s missing from point, skipping point", c.field)
p, ok = n.ins[0].NextPoint()
n.timer.Stop()
continue
}
k := reflect.TypeOf(f).Kind()
kindChanged := k != kind
kind = k
createFn, err := n.getCreateFn(kindChanged, kind)
if err != nil {
return err
}
context = createFn(c)
mu.Lock()
contexts[p.Group] = context
mu.Unlock()
}
if n.isStreamTransformation {
err := context.AggregatePoint(&p)
if err != nil {
n.incrementErrorCount()
n.logger.Println("E! failed to aggregate point:", err)
}
p, ok = n.ins[0].NextPoint()
err = n.emit(context)
if err != nil && err != ErrEmptyEmit {
n.incrementErrorCount()
n.logger.Println("E! failed to emit stream:", err)
}
} else {
if p.Time.Equal(context.Time()) {
err := context.AggregatePoint(&p)
if err != nil {
n.incrementErrorCount()
n.logger.Println("E! failed to aggregate point:", err)
}
// advance to next point
@ -119,11 +152,14 @@ func (n *InfluxQLNode) runStreamInfluxQL() error {
} else {
err := n.emit(context)
if err != nil {
n.incrementErrorCount()
n.logger.Println("E! failed to emit stream:", err)
}
// Nil out reduced point
mu.Lock()
contexts[p.Group] = nil
mu.Unlock()
// do not advance,
// go through loop again to initialize new iterator.
}
@ -134,7 +170,8 @@ func (n *InfluxQLNode) runStreamInfluxQL() error {
}
func (n *InfluxQLNode) runBatchInfluxQL() error {
var exampleValue interface{}
var kind reflect.Kind
kindChanged := true
for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() {
n.timer.Start()
// Create new base context
@ -154,14 +191,23 @@ func (n *InfluxQLNode) runBatchInfluxQL() error {
n.timer.Stop()
continue
}
if exampleValue == nil {
if kind == reflect.Invalid {
// If we have no points and have never seen a point assume float64
exampleValue = float64(0)
kind = reflect.Float64
}
} else {
exampleValue = b.Points[0].Fields[c.field]
f, ok := b.Points[0].Fields[c.field]
if !ok {
n.incrementErrorCount()
n.logger.Printf("E! field %s missing from point, skipping batch", c.field)
n.timer.Stop()
continue
}
k := reflect.TypeOf(f).Kind()
kindChanged = k != kind
kind = k
}
createFn, err := n.getCreateFn(exampleValue)
createFn, err := n.getCreateFn(kindChanged, kind)
if err != nil {
return err
}
@ -180,9 +226,11 @@ func (n *InfluxQLNode) runBatchInfluxQL() error {
Tags: bp.Tags,
}
if err := context.AggregatePoint(&p); err != nil {
n.incrementErrorCount()
n.logger.Println("E! failed to aggregate batch point:", err)
}
if ep, err := context.EmitPoint(); err != nil && err != ErrEmptyEmit {
n.incrementErrorCount()
n.logger.Println("E! failed to emit batch point:", err)
} else if err != ErrEmptyEmit {
eb.Points = append(eb.Points, models.BatchPoint{
@ -196,6 +244,7 @@ func (n *InfluxQLNode) runBatchInfluxQL() error {
n.timer.Pause()
for _, out := range n.outs {
if err := out.CollectBatch(eb); err != nil {
n.incrementErrorCount()
n.logger.Println("E! failed to emit batch points:", err)
}
}
@ -204,9 +253,11 @@ func (n *InfluxQLNode) runBatchInfluxQL() error {
err := context.AggregateBatch(&b)
if err == nil {
if err := n.emit(context); err != nil {
n.incrementErrorCount()
n.logger.Println("E! failed to emit batch:", err)
}
} else {
n.incrementErrorCount()
n.logger.Println("E! failed to aggregate batch:", err)
}
}
@ -215,11 +266,11 @@ func (n *InfluxQLNode) runBatchInfluxQL() error {
return nil
}
func (n *InfluxQLNode) getCreateFn(value interface{}) (createReduceContextFunc, error) {
if n.createFn != nil {
func (n *InfluxQLNode) getCreateFn(changed bool, kind reflect.Kind) (createReduceContextFunc, error) {
if !changed && n.createFn != nil {
return n.createFn, nil
}
createFn, err := determineReduceContextCreateFn(n.n.Method, value, n.n.ReduceCreater)
createFn, err := determineReduceContextCreateFn(n.n.Method, kind, n.n.ReduceCreater)
if err != nil {
return nil, errors.Wrapf(err, "invalid influxql func %s with field %s", n.n.Method, n.n.Field)
}

View File

@ -29,6 +29,8 @@ type JoinNode struct {
// Represents the lower bound of times per group per parent
lowMarks map[srcGroup]time.Time
groupsMu sync.RWMutex
reported map[int]bool
allReported bool
}
@ -65,8 +67,14 @@ func newJoinNode(et *ExecutingTask, n *pipeline.JoinNode, l *log.Logger) (*JoinN
}
func (j *JoinNode) runJoin([]byte) error {
j.groups = make(map[models.GroupID]*group)
valueF := func() int64 {
j.groupsMu.RLock()
l := len(j.groups)
j.groupsMu.RUnlock()
return int64(l)
}
j.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
groupErrs := make(chan error, 1)
done := make(chan struct{}, len(j.ins))
@ -109,16 +117,21 @@ func (j *JoinNode) runJoin([]byte) error {
}
}
// No more points are coming signal all groups to finish up.
j.groupsMu.RLock()
for _, group := range j.groups {
close(group.points)
}
j.groupsMu.RUnlock()
j.runningGroups.Wait()
j.groupsMu.RLock()
for _, group := range j.groups {
err := group.emitAll()
if err != nil {
return err
}
}
j.groupsMu.RUnlock()
return nil
}
@ -269,14 +282,19 @@ func (j *JoinNode) sendSpecificPoint(specific srcPoint, groupErrs chan<- error)
// safely get the group for the point or create one if it doesn't exist.
func (j *JoinNode) getGroup(p models.PointInterface, groupErrs chan<- error) *group {
j.groupsMu.RLock()
group := j.groups[p.PointGroup()]
j.groupsMu.RUnlock()
if group == nil {
group = newGroup(len(j.ins), j)
j.groupsMu.Lock()
j.groups[p.PointGroup()] = group
j.runningGroups.Add(1)
j.groupsMu.Unlock()
go func() {
err := group.run()
if err != nil {
j.incrementErrorCount()
j.logger.Println("E! join group error:", err)
select {
case groupErrs <- err:
@ -342,6 +360,7 @@ func (g *group) collect(i int, p models.PointInterface) error {
sets := g.sets[t]
if len(sets) == 0 {
set = newJoinset(
g.j,
g.j.j.StreamName,
g.j.fill,
g.j.fillValue,
@ -362,6 +381,7 @@ func (g *group) collect(i int, p models.PointInterface) error {
}
if set == nil {
set = newJoinset(
g.j,
g.j.j.StreamName,
g.j.fill,
g.j.fillValue,
@ -466,6 +486,7 @@ func (g *group) emitJoinedSet(set *joinset) error {
// represents a set of points or batches from the same joined time
type joinset struct {
j *JoinNode
name string
fill influxql.FillOption
fillValue interface{}
@ -486,6 +507,7 @@ type joinset struct {
}
func newJoinset(
n *JoinNode,
name string,
fill influxql.FillOption,
fillValue interface{},
@ -497,6 +519,7 @@ func newJoinset(
) *joinset {
expected := len(prefixes)
return &joinset{
j: n,
name: name,
fill: fill,
fillValue: fillValue,
@ -599,6 +622,7 @@ BATCH_POINT:
}
b, ok := batch.(models.Batch)
if !ok {
js.j.incrementErrorCount()
js.logger.Printf("E! invalid join data got %T expected models.Batch", batch)
return models.Batch{}, false
}

View File

@ -3,6 +3,7 @@ package kapacitor
import (
"fmt"
"log"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
@ -17,7 +18,6 @@ import (
const (
statsK8sIncreaseEventsCount = "increase_events"
statsK8sDecreaseEventsCount = "decrease_events"
statsK8sErrorsCount = "errors"
statsK8sCooldownDropsCount = "cooldown_drops"
)
@ -36,13 +36,15 @@ type K8sAutoscaleNode struct {
decreaseCount *expvar.Int
cooldownDropsCount *expvar.Int
replicasExprsMu sync.RWMutex
min int
max int
}
// Create a new K8sAutoscaleNode which can trigger autoscale event for a Kubernetes cluster.
func newK8sAutoscaleNode(et *ExecutingTask, n *pipeline.K8sAutoscaleNode, l *log.Logger) (*K8sAutoscaleNode, error) {
client, err := et.tm.K8sService.Client()
client, err := et.tm.K8sService.Client(n.Cluster)
if err != nil {
return nil, fmt.Errorf("cannot use the k8sAutoscale node, could not create kubernetes client: %v", err)
}
@ -61,20 +63,26 @@ func newK8sAutoscaleNode(et *ExecutingTask, n *pipeline.K8sAutoscaleNode, l *log
// Initialize the replicas lambda expression scope pool
if n.Replicas != nil {
kn.replicasExprs = make(map[models.GroupID]stateful.Expression)
kn.replicasScopePool = stateful.NewScopePool(stateful.FindReferenceVariables(n.Replicas.Expression))
kn.replicasScopePool = stateful.NewScopePool(ast.FindReferenceVariables(n.Replicas.Expression))
}
return kn, nil
}
func (k *K8sAutoscaleNode) runAutoscale([]byte) error {
valueF := func() int64 {
k.replicasExprsMu.RLock()
l := len(k.replicasExprs)
k.replicasExprsMu.RUnlock()
return int64(l)
}
k.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
k.increaseCount = &expvar.Int{}
k.decreaseCount = &expvar.Int{}
errorsCount := &expvar.Int{}
k.cooldownDropsCount = &expvar.Int{}
k.statMap.Set(statsK8sIncreaseEventsCount, k.increaseCount)
k.statMap.Set(statsK8sDecreaseEventsCount, k.decreaseCount)
k.statMap.Set(statsK8sErrorsCount, errorsCount)
k.statMap.Set(statsK8sCooldownDropsCount, k.cooldownDropsCount)
switch k.Wants() {
@ -82,7 +90,7 @@ func (k *K8sAutoscaleNode) runAutoscale([]byte) error {
for p, ok := k.ins[0].NextPoint(); ok; p, ok = k.ins[0].NextPoint() {
k.timer.Start()
if np, err := k.handlePoint(p.Name, p.Group, p.Dimensions, p.Time, p.Fields, p.Tags); err != nil {
errorsCount.Add(1)
k.incrementErrorCount()
k.logger.Println("E!", err)
} else if np.Name != "" {
k.timer.Pause()
@ -101,7 +109,7 @@ func (k *K8sAutoscaleNode) runAutoscale([]byte) error {
k.timer.Start()
for _, p := range b.Points {
if np, err := k.handlePoint(b.Name, b.Group, b.PointDimensions(), p.Time, p.Fields, p.Tags); err != nil {
errorsCount.Add(1)
k.incrementErrorCount()
k.logger.Println("E!", err)
} else if np.Name != "" {
k.timer.Pause()
@ -151,7 +159,9 @@ func (k *K8sAutoscaleNode) handlePoint(streamName string, group models.GroupID,
}
// Eval the replicas expression
k.replicasExprsMu.Lock()
newReplicas, err := k.evalExpr(state.current, group, k.k.Replicas, k.replicasExprs, k.replicasScopePool, t, fields, tags)
k.replicasExprsMu.Unlock()
if err != nil {
return models.Point{}, errors.Wrap(err, "failed to evaluate the replicas expression")
}

View File

@ -0,0 +1,107 @@
package kapacitor
import (
"fmt"
"log"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
)
const (
statsKapacitorLoopbackPointsWritten = "points_written"
)
// KapacitorLoopbackNode writes the data it receives back into Kapacitor
// itself via the task master, as if it were a new external write.
type KapacitorLoopbackNode struct {
	node
	// k holds the pipeline configuration for this node
	// (database/retention-policy/measurement/tag overrides).
	k *pipeline.KapacitorLoopbackNode
	// pointsWritten counts points successfully written back into Kapacitor.
	pointsWritten *expvar.Int
}
// newKapacitorLoopbackNode constructs a KapacitorLoopbackNode for the given
// executing task, refusing configurations that would feed the task's own
// input data back into itself.
func newKapacitorLoopbackNode(et *ExecutingTask, n *pipeline.KapacitorLoopbackNode, l *log.Logger) (*KapacitorLoopbackNode, error) {
	// Reject any destination dbrp that the task itself subscribes to,
	// since writing there would create an infinite loop.
	for _, dbrp := range et.Task.DBRPs {
		if dbrp.Database == n.Database && dbrp.RetentionPolicy == n.RetentionPolicy {
			return nil, fmt.Errorf("loop detected on dbrp: %v", dbrp)
		}
	}
	kn := &KapacitorLoopbackNode{
		node: node{Node: n, et: et, logger: l},
		k:    n,
	}
	kn.node.runF = kn.runOut
	return kn, nil
}
// runOut consumes points or batches from the input edge and writes them
// back into Kapacitor via the task master, applying the configured
// database, retention-policy, measurement, and tag overrides.
func (k *KapacitorLoopbackNode) runOut([]byte) error {
	k.pointsWritten = &expvar.Int{}
	// Register the counter under this node's own stat name; the previous
	// code registered it under statsInfluxDBPointsWritten, leaving
	// statsKapacitorLoopbackPointsWritten declared but unused.
	k.statMap.Set(statsKapacitorLoopbackPointsWritten, k.pointsWritten)

	switch k.Wants() {
	case pipeline.StreamEdge:
		for p, ok := k.ins[0].NextPoint(); ok; p, ok = k.ins[0].NextPoint() {
			k.timer.Start()
			// Apply configured overrides for the write destination.
			if k.k.Database != "" {
				p.Database = k.k.Database
			}
			if k.k.RetentionPolicy != "" {
				p.RetentionPolicy = k.k.RetentionPolicy
			}
			if k.k.Measurement != "" {
				p.Name = k.k.Measurement
			}
			if len(k.k.Tags) > 0 {
				// Copy before mutating so shared tag maps are not modified.
				// Use non-shadowing loop names (the original shadowed the
				// receiver k).
				p.Tags = p.Tags.Copy()
				for key, value := range k.k.Tags {
					p.Tags[key] = value
				}
			}
			if err := k.et.tm.WriteKapacitorPoint(p); err != nil {
				k.incrementErrorCount()
				// Include the underlying error; it was previously dropped.
				k.logger.Println("E! failed to write point over loopback:", err)
			} else {
				k.pointsWritten.Add(1)
			}
			k.timer.Stop()
		}
	case pipeline.BatchEdge:
		for b, ok := k.ins[0].NextBatch(); ok; b, ok = k.ins[0].NextBatch() {
			k.timer.Start()
			if k.k.Measurement != "" {
				b.Name = k.k.Measurement
			}
			written := int64(0)
			for _, bp := range b.Points {
				tags := bp.Tags
				if len(k.k.Tags) > 0 {
					// Copy before mutating so the batch's tag maps stay intact.
					tags = bp.Tags.Copy()
					for key, value := range k.k.Tags {
						tags[key] = value
					}
				}
				p := models.Point{
					Database:        k.k.Database,
					RetentionPolicy: k.k.RetentionPolicy,
					Name:            b.Name,
					Tags:            tags,
					Fields:          bp.Fields,
					Time:            bp.Time,
				}
				if err := k.et.tm.WriteKapacitorPoint(p); err != nil {
					k.incrementErrorCount()
					k.logger.Println("E! failed to write point over loopback:", err)
				} else {
					written++
				}
			}
			k.pointsWritten.Add(written)
			k.timer.Stop()
		}
	}
	return nil
}

View File

@ -41,6 +41,7 @@ func (s *LogNode) runLog([]byte) error {
for p, ok := s.ins[0].NextPoint(); ok; p, ok = s.ins[0].NextPoint() {
buf.Reset()
if err := env.Encode(p); err != nil {
s.incrementErrorCount()
s.logger.Println("E!", err)
continue
}
@ -56,6 +57,7 @@ func (s *LogNode) runLog([]byte) error {
for b, ok := s.ins[0].NextBatch(); ok; b, ok = s.ins[0].NextBatch() {
buf.Reset()
if err := env.Encode(b); err != nil {
s.incrementErrorCount()
s.logger.Println("E!", err)
continue
}

View File

@ -1,206 +0,0 @@
package models
import (
"encoding/json"
"errors"
"fmt"
"sort"
"time"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/kapacitor/influxdb"
)
// A point in batch, similar to Point but most information is
// found on the containing Batch.
//
// Tags on a BatchPoint are a superset of the tags on the Batch
// All points in a batch should have the same tag and field keys.
type BatchPoint struct {
Time time.Time `json:"time"`
Fields Fields `json:"fields"`
Tags Tags `json:"tags"`
}
// BatchPointFromPoint converts a stand-alone Point into a BatchPoint,
// carrying over its time, fields, and tags. The maps are shared, not copied.
func BatchPointFromPoint(p Point) BatchPoint {
	var bp BatchPoint
	bp.Time = p.Time
	bp.Fields = p.Fields
	bp.Tags = p.Tags
	return bp
}
type Batch struct {
Name string `json:"name,omitempty"`
TMax time.Time `json:"tmax,omitempty"`
Group GroupID `json:"group,omitempty"`
ByName bool `json:"byname,omitempty"`
Tags Tags `json:"tags,omitempty"`
Points []BatchPoint `json:"points,omitempty"`
}
func (b Batch) PointName() string {
return b.Name
}
func (b Batch) PointGroup() GroupID {
return b.Group
}
func (b Batch) PointTime() time.Time {
return b.TMax
}
func (b Batch) PointFields() Fields {
if len(b.Points) > 0 {
return b.Points[0].Fields
}
return nil
}
func (b Batch) PointTags() Tags {
return b.Tags
}
func (b Batch) PointDimensions() Dimensions {
return Dimensions{
ByName: b.ByName,
TagNames: SortedKeys(b.Tags),
}
}
// Copy returns a deep copy of the batch: the batch tags and every point's
// fields and tags are cloned so the copy can be mutated independently.
func (b Batch) Copy() PointInterface {
	cb := b
	cb.Tags = b.Tags.Copy()
	cb.Points = make([]BatchPoint, len(b.Points))
	for i := range b.Points {
		cb.Points[i] = b.Points[i]
		cb.Points[i].Fields = b.Points[i].Fields.Copy()
		cb.Points[i].Tags = b.Points[i].Tags.Copy()
	}
	return cb
}
func (b Batch) Setter() PointSetter {
return &b
}
func (b *Batch) Interface() PointInterface {
return *b
}
// SetNewDimTag sets the tag on the batch itself and on every contained point.
func (b *Batch) SetNewDimTag(key string, value string) {
	b.Tags[key] = value
	// Point tags are maps, so mutating through the loop copy is effective.
	for i := range b.Points {
		b.Points[i].Tags[key] = value
	}
}
func (b *Batch) UpdateGroup() {
b.Group = ToGroupID(b.Name, b.Tags, b.PointDimensions())
}
// BatchToRow converts a Batch into an InfluxDB models.Row for presentation.
// Column order is: "time" first, then the remaining field and point-tag
// columns sorted lexicographically.
func BatchToRow(b Batch) (row *models.Row) {
	row = &models.Row{
		Name: b.Name,
		Tags: b.Tags,
	}
	if len(b.Points) == 0 {
		return
	}
	row.Columns = []string{"time"}
	// Derive columns from the first point; all points in a batch should
	// share the same field and tag keys.
	p := b.Points[0]
	for f := range p.Fields {
		row.Columns = append(row.Columns, f)
	}
	// Append tags that are not on the batch
	for t := range p.Tags {
		if _, ok := b.Tags[t]; !ok {
			row.Columns = append(row.Columns, t)
		}
	}
	// Sort all columns but leave time as first
	sort.Strings(row.Columns[1:])
	row.Values = make([][]interface{}, len(b.Points))
	for i, p := range b.Points {
		row.Values[i] = make([]interface{}, len(row.Columns))
		row.Values[i][0] = p.Time
		for j, c := range row.Columns[1:] {
			// Each column is filled from fields first, then point tags.
			if v, ok := p.Fields[c]; ok {
				row.Values[i][j+1] = v
			} else if v, ok := p.Tags[c]; ok {
				row.Values[i][j+1] = v
			}
		}
	}
	return
}
// ResultToBatches converts an InfluxDB query result into a slice of
// Batches, one per series. When groupByName is true the series name is
// included in the computed group ID. Each batch's TMax is the latest
// point time seen in that series.
func ResultToBatches(res influxdb.Result, groupByName bool) ([]Batch, error) {
	if res.Err != "" {
		return nil, errors.New(res.Err)
	}
	batches := make([]Batch, 0, len(res.Series))
	dims := Dimensions{
		ByName: groupByName,
	}
	for _, series := range res.Series {
		var name string
		if groupByName {
			name = series.Name
		}
		// Group by all of the series' tags, sorted for determinism.
		dims.TagNames = SortedKeys(series.Tags)
		groupID := ToGroupID(
			name,
			series.Tags,
			dims,
		)
		b := Batch{
			Name:  series.Name,
			Group: groupID,
			Tags:  series.Tags,
		}
		b.Points = make([]BatchPoint, 0, len(series.Values))
		for _, v := range series.Values {
			fields := make(Fields)
			var t time.Time
			for i, c := range series.Columns {
				if c == "time" {
					tStr, ok := v[i].(string)
					if !ok {
						return nil, fmt.Errorf("unexpected time value: %v", v[i])
					}
					var err error
					// Try nanosecond precision first, then fall back to
					// plain RFC3339.
					t, err = time.Parse(time.RFC3339Nano, tStr)
					if err != nil {
						t, err = time.Parse(time.RFC3339, tStr)
						if err != nil {
							return nil, fmt.Errorf("unexpected time format: %v", err)
						}
					}
				} else {
					value := v[i]
					// JSON numbers are decoded as json.Number; normalize
					// to float64 when possible.
					if n, ok := value.(json.Number); ok {
						f, err := n.Float64()
						if err == nil {
							value = f
						}
					}
					// A nil value ends this row's fields.
					if value == nil {
						break
					}
					fields[c] = value
				}
			}
			// Only rows that produced at least one field become points.
			if len(fields) > 0 {
				if t.After(b.TMax) {
					b.TMax = t
				}
				b.Points = append(
					b.Points,
					BatchPoint{Time: t, Fields: fields, Tags: b.Tags},
				)
			}
		}
		batches = append(batches, b)
	}
	return batches, nil
}

View File

@ -1,4 +0,0 @@
/*
Provides a set of structures for passing data around Kapacitor.
*/
package models

View File

@ -1,248 +0,0 @@
package models
import (
"bytes"
"sort"
"strconv"
"time"
"github.com/influxdata/influxdb/models"
)
type GroupID string
type Fields map[string]interface{}
type Tags map[string]string
const (
NilGroup GroupID = ""
)
// Common interface for both Point and Batch objects
type PointInterface interface {
PointName() string
PointTime() time.Time
PointGroup() GroupID
PointTags() Tags
PointDimensions() Dimensions
PointFields() Fields
// Return a copy of self
Copy() PointInterface
Setter() PointSetter
}
type PointSetter interface {
PointInterface
SetNewDimTag(key string, value string)
UpdateGroup()
Interface() PointInterface
}
type Dimensions struct {
ByName bool
TagNames []string
}
// Represents a single data point
type Point struct {
Name string
Database string
RetentionPolicy string
Group GroupID
Dimensions Dimensions
Tags Tags
Fields Fields
Time time.Time
}
func (p Point) PointName() string {
return p.Name
}
func (p Point) PointGroup() GroupID {
return p.Group
}
func (p Point) PointTime() time.Time {
return p.Time
}
func (p Point) PointFields() Fields {
return p.Fields
}
// PointTags returns only the tags that are grouping dimensions of the point.
func (p Point) PointTags() Tags {
	dimTags := make(Tags, len(p.Dimensions.TagNames))
	for _, name := range p.Dimensions.TagNames {
		dimTags[name] = p.Tags[name]
	}
	return dimTags
}
func (p Point) PointDimensions() Dimensions {
return p.Dimensions
}
func (p Point) Copy() PointInterface {
cp := p
cp.Fields = p.Fields.Copy()
cp.Tags = p.Tags.Copy()
cp.Dimensions = p.Dimensions.Copy()
return &cp
}
func (p Point) Setter() PointSetter {
return &p
}
func (p *Point) Interface() PointInterface {
return *p
}
// SetNewDimTag sets the tag key to value and records key as a grouping
// dimension if it is not already one.
func (p *Point) SetNewDimTag(key string, value string) {
	p.Tags[key] = value
	for _, existing := range p.Dimensions.TagNames {
		if existing == key {
			// Already a dimension; nothing more to do.
			return
		}
	}
	p.Dimensions.TagNames = append(p.Dimensions.TagNames, key)
}
// UpdateGroup recomputes the point's group ID from its current name, tags,
// and dimensions. Tag names are sorted first so the ID is deterministic.
func (p *Point) UpdateGroup() {
	sort.Strings(p.Dimensions.TagNames)
	p.Group = ToGroupID(p.Name, p.Tags, p.Dimensions)
}
// SortedFields returns the field names in ascending lexicographic order.
func SortedFields(fields Fields) []string {
	names := make([]string, len(fields))
	i := 0
	for name := range fields {
		names[i] = name
		i++
	}
	sort.Strings(names)
	return names
}
// SortedKeys returns the keys of the map in ascending lexicographic order.
func SortedKeys(tags map[string]string) []string {
	keys := make([]string, len(tags))
	i := 0
	for key := range tags {
		keys[i] = key
		i++
	}
	sort.Strings(keys)
	return keys
}
// ToGroupID computes the group ID for the given measurement name, tags,
// and grouping dimensions. With no dimension tag names, the ID is the
// measurement name (when grouping by name) or NilGroup. Otherwise the ID
// is the comma-separated list of tag=value pairs, optionally prefixed by
// the name and a newline (a character not allowed in names, so it is a
// safe delimiter).
func ToGroupID(name string, tags map[string]string, dims Dimensions) GroupID {
	if len(dims.TagNames) == 0 {
		if !dims.ByName {
			return NilGroup
		}
		return GroupID(name)
	}
	var b bytes.Buffer
	if dims.ByName {
		b.WriteString(name)
		// Delimiter that cannot appear in a measurement name.
		b.WriteRune('\n')
	}
	for i, tagName := range dims.TagNames {
		if i > 0 {
			b.WriteRune(',')
		}
		b.WriteString(tagName)
		b.WriteRune('=')
		b.WriteString(tags[tagName])
	}
	return GroupID(b.Bytes())
}
// Returns byte array of a line protocol representation of the point
func (p Point) Bytes(precision string) []byte {
key := models.MakeKey([]byte(p.Name), models.NewTags(p.Tags))
fields := models.Fields(p.Fields).MarshalBinary()
kl := len(key)
fl := len(fields)
var bytes []byte
if p.Time.IsZero() {
bytes = make([]byte, fl+kl+1)
copy(bytes, key)
bytes[kl] = ' '
copy(bytes[kl+1:], fields)
} else {
timeStr := strconv.FormatInt(p.Time.UnixNano()/models.GetPrecisionMultiplier(precision), 10)
tl := len(timeStr)
bytes = make([]byte, fl+kl+tl+2)
copy(bytes, key)
bytes[kl] = ' '
copy(bytes[kl+1:], fields)
bytes[kl+fl+1] = ' '
copy(bytes[kl+fl+2:], []byte(timeStr))
}
return bytes
}
func PointToRow(p Point) (row *models.Row) {
row = &models.Row{
Name: p.Name,
Tags: p.Tags,
Columns: []string{"time"},
Values: make([][]interface{}, 1),
}
for _, f := range SortedFields(p.Fields) {
row.Columns = append(row.Columns, f)
}
row.Values[0] = make([]interface{}, len(p.Fields)+1)
row.Values[0][0] = p.Time
for i, c := range row.Columns[1:] {
row.Values[0][i+1] = p.Fields[c]
}
return
}
// Copy returns a new Fields map holding the same key/value pairs.
func (f Fields) Copy() Fields {
	copied := make(Fields, len(f))
	for key, value := range f {
		copied[key] = value
	}
	return copied
}
// Copy returns a new Tags map holding the same key/value pairs.
func (t Tags) Copy() Tags {
	copied := make(Tags, len(t))
	for key, value := range t {
		copied[key] = value
	}
	return copied
}
// Copy returns a deep copy of the dimensions; the TagNames slice is cloned
// so mutations of the copy do not affect the original.
func (d Dimensions) Copy() Dimensions {
	names := make([]string, len(d.TagNames))
	copy(names, d.TagNames)
	return Dimensions{ByName: d.ByName, TagNames: names}
}
// ToSet converts the dimension tag names into a set for membership tests.
func (d Dimensions) ToSet() map[string]bool {
	set := make(map[string]bool, len(d.TagNames))
	for _, name := range d.TagNames {
		set[name] = true
	}
	return set
}
// Simple container for point data.
type RawPoint struct {
Time time.Time
Fields Fields
Tags Tags
}

View File

@ -19,7 +19,9 @@ import (
)
const (
statAverageExecTime = "avg_exec_time_ns"
statErrorCount = "errors"
statCardinalityGauge = "working_cardinality"
statAverageExecTime = "avg_exec_time_ns"
)
// A node that can be in an executor.
@ -59,6 +61,8 @@ type Node interface {
emittedCount() int64
incrementErrorCount()
stats() map[string]interface{}
}
@ -80,6 +84,8 @@ type node struct {
timer timer.Timer
statsKey string
statMap *kexpvar.Map
nodeErrors *kexpvar.Int
}
func (n *node) addParentEdge(e *Edge) {
@ -102,6 +108,9 @@ func (n *node) init() {
n.statsKey, n.statMap = vars.NewStatistic("nodes", tags)
avgExecVar := &MaxDuration{}
n.statMap.Set(statAverageExecTime, avgExecVar)
n.nodeErrors = &kexpvar.Int{}
n.statMap.Set(statErrorCount, n.nodeErrors)
n.statMap.Set(statCardinalityGauge, kexpvar.NewIntFuncGauge(nil))
n.timer = n.et.tm.TimingService.NewTimer(avgExecVar)
n.errCh = make(chan error, 1)
}
@ -200,25 +209,36 @@ func (n *node) closeChildEdges() {
func (n *node) edot(buf *bytes.Buffer, labels bool) {
if labels {
// Print all stats on node.
buf.Write([]byte(
fmt.Sprintf("\n%s [label=\"%s ",
n.Name(),
buf.WriteString(
fmt.Sprintf("\n%s [xlabel=\"",
n.Name(),
),
))
)
i := 0
n.statMap.DoSorted(func(kv expvar.KeyValue) {
buf.Write([]byte(
fmt.Sprintf("%s=%s ",
if i != 0 {
// NOTE: A literal \r, indicates a newline right justified in graphviz syntax.
buf.WriteString(`\r`)
}
i++
var s string
if sv, ok := kv.Value.(kexpvar.StringVar); ok {
s = sv.StringValue()
} else {
s = kv.Value.String()
}
buf.WriteString(
fmt.Sprintf("%s=%s",
kv.Key,
kv.Value.String(),
s,
),
))
)
})
buf.Write([]byte("\"];\n"))
for i, c := range n.children {
buf.Write([]byte(
fmt.Sprintf("%s -> %s [label=\"%d\"];\n",
fmt.Sprintf("%s -> %s [label=\"processed=%d\"];\n",
n.Name(),
c.Name(),
n.outs[i].collectedCount(),
@ -276,6 +296,11 @@ func (n *node) emittedCount() (count int64) {
return
}
// incrementErrorCount increments the node's error count stat
// (registered in the statMap under statErrorCount).
func (n *node) incrementErrorCount() {
	n.nodeErrors.Add(1)
}
func (n *node) stats() map[string]interface{} {
stats := make(map[string]interface{})

View File

@ -3,6 +3,7 @@ package pipeline
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/influxdata/kapacitor/tick/ast"
@ -45,6 +46,7 @@ const defaultDetailsTmpl = "{{ json . }}"
// * OpsGenie -- Send alert to OpsGenie.
// * VictorOps -- Send alert to VictorOps.
// * PagerDuty -- Send alert to PagerDuty.
// * Pushover -- Send alert to Pushover.
// * Talk -- Post alert message to Talk client.
// * Telegram -- Post alert message to Telegram client.
//
@ -134,6 +136,8 @@ type AlertNode struct {
// * Group -- Concatenation of all group-by tags of the form [key=value,]+.
// If no groupBy is performed equal to literal 'nil'.
// * Tags -- Map of tags. Use '{{ index .Tags "key" }}' to get a specific tag value.
// * ServerInfo -- Information about the running server. Available nested fields are:
// Hostname, ClusterID and ServerID.
//
// Example:
// stream
@ -289,7 +293,7 @@ type AlertNode struct {
// Post the JSON alert data to the specified URL.
// tick:ignore
PostHandlers []*PostHandler `tick:"Post"`
HTTPPostHandlers []*AlertHTTPPostHandler `tick:"Post"`
// Send the JSON alert data to the specified endpoint via TCP.
// tick:ignore
@ -315,6 +319,10 @@ type AlertNode struct {
// tick:ignore
PagerDutyHandlers []*PagerDutyHandler `tick:"PagerDuty"`
// Send alert to Pushover.
// tick:ignore
PushoverHandlers []*PushoverHandler `tick:"Pushover"`
// Send alert to Sensu.
// tick:ignore
SensuHandlers []*SensuHandler `tick:"Sensu"`
@ -372,6 +380,12 @@ func (n *AlertNode) validate() error {
return errors.Wrapf(err, "invalid SNMP trap %q", snmp.TrapOid)
}
}
for _, post := range n.HTTPPostHandlers {
if err := post.validate(); err != nil {
return errors.Wrap(err, "invalid post")
}
}
return nil
}
@ -461,23 +475,75 @@ func (a *AlertNode) Flapping(low, high float64) *AlertNode {
}
// HTTP POST JSON alert data to a specified URL.
//
// Example:
// stream
// |alert()
// .post()
// .endpoint('example')
//
// Example:
// stream
// |alert()
// .post('http://example.com')
//
// tick:property
func (a *AlertNode) Post(url string) *PostHandler {
post := &PostHandler{
func (a *AlertNode) Post(urls ...string) *AlertHTTPPostHandler {
post := &AlertHTTPPostHandler{
AlertNode: a,
URL: url,
}
a.PostHandlers = append(a.PostHandlers, post)
a.HTTPPostHandlers = append(a.HTTPPostHandlers, post)
if len(urls) == 0 {
return post
}
post.URL = urls[0]
return post
}
// Set a header key and value on the post request.
// Setting the Authenticate header is not allowed from within TICKscript;
// use the configuration file to specify sensitive headers.
//
// Example:
//    stream
//        |alert()
//            .post()
//                .endpoint('example')
//                .header('a','b')
// tick:property
func (a *AlertHTTPPostHandler) Header(k, v string) *AlertHTTPPostHandler {
	// Lazily allocate the map on first use.
	if a.Headers == nil {
		a.Headers = make(map[string]string)
	}
	a.Headers[k] = v
	return a
}
// tick:embedded:AlertNode.Post
type PostHandler struct {
type AlertHTTPPostHandler struct {
*AlertNode
// The POST URL.
// tick:ignore
URL string
// Name of the endpoint to be used, as is defined in the configuration file
Endpoint string
// tick:ignore
Headers map[string]string `tick:"Header"`
}
// validate ensures no sensitive headers are set from within TICKscript.
// The Authenticate header must come from the configuration file so that
// credentials never appear in scripts.
func (a *AlertHTTPPostHandler) validate() error {
	for k := range a.Headers {
		// EqualFold avoids the per-key ToUpper allocation and matches any
		// case variant, same as the previous uppercase comparison.
		if strings.EqualFold(k, "AUTHENTICATE") {
			return errors.New("cannot set 'authenticate' header")
		}
	}
	return nil
}
// Send JSON alert data to a specified address over TCP.
@ -769,6 +835,7 @@ type PagerDutyHandler struct {
}
// Send the alert to HipChat.
// For step-by-step instructions on setting up Kapacitor with HipChat, see the Event Handler Setup Guide (https://docs.influxdata.com//kapacitor/latest/guides/event-handler-setup/#hipchat-setup).
// To allow Kapacitor to post to HipChat,
// go to the URL https://www.hipchat.com/docs/apiv2 for
// information on how to get your room id and tokens.
@ -941,6 +1008,7 @@ func (a *AlertaHandler) Services(service ...string) *AlertaHandler {
// enabled = true
// url = "http://sensu:3030"
// source = "Kapacitor"
// handlers = ["sns","slack"]
//
// Example:
// stream
@ -949,6 +1017,14 @@ func (a *AlertaHandler) Services(service ...string) *AlertaHandler {
//
// Send alerts to Sensu client.
//
// Example:
// stream
// |alert()
// .sensu()
// .handlers('sns','slack')
//
// Send alerts to Sensu specifying the handlers
//
// tick:property
func (a *AlertNode) Sensu() *SensuHandler {
sensu := &SensuHandler{
@ -961,6 +1037,91 @@ func (a *AlertNode) Sensu() *SensuHandler {
// tick:embedded:AlertNode.Sensu
type SensuHandler struct {
*AlertNode
// Sensu source in which to post messages.
// If empty uses the Source from the configuration.
Source string
// Sensu handler list
// If empty uses the handler list from the configuration
// tick:ignore
HandlersList []string `tick:"Handlers"`
}
// Handlers sets the Sensu handler list for the alert.
// If empty, the handler list from the configuration is used.
// tick:property
func (s *SensuHandler) Handlers(handlers ...string) *SensuHandler {
	s.HandlersList = handlers
	return s
}
// Send the alert to Pushover.
// Register your application with Pushover at
// https://pushover.net/apps/build to get a
// Pushover token.
//
// Alert Level Mapping:
// OK - Sends a -2 priority level.
// Info - Sends a -1 priority level.
// Warning - Sends a 0 priority level.
// Critical - Sends a 1 priority level.
//
// Example:
// [pushover]
// enabled = true
// token = "9hiWoDOZ9IbmHsOTeST123ABciWTIqXQVFDo63h9"
// user_key = "Pushover"
//
// Example:
// stream
// |alert()
// .pushover()
// .sound('siren')
// .user_key('other user')
// .device('mydev')
// .title('mytitle')
// .URL('myurl')
// .URLTitle('mytitle')
//
// Send alerts to Pushover.
//
// tick:property
func (a *AlertNode) Pushover() *PushoverHandler {
	// Register a new Pushover handler on this alert node and return it
	// so further properties can be chained.
	handler := &PushoverHandler{AlertNode: a}
	a.PushoverHandlers = append(a.PushoverHandlers, handler)
	return handler
}
// tick:embedded:AlertNode.Pushover
type PushoverHandler struct {
*AlertNode
// User/Group key of your user (or you), viewable when logged
// into the Pushover dashboard. Often referred to as USER_KEY
// in the Pushover documentation.
// If empty uses the user from the configuration.
UserKey string
// Users device name to send message directly to that device,
// rather than all of a user's devices (multiple device names may
// be separated by a comma)
Device string
// Your message's title, otherwise your apps name is used
Title string
// A supplementary URL to show with your message
URL string
// A title for your supplementary URL, otherwise just URL is shown
URLTitle string
// The name of one of the sounds supported by the device clients to override
// the user's default sound choice
Sound string
}
// Send the alert to Slack.
@ -1046,6 +1207,7 @@ type SlackHandler struct {
}
// Send the alert to Telegram.
// For step-by-step instructions on setting up Kapacitor with Telegram, see the Event Handler Setup Guide (https://docs.influxdata.com//kapacitor/latest/guides/event-handler-setup/#telegram-setup).
// To allow Kapacitor to post to Telegram,
//
// Example:

View File

@ -23,9 +23,9 @@ import (
// Computes the derivative via:
// (current - previous ) / ( time_difference / unit)
//
// For batch edges the derivative is computed for each
// point in the batch and because of boundary conditions
// the number of points is reduced by one.
// The derivative is computed for each point, and
// because of boundary conditions the first point is
// dropped.
type DerivativeNode struct {
chainnode

View File

@ -47,7 +47,7 @@ type EvalNode struct {
KeepList []string
// tick:ignore
QuiteFlag bool `tick:"Quiet"`
QuietFlag bool `tick:"Quiet"`
}
func newEvalNode(e EdgeType, exprs []*ast.LambdaNode) *EvalNode {
@ -160,6 +160,6 @@ func (e *EvalNode) Keep(fields ...string) *EvalNode {
// Suppress errors during evaluation.
// tick:property
func (e *EvalNode) Quiet() *EvalNode {
e.QuiteFlag = true
e.QuietFlag = true
return e
}

View File

@ -52,6 +52,11 @@ type FlattenNode struct {
// The joined data point's time will be rounded to the nearest
// multiple of the tolerance duration.
Tolerance time.Duration
// DropOriginalFieldNameFlag indicates whether the original field name should
// be included in the final field name.
//tick:ignore
DropOriginalFieldNameFlag bool `tick:"DropOriginalFieldName"`
}
func newFlattenNode(e EdgeType) *FlattenNode {
@ -68,3 +73,15 @@ func (f *FlattenNode) On(dims ...string) *FlattenNode {
f.Dimensions = dims
return f
}
// DropOriginalFieldName indicates whether the original field name should
// be dropped when constructing the final field name.
// tick:property
func (f *FlattenNode) DropOriginalFieldName(drop ...bool) *FlattenNode {
	// Calling without an argument enables the behavior.
	flag := true
	if len(drop) == 1 {
		flag = drop[0]
	}
	f.DropOriginalFieldNameFlag = flag
	return f
}

View File

@ -26,6 +26,11 @@ type GroupByNode struct {
// tick:ignore
Dimensions []interface{}
// The dimensions to exclude.
// Useful for subtracting tags when grouping by '*'.
// tick:ignore
ExcludedDimensions []string `tick:"Exclude"`
// Whether to include the measurement in the group ID.
// tick:ignore
ByMeasurementFlag bool `tick:"ByMeasurement"`
@ -39,10 +44,10 @@ func newGroupByNode(wants EdgeType, dims []interface{}) *GroupByNode {
}
func (n *GroupByNode) validate() error {
return validateDimensions(n.Dimensions)
return validateDimensions(n.Dimensions, n.ExcludedDimensions)
}
func validateDimensions(dimensions []interface{}) error {
func validateDimensions(dimensions []interface{}, excludedDimensions []string) error {
hasStar := false
for _, d := range dimensions {
switch dim := d.(type) {
@ -59,6 +64,9 @@ func validateDimensions(dimensions []interface{}) error {
if hasStar && len(dimensions) > 1 {
return errors.New("cannot group by both '*' and named dimensions.")
}
if !hasStar && len(excludedDimensions) > 0 {
return errors.New("exclude requires '*'")
}
return nil
}
@ -84,3 +92,9 @@ func (n *GroupByNode) ByMeasurement() *GroupByNode {
n.ByMeasurementFlag = true
return n
}
// Exclude removes any tags from the group.
func (n *GroupByNode) Exclude(dims ...string) *GroupByNode {
n.ExcludedDimensions = append(n.ExcludedDimensions, dims...)
return n
}

View File

@ -0,0 +1,106 @@
package pipeline
import (
"errors"
"fmt"
"strings"
)
// An HTTPPostNode will take the incoming data stream and POST it to an HTTP endpoint.
// That endpoint may be specified as a positional argument, or as an endpoint property
// method on httpPost. Multiple endpoint property methods may be specified.
//
// Example:
//    stream
//        |window()
//            .period(10s)
//            .every(5s)
//        |top('value', 10)
//        //Post the top 10 results over the last 10s updated every 5s.
//        |httpPost('http://example.com/api/top10')
//
// Example:
//    stream
//        |window()
//            .period(10s)
//            .every(5s)
//        |top('value', 10)
//        //Post the top 10 results over the last 10s updated every 5s.
//        |httpPost()
//            .endpoint('example')
//
type HTTPPostNode struct {
	chainnode

	// Endpoints is the list of named endpoints, as defined in the
	// configuration file, to POST data to.
	// tick:ignore
	Endpoints []string `tick:"Endpoint"`

	// Headers is a set of extra header key/value pairs sent with each POST.
	Headers map[string]string `tick:"Header"`

	// URLs is the list of raw URLs to POST data to, supplied as the
	// positional argument to httpPost.
	// tick:ignore
	URLs []string
}
// newHTTPPostNode constructs an HTTPPostNode that consumes and emits the
// given edge type. Zero or one positional URLs may be supplied; validation
// happens later in validate.
func newHTTPPostNode(wants EdgeType, urls ...string) *HTTPPostNode {
	return &HTTPPostNode{
		chainnode: newBasicChainNode("http_post", wants, wants),
		URLs:      urls,
	}
}

// validate checks that exactly one destination is configured — either a raw
// URL or a named endpoint, but not both — and that no reserved header is set.
// tick:ignore
func (p *HTTPPostNode) validate() error {
	if len(p.URLs) >= 2 {
		return fmt.Errorf("httpPost expects 0 or 1 arguments, got %v", len(p.URLs))
	}

	if len(p.Endpoints) > 1 {
		return fmt.Errorf("httpPost expects 0 or 1 endpoints, got %v", len(p.Endpoints))
	}

	if len(p.URLs) == 0 && len(p.Endpoints) == 0 {
		return errors.New("must provide url or endpoint")
	}

	if len(p.URLs) > 0 && len(p.Endpoints) > 0 {
		return errors.New("only one endpoint and url may be specified")
	}

	// Refuse any case variant of the reserved AUTHENTICATE header.
	for k := range p.Headers {
		if strings.ToUpper(k) == "AUTHENTICATE" {
			return errors.New("cannot set 'authenticate' header")
		}
	}

	return nil
}
// Name of the endpoint to be used, as is defined in the configuration file.
//
// Example:
//    stream
//        |httpPost()
//            .endpoint('example')
//
// tick:property
func (p *HTTPPostNode) Endpoint(endpoint string) *HTTPPostNode {
	p.Endpoints = append(p.Endpoints, endpoint)
	return p
}

// Header sets an additional header key/value pair to send with each POST.
// Header may be called more than once to set multiple headers.
//
// Example:
//    stream
//        |httpPost()
//            .endpoint('example')
//            .header('my', 'header')
//
// tick:property
func (p *HTTPPostNode) Header(k, v string) *HTTPPostNode {
	// Lazily initialize the map so a zero-valued node remains usable.
	if p.Headers == nil {
		p.Headers = map[string]string{}
	}
	p.Headers[k] = v
	return p
}

View File

@ -9,6 +9,8 @@ const DefaultFlushInterval = time.Second * 10
//
// Example:
// stream
// |from()
// .measurement('requests')
// |eval(lambda: "errors" / "total")
// .as('error_percent')
// // Write the transformed data to InfluxDB

View File

@ -7,7 +7,7 @@ import (
)
// tmpl -- go get github.com/benbjohnson/tmpl
//go:generate tmpl -data=@../tmpldata influxql.gen.go.tmpl
//go:generate tmpl -data=@../tmpldata.json influxql.gen.go.tmpl
// An InfluxQLNode performs the available function from the InfluxQL language.
// These function can be performed on a stream or batch edge.
@ -453,12 +453,12 @@ func (n *chainnode) MovingAverage(field string, window int64) *InfluxQLNode {
return i
}
// Compute the holt-winters forecast of a data set.
// Compute the holt-winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
func (n *chainnode) HoltWinters(field string, h, m int64, interval time.Duration) *InfluxQLNode {
return n.holtWinters(field, h, m, interval, false)
}
// Compute the holt-winters forecast of a data set.
// Compute the holt-winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
// This method also outputs all the points used to fit the data in addition to the forecasted data.
func (n *chainnode) HoltWintersWithFit(field string, h, m int64, interval time.Duration) *InfluxQLNode {
return n.holtWinters(field, h, m, interval, true)

View File

@ -79,6 +79,9 @@ const (
type K8sAutoscaleNode struct {
chainnode
// Cluster is the name of the Kubernetes cluster to use.
Cluster string
// Namespace is the namespace of the resource, if empty the default namespace will be used.
Namespace string

View File

@ -0,0 +1,68 @@
package pipeline
import (
"errors"
)
// Writes the data back into the Kapacitor stream.
// To write data to a remote Kapacitor instance use the InfluxDBOut node.
//
// Example:
// |kapacitorLoopback()
// .database('mydb')
// .retentionPolicy('myrp')
// .measurement('errors')
// .tag('kapacitor', 'true')
// .tag('version', '0.2')
//
//
// NOTE: It is possible to create infinite loops using this node.
// Take care to ensure you do not chain tasks together creating a loop.
//
// Available Statistics:
//
// * points_written -- number of points written back to Kapacitor
//
type KapacitorLoopbackNode struct {
	node

	// The name of the database.
	Database string
	// The name of the retention policy.
	RetentionPolicy string
	// The name of the measurement.
	Measurement string

	// Static set of tags to add to all data points before writing them.
	// tick:ignore
	Tags map[string]string `tick:"Tag"`
}

// newKapacitorLoopbackNode constructs a KapacitorLoopbackNode that consumes
// the given edge type and provides no downstream edge (it is a terminal node).
func newKapacitorLoopbackNode(wants EdgeType) *KapacitorLoopbackNode {
	return &KapacitorLoopbackNode{
		node: node{
			desc:     "kapacitor_loopback",
			wants:    wants,
			provides: NoEdge,
		},
		Tags: make(map[string]string),
	}
}
// Add a static tag to all data points.
// Tag can be called more than once.
//
// tick:property
func (k *KapacitorLoopbackNode) Tag(key, value string) *KapacitorLoopbackNode {
	k.Tags[key] = value
	return k
}

// validate ensures the destination database and retention policy are set,
// since points cannot be written back into Kapacitor without them.
func (k *KapacitorLoopbackNode) validate() error {
	if k.Database == "" {
		return errors.New("must specify a database")
	}
	if k.RetentionPolicy == "" {
		return errors.New("must specify a retention policy")
	}
	return nil
}

View File

@ -326,6 +326,15 @@ func (n *chainnode) HttpOut(endpoint string) *HTTPOutNode {
return h
}
// Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint.
// HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an
// endpoint property method.
func (n *chainnode) HttpPost(url ...string) *HTTPPostNode {
h := newHTTPPostNode(n.provides, url...)
n.linkChild(h)
return h
}
// Create an influxdb output node that will store the incoming data into InfluxDB.
func (n *chainnode) InfluxDBOut() *InfluxDBOutNode {
i := newInfluxDBOutNode(n.provides)
@ -333,6 +342,13 @@ func (n *chainnode) InfluxDBOut() *InfluxDBOutNode {
return i
}
// Create an kapacitor loopback node that will send data back into Kapacitor as a stream.
func (n *chainnode) KapacitorLoopback() *KapacitorLoopbackNode {
k := newKapacitorLoopbackNode(n.provides)
n.linkChild(k)
return k
}
// Create an alert node, which can trigger alerts.
func (n *chainnode) Alert() *AlertNode {
a := newAlertNode(n.provides)
@ -451,3 +467,17 @@ func (n *chainnode) K8sAutoscale() *K8sAutoscaleNode {
n.linkChild(k)
return k
}
// Create a node that tracks duration in a given state.
func (n *chainnode) StateDuration(expression *ast.LambdaNode) *StateDurationNode {
sd := newStateDurationNode(n.provides, expression)
n.linkChild(sd)
return sd
}
// Create a node that tracks number of consecutive points in a given state.
func (n *chainnode) StateCount(expression *ast.LambdaNode) *StateCountNode {
sc := newStateCountNode(n.provides, expression)
n.linkChild(sc)
return sc
}

View File

@ -0,0 +1,101 @@
package pipeline
import (
"time"
"github.com/influxdata/kapacitor/tick/ast"
)
// Compute the duration of a given state.
// The state is defined via a lambda expression. For each consecutive point for
// which the expression evaluates as true, the state duration will be
// incremented by the duration between points. When a point evaluates as false,
// the state duration is reset.
//
// The state duration will be added as an additional field to each point. If the
// expression evaluates as false, the value will be -1. If the expression
// generates an error during evaluation, the point is discarded, and does not
// affect the state duration.
//
// Example:
// stream
// |from()
// .measurement('cpu')
// |where(lambda: "cpu" == 'cpu-total')
// |groupBy('host')
// |stateDuration(lambda: "usage_idle" <= 10)
// .unit(1m)
// |alert()
// // Warn after 1 minute
// .warn(lambda: "state_duration" >= 1)
// // Critical after 5 minutes
// .crit(lambda: "state_duration" >= 5)
//
// Note that as the first point in the given state has no previous point, its
// state duration will be 0.
type StateDurationNode struct {
	chainnode

	// Expression to determine whether state is active.
	// tick:ignore
	Lambda *ast.LambdaNode

	// The new name of the resulting duration field.
	// Default: 'state_duration'
	As string

	// The time unit of the resulting duration value.
	// Default: 1s.
	Unit time.Duration
}

// newStateDurationNode constructs a StateDurationNode with the default field
// name ('state_duration') and the default unit of one second.
func newStateDurationNode(wants EdgeType, predicate *ast.LambdaNode) *StateDurationNode {
	return &StateDurationNode{
		chainnode: newBasicChainNode("state_duration", wants, wants),
		Lambda:    predicate,
		As:        "state_duration",
		Unit:      time.Second,
	}
}
// Compute the number of consecutive points in a given state.
// The state is defined via a lambda expression. For each consecutive point for
// which the expression evaluates as true, the state count will be incremented
// When a point evaluates as false, the state count is reset.
//
// The state count will be added as an additional field to each point. If the
// expression evaluates as false, the value will be -1. If the expression
// generates an error during evaluation, the point is discarded, and does not
// affect the state count.
//
// Example:
// stream
// |from()
// .measurement('cpu')
// |where(lambda: "cpu" == 'cpu-total')
// |groupBy('host')
// |stateCount(lambda: "usage_idle" <= 10)
// |alert()
// // Warn after 1 point
// .warn(lambda: "state_count" >= 1)
// // Critical after 5 points
// .crit(lambda: "state_count" >= 5)
type StateCountNode struct {
	chainnode

	// Expression to determine whether state is active.
	// tick:ignore
	Lambda *ast.LambdaNode

	// The new name of the resulting count field.
	// Default: 'state_count'
	As string
}

// newStateCountNode constructs a StateCountNode with the default field name
// ('state_count').
func newStateCountNode(wants EdgeType, predicate *ast.LambdaNode) *StateCountNode {
	return &StateCountNode{
		chainnode: newBasicChainNode("state_count", wants, wants),
		Lambda:    predicate,
		As:        "state_count",
	}
}

View File

@ -271,5 +271,5 @@ func (n *FromNode) GroupByMeasurement() *FromNode {
}
func (s *FromNode) validate() error {
return validateDimensions(s.Dimensions)
return validateDimensions(s.Dimensions, nil)
}

View File

@ -5,7 +5,7 @@ import (
"time"
"github.com/influxdata/kapacitor/tick"
"github.com/influxdata/kapacitor/udf"
"github.com/influxdata/kapacitor/udf/agent"
)
// A UDFNode is a node that can run a User Defined Function (UDF) in a separate process.
@ -48,11 +48,11 @@ type UDFNode struct {
chainnode
UDFName string
options map[string]*udf.OptionInfo
options map[string]*agent.OptionInfo
// Options that were set on the node
// tick:ignore
Options []*udf.Option
Options []*agent.Option
describer *tick.ReflectionDescriber
}
@ -61,20 +61,20 @@ func NewUDF(
parent Node,
name string,
wants,
provides udf.EdgeType,
options map[string]*udf.OptionInfo,
provides agent.EdgeType,
options map[string]*agent.OptionInfo,
) *UDFNode {
var pwants, pprovides EdgeType
switch wants {
case udf.EdgeType_STREAM:
case agent.EdgeType_STREAM:
pwants = StreamEdge
case udf.EdgeType_BATCH:
case agent.EdgeType_BATCH:
pwants = BatchEdge
}
switch provides {
case udf.EdgeType_STREAM:
case agent.EdgeType_STREAM:
pprovides = StreamEdge
case udf.EdgeType_BATCH:
case agent.EdgeType_BATCH:
pprovides = BatchEdge
}
udf := &UDFNode{
@ -123,31 +123,31 @@ func (u *UDFNode) SetProperty(name string, args ...interface{}) (interface{}, er
if got, exp := len(args), len(opt.ValueTypes); got != exp {
return nil, fmt.Errorf("unexpected number of args to %s, got %d expected %d", name, got, exp)
}
values := make([]*udf.OptionValue, len(args))
values := make([]*agent.OptionValue, len(args))
for i, arg := range args {
values[i] = &udf.OptionValue{}
values[i] = &agent.OptionValue{}
switch v := arg.(type) {
case bool:
values[i].Type = udf.ValueType_BOOL
values[i].Value = &udf.OptionValue_BoolValue{v}
values[i].Type = agent.ValueType_BOOL
values[i].Value = &agent.OptionValue_BoolValue{v}
case int64:
values[i].Type = udf.ValueType_INT
values[i].Value = &udf.OptionValue_IntValue{v}
values[i].Type = agent.ValueType_INT
values[i].Value = &agent.OptionValue_IntValue{v}
case float64:
values[i].Type = udf.ValueType_DOUBLE
values[i].Value = &udf.OptionValue_DoubleValue{v}
values[i].Type = agent.ValueType_DOUBLE
values[i].Value = &agent.OptionValue_DoubleValue{v}
case string:
values[i].Type = udf.ValueType_STRING
values[i].Value = &udf.OptionValue_StringValue{v}
values[i].Type = agent.ValueType_STRING
values[i].Value = &agent.OptionValue_StringValue{v}
case time.Duration:
values[i].Type = udf.ValueType_DURATION
values[i].Value = &udf.OptionValue_DurationValue{int64(v)}
values[i].Type = agent.ValueType_DURATION
values[i].Value = &agent.OptionValue_DurationValue{int64(v)}
}
if values[i].Type != opt.ValueTypes[i] {
return nil, fmt.Errorf("unexpected arg to %s, got %v expected %v", name, values[i].Type, opt.ValueTypes[i])
}
}
u.Options = append(u.Options, &udf.Option{
u.Options = append(u.Options, &agent.Option{
Name: name,
Values: values,
})

View File

@ -3,8 +3,10 @@ package kapacitor
import (
"errors"
"log"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
)
@ -13,6 +15,8 @@ type SampleNode struct {
node
s *pipeline.SampleNode
countsMu sync.RWMutex
counts map[models.GroupID]int64
duration time.Duration
}
@ -33,6 +37,14 @@ func newSampleNode(et *ExecutingTask, n *pipeline.SampleNode, l *log.Logger) (*S
}
func (s *SampleNode) runSample([]byte) error {
valueF := func() int64 {
s.countsMu.RLock()
l := len(s.counts)
s.countsMu.RUnlock()
return int64(l)
}
s.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))
switch s.Wants() {
case pipeline.StreamEdge:
for p, ok := s.ins[0].NextPoint(); ok; p, ok = s.ins[0].NextPoint() {
@ -73,10 +85,12 @@ func (s *SampleNode) shouldKeep(group models.GroupID, t time.Time) bool {
keepTime := t.Truncate(s.duration)
return t.Equal(keepTime)
} else {
s.countsMu.Lock()
count := s.counts[group]
keep := count%s.s.N == 0
count++
s.counts[group] = count
s.countsMu.Unlock()
return keep
}
}

View File

@ -113,6 +113,7 @@ func New(c Config) (Client, error) {
urls: urls,
client: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: c.TLSConfig,
},
},
@ -157,6 +158,7 @@ func (c *httpClient) Update(new Config) error {
if old.TLSConfig != new.TLSConfig {
c.client = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: new.TLSConfig,
},
}

View File

@ -4,18 +4,23 @@ import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net/url"
"github.com/influxdata/kapacitor/listmap"
"github.com/influxdata/kapacitor/services/k8s/client"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/config"
)
type Config struct {
Enabled bool `toml:"enabled" override:"enabled"`
ID string `toml:"id" override:"id"`
InCluster bool `toml:"in-cluster" override:"in-cluster"`
APIServers []string `toml:"api-servers" override:"api-servers"`
Token string `toml:"token" override:"token,redact"`
CAPath string `toml:"ca-path" override:"ca-path"`
Namespace string `toml:"namespace" override:"namespace"`
Resource string `toml:"resource" override:"resource"`
}
func NewConfig() Config {
@ -41,6 +46,21 @@ func (c Config) Validate() error {
}
} else if len(c.APIServers) == 0 {
return errors.New("no api-servers specified, must provide at least one server URL")
} else {
for _, s := range c.APIServers {
_, err := url.Parse(s)
if err != nil {
return err
}
}
}
if c.Resource != "" {
switch c.Resource {
case "node", "pod", "service", "endpoint":
default:
return errors.New("Resource must be one of node, pod, service, or endpoints")
}
}
return nil
}
@ -70,3 +90,65 @@ func (c Config) ClientConfig() (client.Config, error) {
TLSConfig: t,
}, nil
}
// Configs is the list of configured Kubernetes clusters.
type Configs []Config

// UnmarshalTOML decodes either a single TOML table or an array of tables
// into the list of configs.
func (cs *Configs) UnmarshalTOML(data interface{}) error {
	return listmap.DoUnmarshalTOML(cs, data)
}

// Validate checks every cluster config and requires a non-empty ID whenever
// more than one cluster is configured, since IDs distinguish the clusters.
func (cs Configs) Validate() error {
	l := len(cs)
	for _, c := range cs {
		if err := c.Validate(); err != nil {
			return err
		}
		// ID must not be empty when we have more than one.
		if l > 1 && c.ID == "" {
			return errors.New("id must not be empty")
		}
	}
	return nil
}
// Prom writes the prometheus configuration for discoverer into ScrapeConfig
func (c Config) Prom(conf *config.ScrapeConfig) {
	// With no explicit API servers configured, emit a single SD config and
	// rely on in-cluster discovery.
	if len(c.APIServers) == 0 {
		conf.ServiceDiscoveryConfig.KubernetesSDConfigs = []*config.KubernetesSDConfig{
			&config.KubernetesSDConfig{
				Role:        config.KubernetesRole(c.Resource),
				BearerToken: c.Token,
				TLSConfig: config.TLSConfig{
					CAFile: c.CAPath,
				},
			},
		}
		return
	}
	// One SD config per API server URL.
	sds := make([]*config.KubernetesSDConfig, len(c.APIServers))
	for i, srv := range c.APIServers {
		// Parse error deliberately ignored — Validate() already checks that
		// every server URL parses. NOTE(review): confirm Validate is always
		// called before Prom.
		url, _ := url.Parse(srv)
		sds[i] = &config.KubernetesSDConfig{
			APIServer: config.URL{
				URL: url,
			},
			Role:        config.KubernetesRole(c.Resource),
			BearerToken: c.Token,
			TLSConfig: config.TLSConfig{
				CAFile: c.CAPath,
			},
		}
	}
	conf.ServiceDiscoveryConfig.KubernetesSDConfigs = sds
}

// Service return discoverer type
func (c Config) Service() string {
	return "kubernetes"
}

// ServiceID returns the discoverers name
func (c Config) ServiceID() string {
	return c.ID
}

View File

@ -0,0 +1,69 @@
package k8s
import (
"log"
"sync/atomic"
"github.com/influxdata/kapacitor/services/k8s/client"
"github.com/pkg/errors"
)
// Cluster wraps the k8s client for a single configured Kubernetes cluster
// and retains its Config for later inspection.
type Cluster struct {
	configValue atomic.Value // Config
	client      client.Client
	logger      *log.Logger
}

// NewCluster builds a Cluster and its underlying client from the given config.
func NewCluster(c Config, l *log.Logger) (*Cluster, error) {
	clientConfig, err := c.ClientConfig()
	if err != nil {
		return nil, errors.Wrap(err, "failed to create k8s client config")
	}
	cli, err := client.New(clientConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create k8s client")
	}

	s := &Cluster{
		client: cli,
		logger: l,
	}
	s.configValue.Store(c)
	return s, nil
}
// Open is a no-op; the cluster needs no startup work.
func (s *Cluster) Open() error {
	return nil
}

// Close is a no-op; the cluster holds no resources requiring shutdown.
func (s *Cluster) Close() error {
	return nil
}

// Update stores the new config and reconfigures the underlying client.
func (s *Cluster) Update(c Config) error {
	s.configValue.Store(c)
	clientConfig, err := c.ClientConfig()
	if err != nil {
		return errors.Wrap(err, "failed to create k8s client config")
	}
	return s.client.Update(clientConfig)
}

// Test verifies connectivity by querying the API server's versions.
func (s *Cluster) Test() error {
	cli, err := s.Client()
	if err != nil {
		return errors.Wrap(err, "failed to get client")
	}
	_, err = cli.Versions()
	if err != nil {
		return errors.Wrap(err, "failed to query server versions")
	}
	return nil
}

// Client returns the cluster's client, or an error if this cluster's
// configuration is not enabled.
func (s *Cluster) Client() (client.Client, error) {
	config := s.configValue.Load().(Config)
	if !config.Enabled {
		return nil, errors.New("service is not enabled")
	}
	return s.client, nil
}

View File

@ -3,80 +3,150 @@ package k8s
import (
"fmt"
"log"
"sync/atomic"
"sync"
"github.com/influxdata/kapacitor/services/k8s/client"
"github.com/pkg/errors"
"github.com/influxdata/kapacitor/services/scraper"
)
// Service is the kubernetes discovery and autoscale service
type Service struct {
configValue atomic.Value // Config
client client.Client
logger *log.Logger
mu sync.Mutex
configs []Config
clusters map[string]*Cluster
registry scraper.Registry
logger *log.Logger
}
func NewService(c Config, l *log.Logger) (*Service, error) {
clientConfig, err := c.ClientConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to create k8s client config")
}
cli, err := client.New(clientConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create k8s client")
// NewService creates a new unopened k8s service
func NewService(c []Config, r scraper.Registry, l *log.Logger) (*Service, error) {
clusters := make(map[string]*Cluster, len(c))
for i := range c {
cluster, err := NewCluster(c[i], l)
if err != nil {
return nil, err
}
clusters[c[i].ID] = cluster
}
s := &Service{
client: cli,
logger: l,
}
s.configValue.Store(c)
return s, nil
return &Service{
clusters: clusters,
configs: c,
logger: l,
registry: r,
}, nil
}
// Open starts the kubernetes service
func (s *Service) Open() error {
return nil
s.mu.Lock()
defer s.mu.Unlock()
for _, c := range s.clusters {
if err := c.Open(); err != nil {
return err
}
}
s.register()
return s.registry.Commit()
}
func (s *Service) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
for _, c := range s.clusters {
c.Close()
}
s.deregister()
return s.registry.Commit()
}
func (s *Service) deregister() {
// Remove all the configurations in the registry
for _, d := range s.configs {
s.registry.RemoveDiscoverer(&d)
}
}
func (s *Service) register() {
// Add all configurations to registry
for _, d := range s.configs {
if d.Enabled {
s.registry.AddDiscoverer(&d)
}
}
}
func (s *Service) Update(newConfigs []interface{}) error {
s.mu.Lock()
defer s.mu.Unlock()
configs := make([]Config, len(newConfigs))
existingClusters := make(map[string]bool, len(newConfigs))
for i := range newConfigs {
c, ok := newConfigs[i].(Config)
if !ok {
return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfigs[i])
}
configs[i] = c
cluster, ok := s.clusters[c.ID]
if !ok {
var err error
cluster, err = NewCluster(c, s.logger)
if err != nil {
return err
}
if err := cluster.Open(); err != nil {
return err
}
s.clusters[c.ID] = cluster
} else {
if err := cluster.Update(c); err != nil {
return err
}
}
existingClusters[c.ID] = true
}
// Close and delete any removed clusters
for id := range s.clusters {
if !existingClusters[id] {
s.clusters[id].Close()
delete(s.clusters, id)
}
}
s.deregister()
s.configs = configs
s.register()
return nil
}
func (s *Service) Update(newConfig []interface{}) error {
if l := len(newConfig); l != 1 {
return fmt.Errorf("expected only one new config object, got %d", l)
}
c, ok := newConfig[0].(Config)
if !ok {
return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0])
}
s.configValue.Store(c)
clientConfig, err := c.ClientConfig()
if err != nil {
return errors.Wrap(err, "failed to create k8s client config")
}
return s.client.Update(clientConfig)
type testOptions struct {
ID string `json:"id"`
}
func (s *Service) TestOptions() interface{} {
return nil
return new(testOptions)
}
func (s *Service) Test(options interface{}) error {
cli, err := s.Client()
if err != nil {
return errors.Wrap(err, "failed to get client")
o, ok := options.(*testOptions)
if !ok {
return fmt.Errorf("unexpected options type %T", options)
}
_, err = cli.Versions()
if err != nil {
return errors.Wrap(err, "failed to query server versions")
s.mu.Lock()
cluster, ok := s.clusters[o.ID]
s.mu.Unlock()
if !ok {
return fmt.Errorf("unknown kubernetes cluster %q", o.ID)
}
return nil
return cluster.Test()
}
func (s *Service) Client() (client.Client, error) {
config := s.configValue.Load().(Config)
if !config.Enabled {
return nil, errors.New("service is not enabled")
func (s *Service) Client(id string) (client.Client, error) {
s.mu.Lock()
cluster, ok := s.clusters[id]
s.mu.Unlock()
if !ok {
return nil, fmt.Errorf("unknown kubernetes cluster %q, cannot get client", id)
}
return s.client, nil
return cluster.Client()
}

View File

@ -47,6 +47,7 @@ func (s *ShiftNode) runShift([]byte) error {
for b, ok := s.ins[0].NextBatch(); ok; b, ok = s.ins[0].NextBatch() {
s.timer.Start()
b.TMax = b.TMax.Add(s.shift)
b.Points = b.ShallowCopyPoints()
for i, p := range b.Points {
b.Points[i].Time = p.Time.Add(s.shift)
}

View File

@ -0,0 +1,216 @@
package kapacitor
import (
"fmt"
"log"
"sync"
"time"
"github.com/influxdata/kapacitor/expvar"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
)
// stateTracker accumulates per-group state across consecutive points.
// track folds one point into the state and returns the value to attach to
// the point; reset clears the state (used at batch boundaries).
type stateTracker interface {
	track(p models.BatchPoint, inState bool) interface{}
	reset()
}

// stateTrackingGroup holds the compiled expression, its scope pool, and the
// tracker for a single group.
type stateTrackingGroup struct {
	stateful.Expression
	stateful.ScopePool
	tracker stateTracker
}

// StateTrackingNode implements both stateDuration and stateCount: the
// per-point bookkeeping is delegated to the stateTracker built by newTracker.
type StateTrackingNode struct {
	node
	lambda *ast.LambdaNode
	as     string

	// newTracker builds the tracker used for each newly seen group.
	newTracker func() stateTracker

	// groupsMu guards groups, which is read on every point.
	groupsMu sync.RWMutex
	groups   map[models.GroupID]*stateTrackingGroup
}

// group returns the tracking group for g, creating it — and compiling the
// state expression for it — on first use. Uses double-checked locking so the
// common case takes only the read lock.
func (stn *StateTrackingNode) group(g models.GroupID) (*stateTrackingGroup, error) {
	stn.groupsMu.RLock()
	stg := stn.groups[g]
	stn.groupsMu.RUnlock()

	if stg == nil {
		// Grab the write lock
		stn.groupsMu.Lock()
		defer stn.groupsMu.Unlock()

		// Check again now that we have the write lock
		stg = stn.groups[g]
		if stg == nil {
			// Create a new tracking group
			stg = &stateTrackingGroup{}

			var err error
			stg.Expression, err = stateful.NewExpression(stn.lambda.Expression)
			if err != nil {
				return nil, fmt.Errorf("Failed to compile expression: %v", err)
			}

			stg.ScopePool = stateful.NewScopePool(ast.FindReferenceVariables(stn.lambda.Expression))

			stg.tracker = stn.newTracker()
			stn.groups[g] = stg
		}
	}
	return stg, nil
}
// runStateTracking consumes the node's input edge, evaluates the state
// expression for every point, and writes the tracker's value into the field
// named by stn.as.
//
// Stream points whose expression errors are logged and dropped; batch points
// that error are removed from their batch. Field maps are copied before
// mutation so upstream data is not modified.
func (stn *StateTrackingNode) runStateTracking(_ []byte) error {
	// Setup working_cardinality gauge.
	valueF := func() int64 {
		stn.groupsMu.RLock()
		l := len(stn.groups)
		stn.groupsMu.RUnlock()
		return int64(l)
	}
	stn.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))

	switch stn.Provides() {
	case pipeline.StreamEdge:
		for p, ok := stn.ins[0].NextPoint(); ok; p, ok = stn.ins[0].NextPoint() {
			stn.timer.Start()
			stg, err := stn.group(p.Group)
			if err != nil {
				return err
			}
			pass, err := EvalPredicate(stg.Expression, stg.ScopePool, p.Time, p.Fields, p.Tags)
			if err != nil {
				stn.incrementErrorCount()
				stn.logger.Println("E! error while evaluating expression:", err)
				stn.timer.Stop()
				continue
			}
			p.Fields = p.Fields.Copy()
			p.Fields[stn.as] = stg.tracker.track(models.BatchPointFromPoint(p), pass)
			stn.timer.Stop()
			for _, child := range stn.outs {
				err := child.CollectPoint(p)
				if err != nil {
					return err
				}
			}
		}
	case pipeline.BatchEdge:
		for b, ok := stn.ins[0].NextBatch(); ok; b, ok = stn.ins[0].NextBatch() {
			stn.timer.Start()
			stg, err := stn.group(b.Group)
			if err != nil {
				return err
			}
			// State does not carry across batches.
			stg.tracker.reset()

			b.Points = b.ShallowCopyPoints()
			for i := 0; i < len(b.Points); {
				p := &b.Points[i]
				pass, err := EvalPredicate(stg.Expression, stg.ScopePool, p.Time, p.Fields, p.Tags)
				if err != nil {
					stn.incrementErrorCount()
					stn.logger.Println("E! error while evaluating expression:", err)
					// Drop the failing point and re-examine index i.
					b.Points = append(b.Points[:i], b.Points[i+1:]...)
					continue
				}
				i++
				p.Fields = p.Fields.Copy()
				p.Fields[stn.as] = stg.tracker.track(*p, pass)
			}
			stn.timer.Stop()
			for _, child := range stn.outs {
				err := child.CollectBatch(b)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// stateDurationTracker reports, per point, how long the state has been
// continuously active, measured from the first in-state point.
type stateDurationTracker struct {
	sd *pipeline.StateDurationNode

	// startTime is the time of the first point of the current in-state run;
	// the zero value means "not currently in state".
	startTime time.Time
}

func (sdt *stateDurationTracker) reset() {
	sdt.startTime = time.Time{}
}

// track returns the elapsed in-state duration expressed in units of sd.Unit,
// or -1 when the point is not in state.
func (sdt *stateDurationTracker) track(p models.BatchPoint, inState bool) interface{} {
	if !inState {
		sdt.startTime = time.Time{}
		return float64(-1)
	}

	if sdt.startTime.IsZero() {
		sdt.startTime = p.Time
	}
	return float64(p.Time.Sub(sdt.startTime)) / float64(sdt.sd.Unit)
}

// newStateDurationNode builds the runtime node for a stateDuration pipeline
// node; the expression is required.
func newStateDurationNode(et *ExecutingTask, sd *pipeline.StateDurationNode, l *log.Logger) (*StateTrackingNode, error) {
	if sd.Lambda == nil {
		return nil, fmt.Errorf("nil expression passed to StateDurationNode")
	}
	stn := &StateTrackingNode{
		node:       node{Node: sd, et: et, logger: l},
		lambda:     sd.Lambda,
		as:         sd.As,
		groups:     make(map[models.GroupID]*stateTrackingGroup),
		newTracker: func() stateTracker { return &stateDurationTracker{sd: sd} },
	}
	stn.node.runF = stn.runStateTracking
	return stn, nil
}
// stateCountTracker counts the number of consecutive in-state points.
type stateCountTracker struct {
	count int64
}

func (sct *stateCountTracker) reset() {
	sct.count = 0
}

// track returns the running count of consecutive in-state points, or -1 when
// the point is not in state (which also resets the count).
func (sct *stateCountTracker) track(p models.BatchPoint, inState bool) interface{} {
	if !inState {
		sct.count = 0
		return int64(-1)
	}

	sct.count++
	return sct.count
}

// newStateCountNode builds the runtime node for a stateCount pipeline node;
// the expression is required.
func newStateCountNode(et *ExecutingTask, sc *pipeline.StateCountNode, l *log.Logger) (*StateTrackingNode, error) {
	if sc.Lambda == nil {
		return nil, fmt.Errorf("nil expression passed to StateCountNode")
	}
	stn := &StateTrackingNode{
		node:       node{Node: sc, et: et, logger: l},
		lambda:     sc.Lambda,
		as:         sc.As,
		groups:     make(map[models.GroupID]*stateTrackingGroup),
		newTracker: func() stateTracker { return &stateCountTracker{} },
	}
	stn.node.runF = stn.runStateTracking
	return stn, nil
}

View File

@ -6,6 +6,7 @@ import (
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/tick/ast"
"github.com/influxdata/kapacitor/tick/stateful"
)
@ -67,7 +68,7 @@ func newFromNode(et *ExecutingTask, n *pipeline.FromNode, l *log.Logger) (*FromN
}
sn.expression = expr
sn.scopePool = stateful.NewScopePool(stateful.FindReferenceVariables(n.Lambda.Expression))
sn.scopePool = stateful.NewScopePool(ast.FindReferenceVariables(n.Lambda.Expression))
}
return sn, nil
@ -87,7 +88,7 @@ func (s *FromNode) runStream([]byte) error {
pt.Time = pt.Time.Round(s.s.Round)
}
dims.TagNames = s.dimensions
pt = setGroupOnPoint(pt, s.allDimensions, dims)
pt = setGroupOnPoint(pt, s.allDimensions, dims, nil)
s.timer.Pause()
for _, child := range s.outs {
err := child.CollectPoint(pt)
@ -114,6 +115,7 @@ func (s *FromNode) matches(p models.Point) bool {
}
if s.expression != nil {
if pass, err := EvalPredicate(s.expression, s.scopePool, p.Time, p.Fields, p.Tags); err != nil {
s.incrementErrorCount()
s.logger.Println("E! error while evaluating WHERE expression:", err)
return false
} else {

View File

@ -371,29 +371,31 @@ func (et *ExecutingTask) EDot(labels bool) []byte {
var buf bytes.Buffer
buf.Write([]byte("digraph "))
buf.Write([]byte(et.Task.ID))
buf.Write([]byte(" {\n"))
buf.WriteString("digraph ")
buf.WriteString(et.Task.ID)
buf.WriteString(" {\n")
// Write graph attributes
unit := "points"
if et.Task.Type == BatchTask {
unit = "batches"
}
buf.WriteString("graph [")
if labels {
buf.Write([]byte(
fmt.Sprintf("graph [label=\"Throughput: %0.2f %s/s\"];\n",
buf.WriteString(
fmt.Sprintf("label=\"Throughput: %0.2f %s/s\" forcelabels=true pad=\"0.8,0.5\"",
et.getThroughput(),
unit,
),
))
)
} else {
buf.Write([]byte(
fmt.Sprintf("graph [throughput=\"%0.2f %s/s\"];\n",
buf.WriteString(
fmt.Sprintf("throughput=\"%0.2f %s/s\"",
et.getThroughput(),
unit,
),
))
)
}
buf.WriteString("];\n")
_ = et.walk(func(n Node) error {
n.edot(&buf, labels)
@ -452,8 +454,12 @@ func (et *ExecutingTask) createNode(p pipeline.Node, l *log.Logger) (n Node, err
n, err = newWindowNode(et, t, l)
case *pipeline.HTTPOutNode:
n, err = newHTTPOutNode(et, t, l)
case *pipeline.HTTPPostNode:
n, err = newHTTPPostNode(et, t, l)
case *pipeline.InfluxDBOutNode:
n, err = newInfluxDBOutNode(et, t, l)
case *pipeline.KapacitorLoopbackNode:
n, err = newKapacitorLoopbackNode(et, t, l)
case *pipeline.AlertNode:
n, err = newAlertNode(et, t, l)
case *pipeline.GroupByNode:
@ -492,6 +498,10 @@ func (et *ExecutingTask) createNode(p pipeline.Node, l *log.Logger) (n Node, err
n, err = newCombineNode(et, t, l)
case *pipeline.K8sAutoscaleNode:
n, err = newK8sAutoscaleNode(et, t, l)
case *pipeline.StateDurationNode:
n, err = newStateDurationNode(et, t, l)
case *pipeline.StateCountNode:
n, err = newStateCountNode(et, t, l)
default:
return nil, fmt.Errorf("unknown pipeline node type %T", p)
}

View File

@ -14,12 +14,16 @@ import (
"github.com/influxdata/kapacitor/influxdb"
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
alertservice "github.com/influxdata/kapacitor/services/alert"
"github.com/influxdata/kapacitor/services/alerta"
"github.com/influxdata/kapacitor/services/hipchat"
"github.com/influxdata/kapacitor/services/httpd"
"github.com/influxdata/kapacitor/services/httppost"
k8s "github.com/influxdata/kapacitor/services/k8s/client"
"github.com/influxdata/kapacitor/services/opsgenie"
"github.com/influxdata/kapacitor/services/pagerduty"
"github.com/influxdata/kapacitor/services/pushover"
"github.com/influxdata/kapacitor/services/sensu"
"github.com/influxdata/kapacitor/services/slack"
"github.com/influxdata/kapacitor/services/smtp"
"github.com/influxdata/kapacitor/services/snmptrap"
@ -72,14 +76,9 @@ type TaskMaster struct {
UDFService UDFService
AlertService interface {
EventState(topic, event string) (alert.EventState, bool)
UpdateEvent(topic string, event alert.EventState) error
Collect(event alert.Event) error
RegisterHandler(topics []string, h alert.Handler)
DeregisterHandler(topics []string, h alert.Handler)
RestoreTopic(topic string) error
CloseTopic(topic string) error
DeleteTopic(topic string) error
alertservice.AnonHandlerRegistrar
alertservice.Events
alertservice.TopicPersister
}
InfluxDBService interface {
NewNamedClient(name string) (influxdb.Client, error)
@ -101,6 +100,13 @@ type TaskMaster struct {
Global() bool
Handler(pagerduty.HandlerConfig, *log.Logger) alert.Handler
}
PushoverService interface {
Handler(pushover.HandlerConfig, *log.Logger) alert.Handler
}
HTTPPostService interface {
Handler(httppost.HandlerConfig, *log.Logger) alert.Handler
Endpoint(string) (*httppost.Endpoint, bool)
}
SlackService interface {
Global() bool
StateChangesOnly() bool
@ -124,7 +130,7 @@ type TaskMaster struct {
Handler(alerta.HandlerConfig, *log.Logger) (alert.Handler, error)
}
SensuService interface {
Handler(*log.Logger) alert.Handler
Handler(sensu.HandlerConfig, *log.Logger) (alert.Handler, error)
}
TalkService interface {
Handler(*log.Logger) alert.Handler
@ -133,7 +139,7 @@ type TaskMaster struct {
NewTimer(timer.Setter) timer.Timer
}
K8sService interface {
Client() (k8s.Client, error)
Client(string) (k8s.Client, error)
}
LogService LogService
@ -143,6 +149,8 @@ type TaskMaster struct {
// Incoming streams
writePointsIn StreamCollector
writesClosed bool
writesMu sync.RWMutex
// Forks of incoming streams
// We are mapping from (db, rp, measurement) to map of task ids to their edges
@ -211,6 +219,7 @@ func (tm *TaskMaster) New(id string) *TaskMaster {
n.OpsGenieService = tm.OpsGenieService
n.VictorOpsService = tm.VictorOpsService
n.PagerDutyService = tm.PagerDutyService
n.PushoverService = tm.PushoverService
n.SlackService = tm.SlackService
n.TelegramService = tm.TelegramService
n.SNMPTrapService = tm.SNMPTrapService
@ -254,12 +263,18 @@ func (tm *TaskMaster) StopTasks() {
}
func (tm *TaskMaster) Close() error {
tm.Drain()
tm.mu.Lock()
defer tm.mu.Unlock()
if tm.closed {
closed := tm.closed
tm.mu.Unlock()
if closed {
return ErrTaskMasterClosed
}
tm.Drain()
tm.mu.Lock()
defer tm.mu.Unlock()
tm.closed = true
for _, et := range tm.tasks {
_ = tm.stopTask(et.Task.ID)
@ -356,6 +371,10 @@ func (tm *TaskMaster) waitForForks() {
tm.drained = true
tm.mu.Unlock()
tm.writesMu.Lock()
tm.writesClosed = true
tm.writesMu.Unlock()
// Close the write points in stream
tm.writePointsIn.Close()
@ -373,7 +392,7 @@ func (tm *TaskMaster) CreateTICKScope() *stateful.Scope {
info, _ := tm.UDFService.Info(f)
scope.SetDynamicMethod(
f,
stateful.DynamicMethod(func(self interface{}, args ...interface{}) (interface{}, error) {
func(self interface{}, args ...interface{}) (interface{}, error) {
parent, ok := self.(pipeline.Node)
if !ok {
return nil, fmt.Errorf("cannot call %s on %T", f, self)
@ -386,7 +405,7 @@ func (tm *TaskMaster) CreateTICKScope() *stateful.Scope {
info.Options,
)
return udf, nil
}),
},
)
}
}
@ -621,7 +640,9 @@ func (tm *TaskMaster) forkPoint(p models.Point) {
}
func (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyLevel imodels.ConsistencyLevel, points []imodels.Point) error {
if tm.closed {
tm.writesMu.RLock()
defer tm.writesMu.RUnlock()
if tm.writesClosed {
return ErrTaskMasterClosed
}
if retentionPolicy == "" {
@ -645,6 +666,18 @@ func (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyL
return nil
}
func (tm *TaskMaster) WriteKapacitorPoint(p models.Point) error {
tm.writesMu.RLock()
defer tm.writesMu.RUnlock()
if tm.writesClosed {
return ErrTaskMasterClosed
}
p.Group = models.NilGroup
p.Dimensions = models.Dimensions{}
return tm.writePointsIn.CollectPoint(p)
}
func (tm *TaskMaster) NewFork(taskName string, dbrps []DBRP, measurements []string) (*Edge, error) {
tm.mu.Lock()
defer tm.mu.Unlock()

View File

@ -140,24 +140,24 @@ fi
case $ENVIRONMENT_INDEX in
0)
# 64 bit tests
run_test_docker Dockerfile_build_ubuntu64 test_64bit --debug --test --generate $no_uncommitted_arg
run_test_docker Dockerfile_build_ubuntu64 test_64bit --test --generate $no_uncommitted_arg
rc=$?
;;
1)
# 64 bit race tests
GORACE="halt_on_error=1"
run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --debug --test --generate $no_uncommitted_arg --race
run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --test --generate $no_uncommitted_arg --race
rc=$?
;;
2)
# 32 bit tests
run_test_docker Dockerfile_build_ubuntu32 test_32bit --debug --test --generate $no_uncommitted_arg --arch=i386
run_test_docker Dockerfile_build_ubuntu32 test_32bit --test --generate $no_uncommitted_arg --arch=i386
rc=$?
;;
#3)
# # 64 bit tests on golang HEAD
# GO_CHECKOUT=HEAD
# run_test_docker Dockerfile_build_ubuntu64_git test_64bit_go_tip --debug --test --generate $no_uncommitted_arg
# run_test_docker Dockerfile_build_ubuntu64_git test_64bit_go_tip --test --generate $no_uncommitted_arg
# rc=$?
# ;;
"save")

View File

@ -0,0 +1,45 @@
package ast
// FindReferenceVariables walks all nodes and returns a list of name from reference variables.
func FindReferenceVariables(nodes ...Node) []string {
	seen := make(map[string]bool)
	for _, node := range nodes {
		// Collect each distinct reference name; Walk's return values are
		// irrelevant here since the callback never errors or mutates.
		Walk(node, func(n Node) (Node, error) {
			if ref, ok := n.(*ReferenceNode); ok {
				seen[ref.Reference] = true
			}
			return n, nil
		})
	}
	names := make([]string, 0, len(seen))
	for name := range seen {
		names = append(names, name)
	}
	return names
}
// FindFunctionCalls walks all nodes and returns a list of name of function calls.
func FindFunctionCalls(nodes ...Node) []string {
	seen := make(map[string]bool)
	for _, node := range nodes {
		// Record every distinct function name encountered in the tree.
		Walk(node, func(n Node) (Node, error) {
			if fn, ok := n.(*FunctionNode); ok {
				seen[fn.Func] = true
			}
			return n, nil
		})
	}
	names := make([]string, 0, len(seen))
	for name := range seen {
		names = append(names, name)
	}
	return names
}

View File

@ -29,6 +29,12 @@ type Node interface {
Equal(interface{}) bool
}
// Format renders the node n to its textual form with no leading
// indentation and returns the result as a string.
func Format(n Node) string {
	buf := new(bytes.Buffer)
	n.Format(buf, "", false)
	return buf.String()
}
// Represents a node that can have a comment associated with it.
type commentedNode interface {
SetComment(c *CommentNode)

View File

@ -10,7 +10,7 @@ import (
type ValueType uint8
const (
InvalidType ValueType = iota << 1
InvalidType ValueType = iota
TFloat
TInt
TString
@ -21,8 +21,13 @@ const (
TLambda
TList
TStar
TMissing
)
// Missing represents an absent value; TypeOf maps *Missing to TMissing.
type Missing struct{}

// MissingValue is the shared *Missing instance used to signal a missing value.
var MissingValue = &Missing{}
func (v ValueType) String() string {
switch v {
case TFloat:
@ -45,6 +50,8 @@ func (v ValueType) String() string {
return "list"
case TStar:
return "star"
case TMissing:
return "missing"
}
return "invalid type"
@ -72,6 +79,8 @@ func TypeOf(v interface{}) ValueType {
return TList
case *StarNode:
return TStar
case *Missing:
return TMissing
default:
return InvalidType
}
@ -99,6 +108,8 @@ func ZeroValue(t ValueType) interface{} {
return []interface{}(nil)
case TStar:
return (*StarNode)(nil)
case TMissing:
return (*Missing)(nil)
default:
return errors.New("invalid type")
}

View File

@ -22,6 +22,7 @@ func Test_TypeOf(t *testing.T) {
{value: regexp.MustCompile("\\d"), valueType: ast.TRegex},
{value: time.Duration(5), valueType: ast.TDuration},
{value: time.Time{}, valueType: ast.TTime},
{value: ast.MissingValue, valueType: ast.TMissing},
{value: t, valueType: ast.InvalidType},
}

View File

@ -0,0 +1,70 @@
package ast
import "errors"
// Walk calls f on all nodes reachable from the root node.
// The node returned will replace the node provided within the AST.
// Returning an error from f will stop the walking process and f will not be called on any other nodes.
func Walk(root Node, f func(n Node) (Node, error)) (Node, error) {
	rep, err := f(root)
	if err != nil {
		return nil, err
	}
	// recurse walks one child subtree with the same callback.
	recurse := func(child Node) (Node, error) {
		return Walk(child, f)
	}
	switch n := rep.(type) {
	case *LambdaNode:
		c, err := recurse(n.Expression)
		if err != nil {
			return nil, err
		}
		n.Expression = c
	case *UnaryNode:
		c, err := recurse(n.Node)
		if err != nil {
			return nil, err
		}
		n.Node = c
	case *BinaryNode:
		c, err := recurse(n.Left)
		if err != nil {
			return nil, err
		}
		n.Left = c
		c, err = recurse(n.Right)
		if err != nil {
			return nil, err
		}
		n.Right = c
	case *DeclarationNode:
		c, err := recurse(n.Left)
		if err != nil {
			return nil, err
		}
		// The left side of a declaration must stay an identifier;
		// a replacement of any other type would corrupt the AST.
		ident, ok := c.(*IdentifierNode)
		if !ok {
			return nil, errors.New("declaration node must always have an IdentifierNode")
		}
		n.Left = ident
		c, err = recurse(n.Right)
		if err != nil {
			return nil, err
		}
		n.Right = c
	case *FunctionNode:
		for i := range n.Args {
			c, err := recurse(n.Args[i])
			if err != nil {
				return nil, err
			}
			n.Args[i] = c
		}
	case *ProgramNode:
		for i := range n.Nodes {
			c, err := recurse(n.Nodes[i])
			if err != nil {
				return nil, err
			}
			n.Nodes[i] = c
		}
	}
	return rep, nil
}

View File

@ -0,0 +1,137 @@
package ast_test
import (
"reflect"
"testing"
"github.com/influxdata/kapacitor/tick/ast"
)
func TestWalk(t *testing.T) {
	// AST for `var x = lambda: "value" < 10`
	root := &ast.ProgramNode{
		Nodes: []ast.Node{
			&ast.DeclarationNode{
				Left: &ast.IdentifierNode{
					Ident: "x",
				},
				Right: &ast.LambdaNode{
					Expression: &ast.BinaryNode{
						Operator: ast.TokenLess,
						Left: &ast.ReferenceNode{
							Reference: "value",
						},
						Right: &ast.NumberNode{
							IsInt: true,
							Base:  10,
							Int64: 10,
						},
					},
				},
			},
		},
	}
	// Expected pre-order visitation of every node in the tree.
	want := []string{
		"*ast.ProgramNode",
		"*ast.DeclarationNode",
		"*ast.IdentifierNode",
		"*ast.LambdaNode",
		"*ast.BinaryNode",
		"*ast.ReferenceNode",
		"*ast.NumberNode",
	}
	got := make([]string, 0, len(want))
	visit := func(n ast.Node) (ast.Node, error) {
		got = append(got, reflect.TypeOf(n).String())
		return n, nil
	}
	if _, err := ast.Walk(root, visit); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("unexpected walk list:\ngot\n%v\nexp\n%v\n", got, want)
	}
}
func TestWalk_Mutate(t *testing.T) {
	// AST for `lambda: "value" < VAR_A AND "value" > VAR_B`
	root := &ast.LambdaNode{
		Expression: &ast.BinaryNode{
			Operator: ast.TokenAnd,
			Left: &ast.BinaryNode{
				Operator: ast.TokenLess,
				Left: &ast.ReferenceNode{
					Reference: "value",
				},
				Right: &ast.IdentifierNode{
					Ident: "VAR_A",
				},
			},
			Right: &ast.BinaryNode{
				Operator: ast.TokenGreater,
				Left: &ast.ReferenceNode{
					Reference: "value",
				},
				Right: &ast.IdentifierNode{
					Ident: "VAR_B",
				},
			},
		},
	}
	// Replace the IdentifierNodes with a NumberNode
	values := map[string]int64{
		"VAR_A": 42,
		"VAR_B": 3,
	}
	swap := func(n ast.Node) (ast.Node, error) {
		ident, ok := n.(*ast.IdentifierNode)
		if !ok {
			return n, nil
		}
		return &ast.NumberNode{
			IsInt: true,
			Int64: values[ident.Ident],
			Base:  10,
		}, nil
	}
	if _, err := ast.Walk(root, swap); err != nil {
		t.Fatal(err)
	}
	// Walk again and confirm the identifiers are now numbers.
	want := []string{
		"*ast.LambdaNode",
		"*ast.BinaryNode",
		"*ast.BinaryNode",
		"*ast.ReferenceNode",
		"*ast.NumberNode",
		"*ast.BinaryNode",
		"*ast.ReferenceNode",
		"*ast.NumberNode",
	}
	got := make([]string, 0, len(want))
	visit := func(n ast.Node) (ast.Node, error) {
		got = append(got, reflect.TypeOf(n).String())
		return n, nil
	}
	if _, err := ast.Walk(root, visit); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("unexpected walk list:\ngot\n%v\nexp\n%v\n", got, want)
	}
	// Check the lambda formatted lambda expression
	expStr := `lambda: "value" < 42 AND "value" > 3`
	str := ast.Format(root)
	if expStr != str {
		t.Errorf("unexpected lambda str: got %s exp %s", str, expStr)
	}
}

View File

@ -1,10 +1,6 @@
package tick
import (
"bytes"
"github.com/influxdata/kapacitor/tick/ast"
)
import "github.com/influxdata/kapacitor/tick/ast"
// Formats a TICKscript according to the standard.
func Format(script string) (string, error) {
@ -12,8 +8,5 @@ func Format(script string) (string, error) {
if err != nil {
return "", err
}
var buf bytes.Buffer
buf.Grow(len(script))
root.Format(&buf, "", false)
return buf.String(), nil
return ast.Format(root), nil
}

View File

@ -117,15 +117,15 @@ func NewEvalBinaryNode(node *ast.BinaryNode) (*EvalBinaryNode, error) {
return b, nil
}
func (n *EvalBinaryNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalBinaryNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
if n.constReturnType == ast.InvalidType {
var err error
// We are dynamic and we need to figure out our type
n.leftType, err = n.leftEvaluator.Type(scope, executionState)
n.leftType, err = n.leftEvaluator.Type(scope)
if err != nil {
return ast.InvalidType, err
}
n.rightType, err = n.rightEvaluator.Type(scope, executionState)
n.rightType, err = n.rightEvaluator.Type(scope)
if err != nil {
return ast.InvalidType, err
}
@ -154,6 +154,10 @@ func (n *EvalBinaryNode) EvalTime(scope *Scope, executionState ExecutionState) (
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: n.constReturnType}
}
// EvalMissing is the missing-type guard for a binary expression.
// A binary expression never produces the missing type, so this always
// fails, reporting the node's constant return type as the actual type.
func (n *EvalBinaryNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
	return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: n.constReturnType}
}
func (e *EvalBinaryNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
result, err := e.eval(scope, executionState)
if err != nil {
@ -182,7 +186,13 @@ func (e *EvalBinaryNode) EvalString(scope *Scope, executionState ExecutionState)
// EvalBool executes the expression based on eval bool
func (e *EvalBinaryNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
result, err := e.eval(scope, executionState)
var result resultContainer
var err *ErrSide
if e.leftEvaluator.IsDynamic() || e.rightEvaluator.IsDynamic() {
result, err = e.evaluateDynamicNode(scope, executionState, e.leftEvaluator, e.rightEvaluator)
} else {
result, err = e.eval(scope, executionState)
}
if err != nil {
return false, err.error
}
@ -280,13 +290,12 @@ func (e *EvalBinaryNode) evaluateDynamicNode(scope *Scope, executionState Execut
// For example: "count() == 1"
// 1. we evaluate the left side and counter is 1 (upper ^ in this function)
// 2. we evaluate the second time in "EvalBool"
typeExecutionState := CreateExecutionState()
if leftType, err = left.Type(scope, typeExecutionState); err != nil {
if leftType, err = left.Type(scope); err != nil {
return emptyResultContainer, &ErrSide{error: err, IsLeft: true}
}
if rightType, err = right.Type(scope, typeExecutionState); err != nil {
if rightType, err = right.Type(scope); err != nil {
return emptyResultContainer, &ErrSide{error: err, IsRight: true}
}
@ -301,8 +310,7 @@ func (e *EvalBinaryNode) evaluateDynamicNode(scope *Scope, executionState Execut
// Return an understandable error which is most specific to the issue.
func (e *EvalBinaryNode) determineError(scope *Scope, executionState ExecutionState) error {
if scope != nil {
typeExecutionState := CreateExecutionState()
leftType, err := e.leftEvaluator.Type(scope, typeExecutionState)
leftType, err := e.leftEvaluator.Type(scope)
if err != nil {
return fmt.Errorf("can't get the type of the left node: %s", err)
}
@ -312,7 +320,15 @@ func (e *EvalBinaryNode) determineError(scope *Scope, executionState ExecutionSt
return errors.New("left value is invalid value type")
}
rightType, err := e.rightEvaluator.Type(scope, typeExecutionState)
if leftType == ast.TMissing {
ref, ok := e.leftEvaluator.(*EvalReferenceNode)
if !ok {
return fmt.Errorf("expected leftEvaluator to be *EvalReferenceNode got %T", e.leftEvaluator)
}
return fmt.Errorf("left reference value \"%s\" is missing value", ref.Node.Reference)
}
rightType, err := e.rightEvaluator.Type(scope)
if err != nil {
return fmt.Errorf("can't get the type of the right node: %s", err)
}
@ -321,6 +337,14 @@ func (e *EvalBinaryNode) determineError(scope *Scope, executionState ExecutionSt
if rightType == ast.InvalidType {
return errors.New("right value is invalid value type")
}
if rightType == ast.TMissing {
ref, ok := e.rightEvaluator.(*EvalReferenceNode)
if !ok {
return fmt.Errorf("expected rightEvaluator to be *EvalReferenceNode got %T", e.rightEvaluator)
}
return fmt.Errorf("right reference value \"%s\" is missing value", ref.Node.Reference)
}
}
if e.leftType != ast.InvalidType && !typeToBinaryOperators[e.leftType][e.operator] {

View File

@ -11,7 +11,7 @@ type EvalBoolNode struct {
Node *ast.BoolNode
}
func (n *EvalBoolNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalBoolNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return ast.TBool, nil
}
@ -38,9 +38,15 @@ func (n *EvalBoolNode) EvalRegex(scope *Scope, executionState ExecutionState) (*
func (n *EvalBoolNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TBool}
}
func (n *EvalBoolNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
return 0, ErrTypeGuardFailed{RequestedType: ast.TDuration, ActualType: ast.TBool}
}
// EvalMissing is the missing-type guard for a bool literal.
// A bool literal can never be the missing type, so this always fails.
func (n *EvalBoolNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
	return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TBool}
}
func (n *EvalBoolNode) IsDynamic() bool {
return false
}

View File

@ -11,7 +11,7 @@ type EvalDurationNode struct {
Duration time.Duration
}
func (n *EvalDurationNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalDurationNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return ast.TDuration, nil
}
@ -30,9 +30,11 @@ func (n *EvalDurationNode) EvalString(scope *Scope, executionState ExecutionStat
func (n *EvalDurationNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: ast.TDuration}
}
func (n *EvalDurationNode) EvalRegex(scope *Scope, executionState ExecutionState) (*regexp.Regexp, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TRegex, ActualType: ast.TDuration}
}
func (n *EvalDurationNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TDuration}
}
@ -40,6 +42,11 @@ func (n *EvalDurationNode) EvalTime(scope *Scope, executionState ExecutionState)
func (n *EvalDurationNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
return n.Duration, nil
}
func (n *EvalDurationNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TDuration}
}
func (n *EvalDurationNode) IsDynamic() bool {
return false
}

View File

@ -11,7 +11,7 @@ type EvalFloatNode struct {
Float64 float64
}
func (n *EvalFloatNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalFloatNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return ast.TFloat, nil
}
@ -30,15 +30,23 @@ func (n *EvalFloatNode) EvalString(scope *Scope, executionState ExecutionState)
func (n *EvalFloatNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: ast.TFloat}
}
func (n *EvalFloatNode) EvalRegex(scope *Scope, executionState ExecutionState) (*regexp.Regexp, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TRegex, ActualType: ast.TFloat}
}
func (n *EvalFloatNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TFloat}
}
func (n *EvalFloatNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
return 0, ErrTypeGuardFailed{RequestedType: ast.TDuration, ActualType: ast.TFloat}
}
func (n *EvalFloatNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TFloat}
}
func (n *EvalFloatNode) IsDynamic() bool {
return false
}

View File

@ -3,6 +3,7 @@ package stateful
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/influxdata/kapacitor/tick/ast"
@ -31,16 +32,32 @@ func NewEvalFunctionNode(funcNode *ast.FunctionNode) (*EvalFunctionNode, error)
return evalFuncNode, nil
}
func (n *EvalFunctionNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
// PERF: today we are evaluating the function, it will be much faster if will type info the function it self
result, err := n.callFunction(scope.(*Scope), executionState)
if err != nil {
return ast.InvalidType, err
func (n *EvalFunctionNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
f := lookupFunc(n.funcName, builtinFuncs, scope)
if f == nil {
return ast.InvalidType, fmt.Errorf("undefined function: %q", n.funcName)
}
signature := f.Signature()
domain := Domain{}
for i, argEvaluator := range n.argsEvaluators {
t, err := argEvaluator.Type(scope)
if err != nil {
return ast.InvalidType, fmt.Errorf("Failed to handle %v argument: %v", i+1, err)
}
domain[i] = t
}
// We can't cache here the result (although it's very tempting ;))
// because can't trust function to return always the same consistent type
return ast.TypeOf(result), nil
if gotLen, expLen := len(n.argsEvaluators), len(domain); gotLen > expLen {
return ast.InvalidType, ErrWrongFuncSignature{Name: n.funcName, DomainProvided: domain, Func: f}
}
retType, ok := signature[domain]
if !ok {
return ast.InvalidType, ErrWrongFuncSignature{Name: n.funcName, DomainProvided: domain, Func: f}
}
return retType, nil
}
func (n *EvalFunctionNode) IsDynamic() bool {
@ -59,10 +76,9 @@ func (n *EvalFunctionNode) callFunction(scope *Scope, executionState ExecutionSt
args = append(args, value)
}
f := executionState.Funcs[n.funcName]
f := lookupFunc(n.funcName, executionState.Funcs, scope)
if f == nil {
return nil, fmt.Errorf("undefined function: %q", n.funcName)
return ast.InvalidType, fmt.Errorf("undefined function: %q", n.funcName)
}
ret, err := f.Call(args...)
@ -164,10 +180,22 @@ func (n *EvalFunctionNode) EvalBool(scope *Scope, executionState ExecutionState)
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: ast.TypeOf(refValue)}
}
// EvalMissing calls the function and returns its result when the result
// is the missing value; any other result type fails the type guard.
func (n *EvalFunctionNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
	v, err := n.callFunction(scope, executionState)
	if err != nil {
		return nil, err
	}
	m, ok := v.(*ast.Missing)
	if !ok {
		return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TypeOf(v)}
	}
	return m, nil
}
// eval - generic evaluation until we have reflection/introspection capabillities so we can know the type of args
// and return type, we can remove this entirely
func eval(n NodeEvaluator, scope *Scope, executionState ExecutionState) (interface{}, error) {
retType, err := n.Type(scope, CreateExecutionState())
retType, err := n.Type(scope)
if err != nil {
return nil, err
}
@ -187,8 +215,32 @@ func eval(n NodeEvaluator, scope *Scope, executionState ExecutionState) (interfa
return n.EvalTime(scope, executionState)
case ast.TDuration:
return n.EvalDuration(scope, executionState)
case ast.TMissing:
v, err := n.EvalMissing(scope, executionState)
if err != nil && !strings.Contains(err.Error(), "missing value") {
return v, err
}
return v, nil
default:
return nil, fmt.Errorf("function arg expression returned unexpected type %s", retType)
}
}
// lookupFunc resolves a function by name, preferring the provided funcs
// map and falling back to the scope's dynamic functions.
func lookupFunc(name string, funcs Funcs, scope ReadOnlyScope) Func {
	if f := funcs[name]; f != nil {
		return f
	}
	if df := scope.DynamicFunc(name); df != nil {
		return df
	}
	// Return the untyped nil explicitly rather than the result of
	// scope.DynamicFunc(name): boxing a nil *DynamicFunc into the Func
	// interface yields a non-nil interface value, which broke the
	// f == nil check in callFunction.
	return nil
}

View File

@ -11,7 +11,7 @@ type EvalIntNode struct {
Int64 int64
}
func (n *EvalIntNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalIntNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return ast.TInt, nil
}
@ -30,15 +30,23 @@ func (n *EvalIntNode) EvalString(scope *Scope, executionState ExecutionState) (s
func (n *EvalIntNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: ast.TInt}
}
func (n *EvalIntNode) EvalRegex(scope *Scope, executionState ExecutionState) (*regexp.Regexp, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TRegex, ActualType: ast.TInt}
}
func (n *EvalIntNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TInt}
}
func (n *EvalIntNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
return 0, ErrTypeGuardFailed{RequestedType: ast.TDuration, ActualType: ast.TInt}
}
func (n *EvalIntNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TInt}
}
func (n *EvalIntNode) IsDynamic() bool {
return false
}

View File

@ -28,11 +28,11 @@ func NewEvalLambdaNode(lambda *ast.LambdaNode) (*EvalLambdaNode, error) {
}, nil
}
func (n *EvalLambdaNode) Type(scope ReadOnlyScope, _ ExecutionState) (ast.ValueType, error) {
func (n *EvalLambdaNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
if n.constReturnType == ast.InvalidType {
// We are dynamic and we need to figure out our type
// Do NOT cache this result in n.returnType since it can change.
return n.nodeEvaluator.Type(scope, n.state)
return n.nodeEvaluator.Type(scope)
}
return n.constReturnType, nil
}
@ -42,7 +42,7 @@ func (n *EvalLambdaNode) IsDynamic() bool {
}
func (n *EvalLambdaNode) EvalRegex(scope *Scope, _ ExecutionState) (*regexp.Regexp, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return nil, err
}
@ -54,7 +54,7 @@ func (n *EvalLambdaNode) EvalRegex(scope *Scope, _ ExecutionState) (*regexp.Rege
}
func (n *EvalLambdaNode) EvalTime(scope *Scope, _ ExecutionState) (time.Time, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return time.Time{}, err
}
@ -62,7 +62,7 @@ func (n *EvalLambdaNode) EvalTime(scope *Scope, _ ExecutionState) (time.Time, er
}
func (n *EvalLambdaNode) EvalDuration(scope *Scope, _ ExecutionState) (time.Duration, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return 0, err
}
@ -74,7 +74,7 @@ func (n *EvalLambdaNode) EvalDuration(scope *Scope, _ ExecutionState) (time.Dura
}
func (n *EvalLambdaNode) EvalString(scope *Scope, _ ExecutionState) (string, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return "", err
}
@ -86,7 +86,7 @@ func (n *EvalLambdaNode) EvalString(scope *Scope, _ ExecutionState) (string, err
}
func (n *EvalLambdaNode) EvalFloat(scope *Scope, _ ExecutionState) (float64, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return 0, err
}
@ -98,7 +98,7 @@ func (n *EvalLambdaNode) EvalFloat(scope *Scope, _ ExecutionState) (float64, err
}
func (n *EvalLambdaNode) EvalInt(scope *Scope, _ ExecutionState) (int64, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return 0, err
}
@ -110,7 +110,7 @@ func (n *EvalLambdaNode) EvalInt(scope *Scope, _ ExecutionState) (int64, error)
}
func (n *EvalLambdaNode) EvalBool(scope *Scope, _ ExecutionState) (bool, error) {
typ, err := n.Type(scope, n.state)
typ, err := n.Type(scope)
if err != nil {
return false, err
}
@ -120,3 +120,15 @@ func (n *EvalLambdaNode) EvalBool(scope *Scope, _ ExecutionState) (bool, error)
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: typ}
}
// EvalMissing evaluates the lambda's expression as the missing type.
// It fails the type guard when the expression's type is anything else.
func (n *EvalLambdaNode) EvalMissing(scope *Scope, _ ExecutionState) (*ast.Missing, error) {
	typ, err := n.Type(scope)
	if err != nil {
		return nil, err
	}
	if typ == ast.TMissing {
		return n.nodeEvaluator.EvalMissing(scope, n.state)
	}
	// Bug fix: report TMissing as the requested type. The original said
	// ast.TBool, copied from EvalBool, which made the type-guard error
	// claim the caller asked for a bool.
	return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: typ}
}

View File

@ -13,7 +13,7 @@ type EvalReferenceNode struct {
}
// getReferenceValue - core method for evaluating function where all NodeEvaluator methods should use
func (n *EvalReferenceNode) getReferenceValue(scope *Scope, executionState ExecutionState) (interface{}, error) {
func (n *EvalReferenceNode) getReferenceValue(scope *Scope) (interface{}, error) {
value, err := scope.Get(n.Node.Reference)
if err != nil {
return nil, err
@ -25,8 +25,8 @@ func (n *EvalReferenceNode) getReferenceValue(scope *Scope, executionState Execu
return value, nil
}
func (n *EvalReferenceNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
value, err := n.getReferenceValue(scope.(*Scope), executionState)
func (n *EvalReferenceNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
value, err := n.getReferenceValue(scope.(*Scope))
if err != nil {
return ast.InvalidType, err
}
@ -39,7 +39,7 @@ func (n *EvalReferenceNode) IsDynamic() bool {
}
func (n *EvalReferenceNode) EvalRegex(scope *Scope, executionState ExecutionState) (*regexp.Regexp, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return nil, err
}
@ -48,11 +48,16 @@ func (n *EvalReferenceNode) EvalRegex(scope *Scope, executionState ExecutionStat
return regexValue, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return nil, fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return nil, ErrTypeGuardFailed{RequestedType: ast.TRegex, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return time.Time{}, err
}
@ -61,11 +66,16 @@ func (n *EvalReferenceNode) EvalTime(scope *Scope, executionState ExecutionState
return timeValue, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return time.Time{}, fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return 0, err
}
@ -74,11 +84,16 @@ func (n *EvalReferenceNode) EvalDuration(scope *Scope, executionState ExecutionS
return durValue, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return 0, fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return 0, ErrTypeGuardFailed{RequestedType: ast.TDuration, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalString(scope *Scope, executionState ExecutionState) (string, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return "", err
}
@ -87,11 +102,16 @@ func (n *EvalReferenceNode) EvalString(scope *Scope, executionState ExecutionSta
return stringValue, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return "", fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return "", ErrTypeGuardFailed{RequestedType: ast.TString, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalFloat(scope *Scope, executionState ExecutionState) (float64, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return float64(0), err
}
@ -100,11 +120,16 @@ func (n *EvalReferenceNode) EvalFloat(scope *Scope, executionState ExecutionStat
return float64Value, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return float64(0), fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return float64(0), ErrTypeGuardFailed{RequestedType: ast.TFloat, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalInt(scope *Scope, executionState ExecutionState) (int64, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return int64(0), err
}
@ -113,11 +138,16 @@ func (n *EvalReferenceNode) EvalInt(scope *Scope, executionState ExecutionState)
return int64Value, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return int64(0), fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return int64(0), ErrTypeGuardFailed{RequestedType: ast.TInt, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
refValue, err := n.getReferenceValue(scope, executionState)
refValue, err := n.getReferenceValue(scope)
if err != nil {
return false, err
}
@ -126,5 +156,24 @@ func (n *EvalReferenceNode) EvalBool(scope *Scope, executionState ExecutionState
return boolValue, nil
}
refType := ast.TypeOf(refValue)
if refType == ast.TMissing {
return false, fmt.Errorf("reference \"%s\" is missing value", n.Node.Reference)
}
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: ast.TypeOf(refValue)}
}
func (n *EvalReferenceNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
refValue, err := n.getReferenceValue(scope)
if err != nil {
return nil, err
}
if missingVal, isMissing := refValue.(*ast.Missing); isMissing {
// This error gets checked in the eval method of a function node
return missingVal, fmt.Errorf("missing value: \"%v\"", n.Node.Reference)
}
return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TypeOf(refValue)}
}

View File

@ -11,7 +11,7 @@ type EvalRegexNode struct {
Node *ast.RegexNode
}
func (n *EvalRegexNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalRegexNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return ast.TRegex, nil
}
@ -34,12 +34,19 @@ func (n *EvalRegexNode) EvalInt(scope *Scope, executionState ExecutionState) (in
func (n *EvalRegexNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
return false, ErrTypeGuardFailed{RequestedType: ast.TBool, ActualType: ast.TRegex}
}
func (n *EvalRegexNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TRegex}
}
func (n *EvalRegexNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
return 0, ErrTypeGuardFailed{RequestedType: ast.TDuration, ActualType: ast.TRegex}
}
func (n *EvalRegexNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TRegex}
}
func (n *EvalRegexNode) IsDynamic() bool {
return false
}

View File

@ -11,7 +11,7 @@ type EvalStringNode struct {
Node *ast.StringNode
}
func (n *EvalStringNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalStringNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return ast.TString, nil
}
@ -34,12 +34,19 @@ func (n *EvalStringNode) EvalBool(scope *Scope, executionState ExecutionState) (
func (n *EvalStringNode) EvalRegex(scope *Scope, executionState ExecutionState) (*regexp.Regexp, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TRegex, ActualType: ast.TString}
}
func (n *EvalStringNode) EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error) {
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: ast.TString}
}
func (n *EvalStringNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
return 0, ErrTypeGuardFailed{RequestedType: ast.TDuration, ActualType: ast.TString}
}
func (n *EvalStringNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
return nil, ErrTypeGuardFailed{RequestedType: ast.TMissing, ActualType: ast.TString}
}
func (n *EvalStringNode) IsDynamic() bool {
return false
}

View File

@ -33,11 +33,11 @@ func isValidUnaryOperator(operator ast.TokenType) bool {
return operator == ast.TokenNot || operator == ast.TokenMinus
}
func (n *EvalUnaryNode) Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error) {
func (n *EvalUnaryNode) Type(scope ReadOnlyScope) (ast.ValueType, error) {
if n.constReturnType == ast.InvalidType {
// We are dynamic and we need to figure out our type
// Do NOT cache this result in n.returnType since it can change.
return n.nodeEvaluator.Type(scope, executionState)
return n.nodeEvaluator.Type(scope)
}
return n.constReturnType, nil
}
@ -57,8 +57,16 @@ func (n *EvalUnaryNode) EvalTime(scope *Scope, executionState ExecutionState) (t
return time.Time{}, ErrTypeGuardFailed{RequestedType: ast.TTime, ActualType: n.constReturnType}
}
func (n *EvalUnaryNode) EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error) {
ref, ok := n.nodeEvaluator.(*EvalReferenceNode)
if !ok {
return nil, fmt.Errorf("expected nodeEvaluator to be *EvalReferenceNode got %T", n.nodeEvaluator)
}
return nil, fmt.Errorf("reference \"%s\" is missing value", ref.Node.Reference)
}
func (n *EvalUnaryNode) EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error) {
typ, err := n.Type(scope, executionState)
typ, err := n.Type(scope)
if err != nil {
return 0, err
}
@ -79,7 +87,7 @@ func (n *EvalUnaryNode) EvalString(scope *Scope, executionState ExecutionState)
}
func (n *EvalUnaryNode) EvalFloat(scope *Scope, executionState ExecutionState) (float64, error) {
typ, err := n.Type(scope, executionState)
typ, err := n.Type(scope)
if err != nil {
return 0, err
}
@ -96,7 +104,7 @@ func (n *EvalUnaryNode) EvalFloat(scope *Scope, executionState ExecutionState) (
}
func (n *EvalUnaryNode) EvalInt(scope *Scope, executionState ExecutionState) (int64, error) {
typ, err := n.Type(scope, executionState)
typ, err := n.Type(scope)
if err != nil {
return 0, err
}
@ -113,7 +121,7 @@ func (n *EvalUnaryNode) EvalInt(scope *Scope, executionState ExecutionState) (in
}
func (n *EvalUnaryNode) EvalBool(scope *Scope, executionState ExecutionState) (bool, error) {
typ, err := n.Type(scope, executionState)
typ, err := n.Type(scope)
if err != nil {
return false, err
}

View File

@ -12,6 +12,8 @@ import (
type Expression interface {
Reset()
Type(scope ReadOnlyScope) (ast.ValueType, error)
EvalFloat(scope *Scope) (float64, error)
EvalInt(scope *Scope) (int64, error)
EvalString(scope *Scope) (string, error)
@ -58,6 +60,10 @@ func (se *expression) Reset() {
se.executionState.ResetAll()
}
func (se *expression) Type(scope ReadOnlyScope) (ast.ValueType, error) {
return se.nodeEvaluator.Type(scope)
}
func (se *expression) EvalBool(scope *Scope) (bool, error) {
return se.nodeEvaluator.EvalBool(scope, se.executionState)
}
@ -78,8 +84,12 @@ func (se *expression) EvalDuration(scope *Scope) (time.Duration, error) {
return se.nodeEvaluator.EvalDuration(scope, se.executionState)
}
func (se *expression) EvalMissing(scope *Scope) (*ast.Missing, error) {
return se.nodeEvaluator.EvalMissing(scope, se.executionState)
}
func (se *expression) Eval(scope *Scope) (interface{}, error) {
typ, err := se.nodeEvaluator.Type(scope, CreateExecutionState())
typ, err := se.nodeEvaluator.Type(scope)
if err != nil {
return nil, err
}
@ -115,41 +125,13 @@ func (se *expression) Eval(scope *Scope) (interface{}, error) {
return nil, err
}
return result, err
case ast.TMissing:
result, err := se.EvalMissing(scope)
if err != nil {
return nil, err
}
return result, err
default:
return nil, fmt.Errorf("expression returned unexpected type %s", typ)
}
}
// FindReferenceVariables returns the distinct reference-variable names that
// appear anywhere within the given AST nodes. The order of the returned
// names is unspecified, since it follows map iteration order.
func FindReferenceVariables(nodes ...ast.Node) []string {
	seen := make(map[string]bool)
	for _, n := range nodes {
		buildReferenceVariablesSet(n, seen)
	}
	names := make([]string, 0, len(seen))
	for name := range seen {
		names = append(names, name)
	}
	return names
}
// buildReferenceVariablesSet walks the AST rooted at node and records every
// reference-variable name it encounters into set. The set is mutated in
// place so that recursive calls share a single accumulator instead of
// returning and merging maps.
func buildReferenceVariablesSet(node ast.Node, set map[string]bool) {
	switch t := node.(type) {
	case *ast.FunctionNode:
		for _, arg := range t.Args {
			buildReferenceVariablesSet(arg, set)
		}
	case *ast.BinaryNode:
		buildReferenceVariablesSet(t.Left, set)
		buildReferenceVariablesSet(t.Right, set)
	case *ast.UnaryNode:
		buildReferenceVariablesSet(t.Node, set)
	case *ast.ReferenceNode:
		set[t.Reference] = true
	}
}

View File

@ -12,14 +12,71 @@ import (
"github.com/dustin/go-humanize"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/kapacitor/tick/ast"
)
// maxArgs is used to specify the largest number of arguments that a
// builtin function can accept.
// Increment this value if you create a builtin function with more than
// the current value of maxArgs.
const (
maxArgs = 4
)
type ErrWrongFuncSignature struct {
Name string
DomainProvided Domain
Func Func
}
func (e ErrWrongFuncSignature) Error() string {
return fmt.Sprintf("Cannot call function \"%s\" with args signature %s, available signatures are %s.",
e.Name, e.DomainProvided, FuncDomains(e.Func))
}
var ErrNotFloat = errors.New("value is not a float")
type Domain [maxArgs]ast.ValueType
func (d Domain) String() string {
input := []string{}
for _, el := range d {
if el == ast.InvalidType {
// Because inputs should be consecutive
break
}
input = append(input, el.String())
}
return "(" + strings.Join(input, ",") + ")"
}
type Domains []Domain
func (ds Domains) String() string {
input := []string{}
for _, d := range ds {
input = append(input, d.String())
}
return "[" + strings.Join(input, ", ") + "]"
}
// A callable function from within the expression
type Func interface {
Reset()
Call(...interface{}) (interface{}, error)
Signature() map[Domain]ast.ValueType
}
func FuncDomains(f Func) Domains {
ds := []Domain{}
for d := range f.Signature() {
ds = append(ds, d)
}
return ds
}
// Lookup for functions
@ -27,6 +84,8 @@ type Funcs map[string]Func
var statelessFuncs Funcs
var builtinFuncs Funcs
func init() {
statelessFuncs = make(Funcs)
// Conversion functions
@ -105,6 +164,9 @@ func init() {
// Regex functions
statelessFuncs["regexReplace"] = regexReplace{}
// Missing functions
statelessFuncs["isPresent"] = isPresent{}
// Time functions
statelessFuncs["minute"] = minute{}
statelessFuncs["hour"] = hour{}
@ -118,6 +180,9 @@ func init() {
// Conditionals
statelessFuncs["if"] = ifFunc{}
// Create map of builtin functions after all functions have been added to statelessFuncs
builtinFuncs = NewFunctions()
}
// Return set of built-in Funcs
@ -161,6 +226,19 @@ func (m math1) Call(args ...interface{}) (v interface{}, err error) {
return
}
var math1FuncSignature = map[Domain]ast.ValueType{}
// Initialize Math 1 Func Signature
func init() {
d := Domain{}
d[0] = ast.TFloat
math1FuncSignature[d] = ast.TFloat
}
func (m math1) Signature() map[Domain]ast.ValueType {
return math1FuncSignature
}
func (m math1) Reset() {}
type math2Func func(float64, float64) float64
@ -194,6 +272,20 @@ func (m math2) Call(args ...interface{}) (v interface{}, err error) {
return
}
var math2FuncSignature = map[Domain]ast.ValueType{}
// Initialize Math 2 Function Signature
func init() {
d := Domain{}
d[0] = ast.TFloat
d[1] = ast.TFloat
math2FuncSignature[d] = ast.TFloat
}
func (m math2) Signature() map[Domain]ast.ValueType {
return math2FuncSignature
}
func (m math2) Reset() {}
type mathIFunc func(int) float64
@ -222,6 +314,19 @@ func (m mathI) Call(args ...interface{}) (v interface{}, err error) {
return
}
var mathIFuncSignature = map[Domain]ast.ValueType{}
// Initialize Math I Function Signature
func init() {
d := Domain{}
d[0] = ast.TInt
mathIFuncSignature[d] = ast.TFloat
}
func (m mathI) Signature() map[Domain]ast.ValueType {
return mathIFuncSignature
}
func (m mathI) Reset() {}
type mathIFFunc func(int, float64) float64
@ -255,6 +360,20 @@ func (m mathIF) Call(args ...interface{}) (v interface{}, err error) {
return
}
var mathIFFuncSignature = map[Domain]ast.ValueType{}
// Initialize Math IF Function Signature
func init() {
d := Domain{}
d[0] = ast.TInt
d[1] = ast.TFloat
mathIFFuncSignature[d] = ast.TFloat
}
func (m mathIF) Signature() map[Domain]ast.ValueType {
return mathIFFuncSignature
}
func (m mathIF) Reset() {}
type string2BoolFunc func(string, string) bool
@ -288,6 +407,20 @@ func (m string2Bool) Call(args ...interface{}) (v interface{}, err error) {
return
}
var string2BoolFuncSignature = map[Domain]ast.ValueType{}
// Initialize String 2 Bool Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
d[1] = ast.TString
string2BoolFuncSignature[d] = ast.TBool
}
func (m string2Bool) Signature() map[Domain]ast.ValueType {
return string2BoolFuncSignature
}
func (m string2Bool) Reset() {}
type string2IntFunc func(string, string) int
@ -321,6 +454,20 @@ func (m string2Int) Call(args ...interface{}) (v interface{}, err error) {
return
}
var string2IntFuncSignature = map[Domain]ast.ValueType{}
// Initialize String 2 Int Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
d[1] = ast.TString
string2IntFuncSignature[d] = ast.TInt
}
func (m string2Int) Signature() map[Domain]ast.ValueType {
return string2IntFuncSignature
}
func (m string2Int) Reset() {}
type string2StringFunc func(string, string) string
@ -354,6 +501,20 @@ func (m string2String) Call(args ...interface{}) (v interface{}, err error) {
return
}
var string2StringFuncSignature = map[Domain]ast.ValueType{}
// Initialize String 2 String Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
d[1] = ast.TString
string2StringFuncSignature[d] = ast.TString
}
func (m string2String) Signature() map[Domain]ast.ValueType {
return string2StringFuncSignature
}
func (m string2String) Reset() {}
type string1StringFunc func(string) string
@ -382,6 +543,19 @@ func (m string1String) Call(args ...interface{}) (v interface{}, err error) {
return
}
var string1StringFuncSignature = map[Domain]ast.ValueType{}
// Initialize String 1 String Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
string1StringFuncSignature[d] = ast.TString
}
func (m string1String) Signature() map[Domain]ast.ValueType {
return string1StringFuncSignature
}
func (m string1String) Reset() {}
type strLength struct {
@ -400,6 +574,19 @@ func (m strLength) Call(args ...interface{}) (v interface{}, err error) {
return
}
var strLengthFuncSignature = map[Domain]ast.ValueType{}
// Initialize String Length Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
strLengthFuncSignature[d] = ast.TInt
}
func (strLength) Signature() map[Domain]ast.ValueType {
return strLengthFuncSignature
}
func (m strLength) Reset() {}
type strReplace struct {
@ -433,6 +620,22 @@ func (m strReplace) Call(args ...interface{}) (v interface{}, err error) {
return
}
var strReplaceFuncSignature = map[Domain]ast.ValueType{}
// Initialize String Replace Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
d[1] = ast.TString
d[2] = ast.TString
d[3] = ast.TInt
strReplaceFuncSignature[d] = ast.TString
}
func (strReplace) Signature() map[Domain]ast.ValueType {
return strReplaceFuncSignature
}
func (m strReplace) Reset() {}
type strSubstring struct {
@ -471,6 +674,21 @@ func (m strSubstring) Call(args ...interface{}) (v interface{}, err error) {
return
}
var strSubstringFuncSignature = map[Domain]ast.ValueType{}
// Initialize String Substring Function Signature
func init() {
d := Domain{}
d[0] = ast.TString
d[1] = ast.TInt
d[2] = ast.TInt
strSubstringFuncSignature[d] = ast.TString
}
func (strSubstring) Signature() map[Domain]ast.ValueType {
return strSubstringFuncSignature
}
func (m strSubstring) Reset() {}
type regexReplace struct {
@ -499,6 +717,21 @@ func (m regexReplace) Call(args ...interface{}) (v interface{}, err error) {
return
}
var regexReplaceFuncSignature = map[Domain]ast.ValueType{}
// Initialize Regex Replace Function Signature
func init() {
d := Domain{}
d[0] = ast.TRegex
d[1] = ast.TString
d[2] = ast.TString
regexReplaceFuncSignature[d] = ast.TString
}
func (regexReplace) Signature() map[Domain]ast.ValueType {
return regexReplaceFuncSignature
}
func (m regexReplace) Reset() {}
type boolean struct {
@ -541,6 +774,25 @@ func (boolean) Call(args ...interface{}) (v interface{}, err error) {
return
}
var booleanFuncSignature = map[Domain]ast.ValueType{}
// Initialize Boolean Function Signature
func init() {
d := Domain{}
d[0] = ast.TBool
booleanFuncSignature[d] = ast.TBool
d[0] = ast.TString
booleanFuncSignature[d] = ast.TBool
d[0] = ast.TInt
booleanFuncSignature[d] = ast.TBool
d[0] = ast.TFloat
booleanFuncSignature[d] = ast.TBool
}
func (boolean) Signature() map[Domain]ast.ValueType {
return booleanFuncSignature
}
type integer struct {
}
@ -573,6 +825,25 @@ func (integer) Call(args ...interface{}) (v interface{}, err error) {
return
}
var integerFuncSignature = map[Domain]ast.ValueType{}
// Initialize Integer Function Signature
func init() {
d := Domain{}
d[0] = ast.TBool
integerFuncSignature[d] = ast.TInt
d[0] = ast.TString
integerFuncSignature[d] = ast.TInt
d[0] = ast.TInt
integerFuncSignature[d] = ast.TInt
d[0] = ast.TFloat
integerFuncSignature[d] = ast.TInt
}
func (integer) Signature() map[Domain]ast.ValueType {
return integerFuncSignature
}
type float struct {
}
@ -603,6 +874,25 @@ func (float) Call(args ...interface{}) (v interface{}, err error) {
return
}
var floatFuncSignature = map[Domain]ast.ValueType{}
// Initialize Float Function Signature
func init() {
d := Domain{}
d[0] = ast.TBool
floatFuncSignature[d] = ast.TFloat
d[0] = ast.TString
floatFuncSignature[d] = ast.TFloat
d[0] = ast.TInt
floatFuncSignature[d] = ast.TFloat
d[0] = ast.TFloat
floatFuncSignature[d] = ast.TFloat
}
func (float) Signature() map[Domain]ast.ValueType {
return floatFuncSignature
}
type str struct {
}
@ -631,6 +921,27 @@ func (str) Call(args ...interface{}) (v interface{}, err error) {
return
}
var stringFuncSignature = map[Domain]ast.ValueType{}
// Initialize String Function Signature
func init() {
d := Domain{}
d[0] = ast.TBool
stringFuncSignature[d] = ast.TString
d[0] = ast.TString
stringFuncSignature[d] = ast.TString
d[0] = ast.TInt
stringFuncSignature[d] = ast.TString
d[0] = ast.TFloat
stringFuncSignature[d] = ast.TString
d[0] = ast.TDuration
stringFuncSignature[d] = ast.TString
}
func (str) Signature() map[Domain]ast.ValueType {
return stringFuncSignature
}
type duration struct {
}
@ -674,6 +985,28 @@ func (duration) Call(args ...interface{}) (v interface{}, err error) {
return
}
var durationFuncSignature = map[Domain]ast.ValueType{}
// Initialize Duration Function Signature
func init() {
d := Domain{}
d[0] = ast.TDuration
durationFuncSignature[d] = ast.TDuration
d[0] = ast.TInt
d[1] = ast.TDuration
durationFuncSignature[d] = ast.TDuration
d[0] = ast.TFloat
d[1] = ast.TDuration
durationFuncSignature[d] = ast.TDuration
d[0] = ast.TString
d[1] = ast.TDuration
durationFuncSignature[d] = ast.TDuration
}
func (duration) Signature() map[Domain]ast.ValueType {
return durationFuncSignature
}
type count struct {
n int64
}
@ -688,6 +1021,18 @@ func (c *count) Call(args ...interface{}) (v interface{}, err error) {
return c.n, nil
}
var countFuncSignature = map[Domain]ast.ValueType{}
// Initialize Count Function Signature
func init() {
d := Domain{}
countFuncSignature[d] = ast.TInt
}
func (c *count) Signature() map[Domain]ast.ValueType {
return countFuncSignature
}
type sigma struct {
mean float64
variance float64
@ -723,6 +1068,19 @@ func (s *sigma) Call(args ...interface{}) (interface{}, error) {
return math.Abs(x-s.mean) / math.Sqrt(s.variance), nil
}
var sigmaFuncSignature = map[Domain]ast.ValueType{}
// Initialize Sigma Function Signature
func init() {
d := Domain{}
d[0] = ast.TFloat
sigmaFuncSignature[d] = ast.TFloat
}
func (s *sigma) Signature() map[Domain]ast.ValueType {
return sigmaFuncSignature
}
type spread struct {
min float64
max float64
@ -754,6 +1112,29 @@ func (s *spread) Call(args ...interface{}) (interface{}, error) {
return s.max - s.min, nil
}
var spreadFuncSignature = map[Domain]ast.ValueType{}
// Initialize Spread Function Signature
func init() {
d := Domain{}
d[0] = ast.TFloat
spreadFuncSignature[d] = ast.TFloat
}
func (s *spread) Signature() map[Domain]ast.ValueType {
return spreadFuncSignature
}
// Time function signatures
var timeFuncSignature = map[Domain]ast.ValueType{}
// Initialize Time Function Signature
func init() {
d := Domain{}
d[0] = ast.TTime
timeFuncSignature[d] = ast.TInt
}
type minute struct {
}
@ -774,6 +1155,10 @@ func (minute) Call(args ...interface{}) (v interface{}, err error) {
return
}
func (minute) Signature() map[Domain]ast.ValueType {
return timeFuncSignature
}
type hour struct {
}
@ -794,6 +1179,10 @@ func (hour) Call(args ...interface{}) (v interface{}, err error) {
return
}
func (hour) Signature() map[Domain]ast.ValueType {
return timeFuncSignature
}
type weekday struct {
}
@ -814,6 +1203,10 @@ func (weekday) Call(args ...interface{}) (v interface{}, err error) {
return
}
func (weekday) Signature() map[Domain]ast.ValueType {
return timeFuncSignature
}
type day struct {
}
@ -834,6 +1227,10 @@ func (day) Call(args ...interface{}) (v interface{}, err error) {
return
}
func (day) Signature() map[Domain]ast.ValueType {
return timeFuncSignature
}
type month struct {
}
@ -854,6 +1251,10 @@ func (month) Call(args ...interface{}) (v interface{}, err error) {
return
}
func (month) Signature() map[Domain]ast.ValueType {
return timeFuncSignature
}
type year struct {
}
@ -874,6 +1275,10 @@ func (year) Call(args ...interface{}) (v interface{}, err error) {
return
}
func (year) Signature() map[Domain]ast.ValueType {
return timeFuncSignature
}
type humanBytes struct {
}
@ -896,6 +1301,21 @@ func (humanBytes) Call(args ...interface{}) (v interface{}, err error) {
return
}
var humanBytesFuncSignature = map[Domain]ast.ValueType{}
// Initialize HumanBytes Function Signature
func init() {
d := Domain{}
d[0] = ast.TFloat
humanBytesFuncSignature[d] = ast.TString
d[0] = ast.TInt
humanBytesFuncSignature[d] = ast.TString
}
func (humanBytes) Signature() map[Domain]ast.ValueType {
return humanBytesFuncSignature
}
type ifFunc struct {
}
@ -925,3 +1345,67 @@ func (ifFunc) Call(args ...interface{}) (interface{}, error) {
return args[2], nil
}
var ifFuncSignature = map[Domain]ast.ValueType{}
// Initialize If Function Signature
func init() {
d := Domain{}
d[0] = ast.TBool
types := []ast.ValueType{
ast.TFloat,
ast.TInt,
ast.TString,
ast.TBool,
ast.TRegex,
ast.TTime,
ast.TDuration,
}
for _, t := range types {
d[1] = t
d[2] = t
ifFuncSignature[d] = t
}
}
func (ifFunc) Signature() map[Domain]ast.ValueType {
return ifFuncSignature
}
// isPresent implements the "isPresent" builtin: it reports whether a value
// is actually present, i.e. is not the *ast.Missing marker.
type isPresent struct {
}

// Reset is a no-op; isPresent holds no per-evaluation state.
func (isPresent) Reset() {
}

// Call returns true when its single argument is anything other than the
// *ast.Missing marker, and false when the value is missing.
func (isPresent) Call(args ...interface{}) (v interface{}, err error) {
	if len(args) != 1 {
		// The message must name the exposed function, "isPresent"
		// (it previously said "isMissing", which was misleading).
		return false, errors.New("isPresent expects exactly one argument")
	}
	_, isMissing := args[0].(*ast.Missing)
	return !isMissing, nil
}

var isPresentFuncSignature = map[Domain]ast.ValueType{}

// Initialize isPresent Function Signature: a single argument of any of the
// listed types, always returning a boolean.
func init() {
	d := Domain{}
	d[0] = ast.TMissing
	isPresentFuncSignature[d] = ast.TBool
	d[0] = ast.TBool
	isPresentFuncSignature[d] = ast.TBool
	d[0] = ast.TString
	isPresentFuncSignature[d] = ast.TBool
	d[0] = ast.TInt
	isPresentFuncSignature[d] = ast.TBool
	d[0] = ast.TFloat
	isPresentFuncSignature[d] = ast.TBool
}

// Signature returns the accepted argument domains, all mapping to TBool.
func (isPresent) Signature() map[Domain]ast.ValueType {
	return isPresentFuncSignature
}

View File

@ -22,6 +22,7 @@ func (e ErrTypeGuardFailed) Error() string {
type ReadOnlyScope interface {
Get(name string) (interface{}, error)
DynamicFunc(name string) *DynamicFunc
}
// NodeEvaluator provides a generic way for trying to fetch
@ -34,9 +35,10 @@ type NodeEvaluator interface {
EvalRegex(scope *Scope, executionState ExecutionState) (*regexp.Regexp, error)
EvalTime(scope *Scope, executionState ExecutionState) (time.Time, error)
EvalDuration(scope *Scope, executionState ExecutionState) (time.Duration, error)
EvalMissing(scope *Scope, executionState ExecutionState) (*ast.Missing, error)
// Type returns the type of ast.ValueType
Type(scope ReadOnlyScope, executionState ExecutionState) (ast.ValueType, error)
Type(scope ReadOnlyScope) (ast.ValueType, error)
// Whether the type returned by the node can change.
IsDynamic() bool
}

View File

@ -3,10 +3,27 @@ package stateful
import (
"fmt"
"strings"
"github.com/influxdata/kapacitor/tick/ast"
)
type DynamicMethod func(self interface{}, args ...interface{}) (interface{}, error)
// DynamicFunc adapts a user-supplied function plus its signature map to the
// same Reset/Call/Signature shape as the builtin functions, so dynamically
// registered functions can be invoked from expressions.
type DynamicFunc struct {
	// F is the underlying implementation invoked by Call.
	F func(args ...interface{}) (interface{}, error)
	// Sig maps each accepted argument-type domain to its return type.
	Sig map[Domain]ast.ValueType
}

// Call invokes the wrapped function with the given arguments.
func (df DynamicFunc) Call(args ...interface{}) (interface{}, error) {
	return df.F(args...)
}

// Reset is a no-op; a DynamicFunc holds no per-evaluation state of its own.
func (df DynamicFunc) Reset() {
}

// Signature returns the domain-to-return-type map supplied in Sig.
func (df DynamicFunc) Signature() map[Domain]ast.ValueType {
	return df.Sig
}
// Special marker that a value is empty
var empty = new(interface{})
@ -15,6 +32,7 @@ type Scope struct {
variables map[string]interface{}
dynamicMethods map[string]DynamicMethod
dynamicFuncs map[string]*DynamicFunc
}
//Initialize a new Scope object.
@ -22,6 +40,7 @@ func NewScope() *Scope {
return &Scope{
variables: make(map[string]interface{}),
dynamicMethods: make(map[string]DynamicMethod),
dynamicFuncs: make(map[string]*DynamicFunc),
}
}
@ -63,3 +82,11 @@ func (s *Scope) SetDynamicMethod(name string, m DynamicMethod) {
func (s *Scope) DynamicMethod(name string) DynamicMethod {
return s.dynamicMethods[name]
}
// SetDynamicFunc registers f in the scope under name, replacing any
// previously registered function of the same name.
func (s *Scope) SetDynamicFunc(name string, f *DynamicFunc) {
	s.dynamicFuncs[name] = f
}

// DynamicFunc returns the function registered under name, or nil if no
// function has been registered with that name.
func (s *Scope) DynamicFunc(name string) *DynamicFunc {
	return s.dynamicFuncs[name]
}

View File

@ -58,7 +58,7 @@ func TestExpression_RefernceVariables(t *testing.T) {
}
for i, expect := range expectations {
refVariables := stateful.FindReferenceVariables(expect.node)
refVariables := ast.FindReferenceVariables(expect.node)
if !reflect.DeepEqual(refVariables, expect.refVariables) {
t.Errorf("[Iteration: %v, Node: %T] Got unexpected result:\ngot: %v\nexpected: %v", i+1, expect.node, refVariables, expect.refVariables)
}

View File

@ -3,6 +3,7 @@
"Name":"Float",
"name":"float",
"Type":"float64",
"Kind":"reflect.Float64",
"Nil":"0",
"Zero":"float64(0)"
},
@ -10,6 +11,7 @@
"Name":"Integer",
"name":"integer",
"Type":"int64",
"Kind":"reflect.Int64",
"Nil":"0",
"Zero":"int64(0)"
},
@ -17,6 +19,7 @@
"Name":"String",
"name":"string",
"Type":"string",
"Kind":"reflect.String",
"Nil":"\"\"",
"Zero":"\"\""
},
@ -24,8 +27,8 @@
"Name":"Boolean",
"name":"boolean",
"Type":"bool",
"Kind":"reflect.Bool",
"Nil":"false",
"Zero":"false"
}
]

View File

@ -14,6 +14,7 @@ import (
"github.com/influxdata/kapacitor/models"
"github.com/influxdata/kapacitor/pipeline"
"github.com/influxdata/kapacitor/udf"
"github.com/influxdata/kapacitor/udf/agent"
"github.com/pkg/errors"
)
@ -291,15 +292,15 @@ func (p *UDFProcess) logStdErr() {
}
}
func (p *UDFProcess) Abort(err error) { p.server.Abort(err) }
func (p *UDFProcess) Init(options []*udf.Option) error { return p.server.Init(options) }
func (p *UDFProcess) Snapshot() ([]byte, error) { return p.server.Snapshot() }
func (p *UDFProcess) Restore(snapshot []byte) error { return p.server.Restore(snapshot) }
func (p *UDFProcess) PointIn() chan<- models.Point { return p.server.PointIn() }
func (p *UDFProcess) BatchIn() chan<- models.Batch { return p.server.BatchIn() }
func (p *UDFProcess) PointOut() <-chan models.Point { return p.server.PointOut() }
func (p *UDFProcess) BatchOut() <-chan models.Batch { return p.server.BatchOut() }
func (p *UDFProcess) Info() (udf.Info, error) { return p.server.Info() }
func (p *UDFProcess) Abort(err error) { p.server.Abort(err) }
func (p *UDFProcess) Init(options []*agent.Option) error { return p.server.Init(options) }
func (p *UDFProcess) Snapshot() ([]byte, error) { return p.server.Snapshot() }
func (p *UDFProcess) Restore(snapshot []byte) error { return p.server.Restore(snapshot) }
func (p *UDFProcess) PointIn() chan<- models.Point { return p.server.PointIn() }
func (p *UDFProcess) BatchIn() chan<- models.Batch { return p.server.BatchIn() }
func (p *UDFProcess) PointOut() <-chan models.Point { return p.server.PointOut() }
func (p *UDFProcess) BatchOut() <-chan models.Batch { return p.server.BatchOut() }
func (p *UDFProcess) Info() (udf.Info, error) { return p.server.Info() }
type UDFSocket struct {
server *udf.Server
@ -363,15 +364,15 @@ func (s *UDFSocket) Close() error {
return nil
}
func (s *UDFSocket) Abort(err error) { s.server.Abort(err) }
func (s *UDFSocket) Init(options []*udf.Option) error { return s.server.Init(options) }
func (s *UDFSocket) Snapshot() ([]byte, error) { return s.server.Snapshot() }
func (s *UDFSocket) Restore(snapshot []byte) error { return s.server.Restore(snapshot) }
func (s *UDFSocket) PointIn() chan<- models.Point { return s.server.PointIn() }
func (s *UDFSocket) BatchIn() chan<- models.Batch { return s.server.BatchIn() }
func (s *UDFSocket) PointOut() <-chan models.Point { return s.server.PointOut() }
func (s *UDFSocket) BatchOut() <-chan models.Batch { return s.server.BatchOut() }
func (s *UDFSocket) Info() (udf.Info, error) { return s.server.Info() }
func (s *UDFSocket) Abort(err error) { s.server.Abort(err) }
func (s *UDFSocket) Init(options []*agent.Option) error { return s.server.Init(options) }
func (s *UDFSocket) Snapshot() ([]byte, error) { return s.server.Snapshot() }
func (s *UDFSocket) Restore(snapshot []byte) error { return s.server.Restore(snapshot) }
func (s *UDFSocket) PointIn() chan<- models.Point { return s.server.PointIn() }
func (s *UDFSocket) BatchIn() chan<- models.Batch { return s.server.BatchIn() }
func (s *UDFSocket) PointOut() <-chan models.Point { return s.server.PointOut() }
func (s *UDFSocket) BatchOut() <-chan models.Batch { return s.server.BatchOut() }
func (s *UDFSocket) Info() (udf.Info, error) { return s.server.Info() }
type socket struct {
path string

View File

@ -0,0 +1,68 @@
# UDF Agents and Servers
A UDF is a User Defined Function, meaning that you can write your own functions/algorithms and plug them into Kapacitor.
Your custom function runs in its own process and Kapacitor communicates with it via a defined protocol, see [udf.proto](https://github.com/influxdata/kapacitor/blob/master/udf/udf.proto).
To facilitate working with the protocol several `agents` have been written in various languages that abstract the protocol communication through an interface in the respective languages.
You can find those agent implementations in this directory and subdirectories based on language name.
Example uses of the agents can be found in the `examples` directory.
These examples are working examples and are executed as part of the testing suite,
see [server_test.go](https://github.com/influxdata/kapacitor/blob/master/cmd/kapacitord/run/server_test.go).
## Child process vs Socket
There are two approaches for writing UDFs.
* A child process based approach where Kapacitor spawns a child process and communicates over STDIN/STDOUT.
* A socket based approach where you start the UDF process externally and Kapacitor connects to it over a socket.
For the socket based approach there will only ever be one instance of your UDF process running.
Each use of the UDF in a TICKscript will be a new connection to the socket.
Whereas each use of a process based UDF means a new child process is spawned for each use.
## Design
The protocol for communicating with Kapacitor consists of Request and Response messages.
The agents wrap the communication and serialization and expose an interface that needs to be implemented to handle each request/response.
In addition to the request/response paradigm agents provide a way to stream data back to Kapacitor.
Your UDF is in control of when new points or batches are sent back to Kapacitor.
### Agents and Servers
There are two main objects provided in the current implementations, an `Agent` and a `Server`.
The `Agent` is responsible for managing the communication over input and output streams.
The `Server` is responsible for accepting new connections and creating new `Agents` to handle those new connections.
Both process based and socket based UDFs will need to use an `Agent` to handle the communication/serialization aspects of the protocol.
Only socket based UDFs need use the `Server`.
## Writing an Agent for a new Language
The UDF protocol is designed to be simple and consists of reading and writing protocol buffer messages.
In order to write a UDF in the language of your choice your language must have protocol buffer support and be able to read and write to a socket.
The basic steps are:
0. Add the language to the `udf/io.go` generate comment so the udf.proto code exists for your language.
1. Implement a Varint encoder/decoder, this is trivial see the python implementation.
2. Implement a method for reading and writing streamed protobuf messages. See `udf.proto` for more details.
3. Create an interface for handling each of the request/responses.
4. Write a loop for reading from an input stream and calling the handler interface, and write responses to an output stream.
5. Provide a thread-safe mechanism for writing points and batches to the output stream independent of the handler interface.
This is easily accomplished with a synchronized write method, see the python implementation.
6. Implement the examples using your new agent.
7. Add your example to the test suite in `cmd/kapacitord/run/server_test.go`.
For process based UDFs it is expected that the process terminate after STDIN is closed and the remaining requests processed.
After STDIN is closed, the agent process can continue to send Responses to Kapacitor as long as a keepalive timeout does not occur.
Once a keepalive timeout is reached and after a 2*keepalive_time grace period, if the process has not terminated then it will be forcefully terminated.
## Docker
It is expected that the example can run inside the test suite.
Since generating different protocol buffer code requires different plugins and libraries to run we make use of Docker to provide the necessary environment.
This makes testing the code easier as the developer does not have to install each supported language locally.

View File

@ -0,0 +1,236 @@
package agent
import (
"bufio"
"errors"
"fmt"
"io"
"sync"
)
// The Agent calls the appropriate methods on the Handler as it receives requests over a socket.
//
// Returning an error from any method will cause the Agent to stop and an ErrorResponse to be sent.
// Some *Response objects (like SnapshotResponse) allow for returning their own error within the object itself.
// These types of errors will not stop the Agent and Kapacitor will deal with them appropriately.
//
// The Handler is called from a single goroutine, meaning methods will not be called concurrently.
//
// To write Points/Batches back to the Agent/Kapacitor use the Agent.Responses channel.
type Handler interface {
	// Info returns an InfoResponse describing the properties of this Handler.
	Info() (*InfoResponse, error)
	// Init initializes the Handler with the provided options.
	Init(*InitRequest) (*InitResponse, error)
	// Snapshot creates a snapshot of the running state of the handler.
	Snapshot() (*SnapshotResponse, error)
	// Restore restores a previously created snapshot.
	Restore(*RestoreRequest) (*RestoreResponse, error)
	// BeginBatch marks the beginning of a new batch.
	BeginBatch(*BeginBatch) error
	// Point processes a point that has arrived.
	Point(*Point) error
	// EndBatch marks the end of the current batch.
	EndBatch(*EndBatch) error
	// Stop gracefully stops the Handler.
	// No other methods will be called after Stop.
	Stop()
}
// Go implementation of a Kapacitor UDF agent.
// This agent is responsible for reading and writing
// messages over a socket.
//
// The Agent requires a Handler object in order to fulfill requests.
type Agent struct {
	// in/out are the streams the agent reads requests from and
	// writes responses to (a socket, or STDIN/STDOUT for process UDFs).
	in  io.ReadCloser
	out io.WriteCloser

	// outGroup tracks every goroutine that writes to outResponses so
	// Wait knows when it is safe to close that channel.
	outGroup     sync.WaitGroup
	outResponses chan *Response

	// responses is the internal receive side of the public Responses channel.
	responses chan *Response
	// A channel for writing Responses, specifically Batch and Point responses.
	Responses chan<- *Response

	// Buffered (size 1) error channels populated by the read and write loops.
	writeErrC chan error
	readErrC  chan error

	// The handler for requests.
	Handler Handler
}
// New creates an Agent that reads requests from in and writes responses to out.
// To create an Agent that communicates over STDIN/STDOUT of the process use New(os.Stdin, os.Stdout).
func New(in io.ReadCloser, out io.WriteCloser) *Agent {
	a := &Agent{
		in:           in,
		out:          out,
		outResponses: make(chan *Response),
		responses:    make(chan *Response),
	}
	// Expose the internal responses channel as send-only.
	a.Responses = a.responses
	return a
}
// Start launches the Agent's read, write, and forwarding goroutines.
// A Handler must be set on the agent before calling Start.
func (a *Agent) Start() error {
	if a.Handler == nil {
		return errors.New("must set a Handler on the agent before starting")
	}

	a.readErrC = make(chan error, 1)
	a.writeErrC = make(chan error, 1)

	// Two goroutines write to outResponses: the read loop and the
	// response forwarder. Wait relies on outGroup to know when both
	// are done so it can close the channel.
	a.outGroup.Add(2)
	go func() {
		defer a.outGroup.Done()
		if err := a.readLoop(); err != nil {
			// Surface the read error to Kapacitor as an ErrorResponse
			// before reporting it locally.
			a.outResponses <- &Response{
				Message: &Response_Error{
					Error: &ErrorResponse{Error: err.Error()},
				},
			}
			a.readErrC <- err
			return
		}
		a.readErrC <- nil
	}()
	go func() {
		a.writeErrC <- a.writeLoop()
	}()
	go func() {
		defer a.outGroup.Done()
		a.forwardResponses()
	}()

	return nil
}
// Wait blocks until the Agent terminates, returning the first read or
// write error encountered, if any.
//
// The Agent will not terminate until the Responses channel is closed;
// close it externally, typically from the Handler's Stop method.
// The Agent also terminates if the in reader is closed or an error occurs.
func (a *Agent) Wait() error {
	// Wait for all writers to outResponses to finish, then close it so
	// the write loop terminates.
	a.outGroup.Wait()
	close(a.outResponses)

	// Drain both error channels, nil-ing each field once received so a
	// subsequent Wait call cannot block on an already-drained channel.
	for !(a.readErrC == nil && a.writeErrC == nil) {
		select {
		case err := <-a.writeErrC:
			a.writeErrC = nil
			if err != nil {
				return fmt.Errorf("write error: %s", err)
			}
		case err := <-a.readErrC:
			a.readErrC = nil
			if err != nil {
				return fmt.Errorf("read error: %s", err)
			}
		}
	}
	return nil
}
// readLoop reads requests from the input stream and dispatches them to
// the Handler until EOF or an error occurs. Any response produced for a
// request is forwarded on outResponses.
func (a *Agent) readLoop() error {
	defer a.Handler.Stop()
	defer a.in.Close()

	var scratch []byte
	req := &Request{}
	r := bufio.NewReader(a.in)
	for {
		err := ReadMessage(&scratch, r, req)
		if err == io.EOF {
			// A closed input stream is the normal shutdown signal.
			return nil
		}
		if err != nil {
			return err
		}

		// Dispatch the message to the handler; only some request
		// types produce a response.
		var response *Response
		switch msg := req.Message.(type) {
		case *Request_Info:
			info, err := a.Handler.Info()
			if err != nil {
				return err
			}
			response = &Response{
				Message: &Response_Info{Info: info},
			}
		case *Request_Init:
			init, err := a.Handler.Init(msg.Init)
			if err != nil {
				return err
			}
			response = &Response{
				Message: &Response_Init{Init: init},
			}
		case *Request_Keepalive:
			// Keepalives are echoed directly; the handler is not involved.
			response = &Response{
				Message: &Response_Keepalive{
					Keepalive: &KeepaliveResponse{Time: msg.Keepalive.Time},
				},
			}
		case *Request_Snapshot:
			snapshot, err := a.Handler.Snapshot()
			if err != nil {
				return err
			}
			response = &Response{
				Message: &Response_Snapshot{Snapshot: snapshot},
			}
		case *Request_Restore:
			restore, err := a.Handler.Restore(msg.Restore)
			if err != nil {
				return err
			}
			response = &Response{
				Message: &Response_Restore{Restore: restore},
			}
		case *Request_Begin:
			if err := a.Handler.BeginBatch(msg.Begin); err != nil {
				return err
			}
		case *Request_Point:
			if err := a.Handler.Point(msg.Point); err != nil {
				return err
			}
		case *Request_End:
			if err := a.Handler.EndBatch(msg.End); err != nil {
				return err
			}
		}
		if response != nil {
			a.outResponses <- response
		}
	}
}
// writeLoop serializes responses from outResponses onto the output
// stream until the channel is closed or a write fails.
func (a *Agent) writeLoop() error {
	defer a.out.Close()
	for {
		response, ok := <-a.outResponses
		if !ok {
			// Channel closed: normal termination.
			return nil
		}
		if err := WriteMessage(response, a.out); err != nil {
			return err
		}
	}
}
// forwardResponses copies user-supplied responses from the public
// Responses channel onto the internal outResponses channel.
func (a *Agent) forwardResponses() {
	for {
		r, ok := <-a.responses
		if !ok {
			return
		}
		a.outResponses <- r
	}
}

View File

@ -1,4 +1,4 @@
package udf
package agent
import (
"encoding/binary"
@ -8,7 +8,7 @@ import (
"github.com/golang/protobuf/proto"
)
//go:generate protoc --go_out=./ --python_out=./agent/py/kapacitor/udf/ udf.proto
//go:generate protoc --go_out=./ --python_out=./py/kapacitor/udf/ udf.proto
// Interface for reading messages
// If you have an io.Reader

View File

@ -1,31 +1,31 @@
package udf_test
package agent_test
import (
"bytes"
"reflect"
"testing"
"github.com/influxdata/kapacitor/udf"
"github.com/influxdata/kapacitor/udf/agent"
)
func TestMessage_ReadWrite(t *testing.T) {
req := &udf.Request{}
req.Message = &udf.Request_Keepalive{
Keepalive: &udf.KeepaliveRequest{
req := &agent.Request{}
req.Message = &agent.Request_Keepalive{
Keepalive: &agent.KeepaliveRequest{
Time: 42,
},
}
var buf bytes.Buffer
err := udf.WriteMessage(req, &buf)
err := agent.WriteMessage(req, &buf)
if err != nil {
t.Fatal(err)
}
nreq := &udf.Request{}
nreq := &agent.Request{}
var b []byte
err = udf.ReadMessage(&b, &buf, nreq)
err = agent.ReadMessage(&b, &buf, nreq)
if err != nil {
t.Fatal(err)
}
@ -36,9 +36,9 @@ func TestMessage_ReadWrite(t *testing.T) {
}
func TestMessage_ReadWriteMultiple(t *testing.T) {
req := &udf.Request{}
req.Message = &udf.Request_Keepalive{
Keepalive: &udf.KeepaliveRequest{
req := &agent.Request{}
req.Message = &agent.Request_Keepalive{
Keepalive: &agent.KeepaliveRequest{
Time: 42,
},
}
@ -47,17 +47,17 @@ func TestMessage_ReadWriteMultiple(t *testing.T) {
var count int = 1e4
for i := 0; i < count; i++ {
err := udf.WriteMessage(req, &buf)
err := agent.WriteMessage(req, &buf)
if err != nil {
t.Fatal(err)
}
}
nreq := &udf.Request{}
nreq := &agent.Request{}
var b []byte
for i := 0; i < count; i++ {
err := udf.ReadMessage(&b, &buf, nreq)
err := agent.ReadMessage(&b, &buf, nreq)
if err != nil {
t.Fatal(err)
}

View File

@ -0,0 +1,91 @@
package agent
import (
"net"
"os"
"os/signal"
"strings"
"sync"
)
// A Server accepts connections on a listener and
// spawns new Agents for each connection.
//
// Stop is idempotent and safe to call from signal handlers
// registered via StopOnSignals.
type Server struct {
	listener net.Listener
	accepter Accepter

	// conns carries accepted connections from the accept goroutine
	// to the serve loop.
	conns chan net.Conn

	// mu guards stopped; stopping is closed exactly once to signal shutdown.
	mu       sync.Mutex
	stopped  bool
	stopping chan struct{}
}

// Accepter handles newly accepted connections.
type Accepter interface {
	// Accept new connections from the listener and handle them accordingly.
	// The typical action is to create a new Agent with the connection as both its in and out objects.
	Accept(net.Conn)
}

// NewServer creates a new Server that accepts connections from l and
// hands each one to a.
func NewServer(l net.Listener, a Accepter) *Server {
	return &Server{
		listener: l,
		accepter: a,
		conns:    make(chan net.Conn),
		stopping: make(chan struct{}),
	}
}

// Serve blocks, accepting and dispatching connections until Stop is
// called or an unrecoverable accept error occurs.
func (s *Server) Serve() error {
	return s.run()
}

// Stop signals the Server to shut down. Calling Stop more than once is safe.
func (s *Server) Stop() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.stopped {
		return
	}
	s.stopped = true
	close(s.stopping)
}

// StopOnSignals registers a signal handler to stop the Server for the given signals.
func (s *Server) StopOnSignals(signals ...os.Signal) {
	// signal.Notify requires a buffered channel so a signal delivered
	// while we are not receiving is not dropped.
	c := make(chan os.Signal, 1)
	signal.Notify(c, signals...)
	go func() {
		for range c {
			s.Stop()
		}
	}()
}

// run is the serve loop: it multiplexes shutdown, accept errors, and
// accepted connections.
func (s *Server) run() error {
	errC := make(chan error, 1)
	go func() {
		for {
			conn, err := s.listener.Accept()
			if err != nil {
				errC <- err
				// Must return here: conn is invalid after an Accept
				// error, and falling through would send a nil conn on
				// s.conns to the accepter.
				return
			}
			s.conns <- conn
		}
	}()
	for {
		select {
		case <-s.stopping:
			s.listener.Close()
			return nil
		case err := <-errC:
			// If err is the listener-closed error (normal shutdown), ignore it and return nil.
			if strings.Contains(err.Error(), "closed") {
				return nil
			}
			return err
		case conn := <-s.conns:
			s.accepter.Accept(conn)
		}
	}
}

View File

@ -3,7 +3,7 @@
// DO NOT EDIT!
/*
Package udf is a generated protocol buffer package.
Package agent is a generated protocol buffer package.
It is generated from these files:
udf.proto
@ -29,7 +29,7 @@ It has these top-level messages:
Request
Response
*/
package udf
package agent
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@ -107,8 +107,8 @@ func (*InfoRequest) ProtoMessage() {}
func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type InfoResponse struct {
Wants EdgeType `protobuf:"varint,1,opt,name=wants,enum=udf.EdgeType" json:"wants,omitempty"`
Provides EdgeType `protobuf:"varint,2,opt,name=provides,enum=udf.EdgeType" json:"provides,omitempty"`
Wants EdgeType `protobuf:"varint,1,opt,name=wants,enum=agent.EdgeType" json:"wants,omitempty"`
Provides EdgeType `protobuf:"varint,2,opt,name=provides,enum=agent.EdgeType" json:"provides,omitempty"`
Options map[string]*OptionInfo `protobuf:"bytes,3,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
@ -125,7 +125,7 @@ func (m *InfoResponse) GetOptions() map[string]*OptionInfo {
}
type OptionInfo struct {
ValueTypes []ValueType `protobuf:"varint,1,rep,name=valueTypes,enum=udf.ValueType" json:"valueTypes,omitempty"`
ValueTypes []ValueType `protobuf:"varint,1,rep,name=valueTypes,enum=agent.ValueType" json:"valueTypes,omitempty"`
}
func (m *OptionInfo) Reset() { *m = OptionInfo{} }
@ -168,7 +168,7 @@ func (m *Option) GetValues() []*OptionValue {
}
type OptionValue struct {
Type ValueType `protobuf:"varint,1,opt,name=type,enum=udf.ValueType" json:"type,omitempty"`
Type ValueType `protobuf:"varint,1,opt,name=type,enum=agent.ValueType" json:"type,omitempty"`
// Types that are valid to be assigned to Value:
// *OptionValue_BoolValue
// *OptionValue_IntValue
@ -1202,100 +1202,100 @@ func _Response_OneofSizer(msg proto.Message) (n int) {
}
func init() {
proto.RegisterType((*InfoRequest)(nil), "udf.InfoRequest")
proto.RegisterType((*InfoResponse)(nil), "udf.InfoResponse")
proto.RegisterType((*OptionInfo)(nil), "udf.OptionInfo")
proto.RegisterType((*InitRequest)(nil), "udf.InitRequest")
proto.RegisterType((*Option)(nil), "udf.Option")
proto.RegisterType((*OptionValue)(nil), "udf.OptionValue")
proto.RegisterType((*InitResponse)(nil), "udf.InitResponse")
proto.RegisterType((*SnapshotRequest)(nil), "udf.SnapshotRequest")
proto.RegisterType((*SnapshotResponse)(nil), "udf.SnapshotResponse")
proto.RegisterType((*RestoreRequest)(nil), "udf.RestoreRequest")
proto.RegisterType((*RestoreResponse)(nil), "udf.RestoreResponse")
proto.RegisterType((*KeepaliveRequest)(nil), "udf.KeepaliveRequest")
proto.RegisterType((*KeepaliveResponse)(nil), "udf.KeepaliveResponse")
proto.RegisterType((*ErrorResponse)(nil), "udf.ErrorResponse")
proto.RegisterType((*BeginBatch)(nil), "udf.BeginBatch")
proto.RegisterType((*Point)(nil), "udf.Point")
proto.RegisterType((*EndBatch)(nil), "udf.EndBatch")
proto.RegisterType((*Request)(nil), "udf.Request")
proto.RegisterType((*Response)(nil), "udf.Response")
proto.RegisterEnum("udf.EdgeType", EdgeType_name, EdgeType_value)
proto.RegisterEnum("udf.ValueType", ValueType_name, ValueType_value)
proto.RegisterType((*InfoRequest)(nil), "agent.InfoRequest")
proto.RegisterType((*InfoResponse)(nil), "agent.InfoResponse")
proto.RegisterType((*OptionInfo)(nil), "agent.OptionInfo")
proto.RegisterType((*InitRequest)(nil), "agent.InitRequest")
proto.RegisterType((*Option)(nil), "agent.Option")
proto.RegisterType((*OptionValue)(nil), "agent.OptionValue")
proto.RegisterType((*InitResponse)(nil), "agent.InitResponse")
proto.RegisterType((*SnapshotRequest)(nil), "agent.SnapshotRequest")
proto.RegisterType((*SnapshotResponse)(nil), "agent.SnapshotResponse")
proto.RegisterType((*RestoreRequest)(nil), "agent.RestoreRequest")
proto.RegisterType((*RestoreResponse)(nil), "agent.RestoreResponse")
proto.RegisterType((*KeepaliveRequest)(nil), "agent.KeepaliveRequest")
proto.RegisterType((*KeepaliveResponse)(nil), "agent.KeepaliveResponse")
proto.RegisterType((*ErrorResponse)(nil), "agent.ErrorResponse")
proto.RegisterType((*BeginBatch)(nil), "agent.BeginBatch")
proto.RegisterType((*Point)(nil), "agent.Point")
proto.RegisterType((*EndBatch)(nil), "agent.EndBatch")
proto.RegisterType((*Request)(nil), "agent.Request")
proto.RegisterType((*Response)(nil), "agent.Response")
proto.RegisterEnum("agent.EdgeType", EdgeType_name, EdgeType_value)
proto.RegisterEnum("agent.ValueType", ValueType_name, ValueType_value)
}
func init() { proto.RegisterFile("udf.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 1100 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0x6d, 0x73, 0xdb, 0x44,
0x10, 0xb6, 0x22, 0xcb, 0x96, 0xd6, 0x79, 0x91, 0x8f, 0xb4, 0x88, 0x4c, 0xa7, 0x93, 0x8a, 0x49,
0x62, 0x02, 0x18, 0xc6, 0x05, 0xda, 0x61, 0x3a, 0x40, 0x4c, 0x5c, 0xec, 0x69, 0x49, 0x3a, 0x17,
0xb7, 0xdf, 0xe5, 0xe8, 0xe2, 0x6a, 0xea, 0x48, 0x46, 0x3a, 0x07, 0xcc, 0xcf, 0x81, 0xff, 0xc2,
0x07, 0xfe, 0x07, 0x33, 0x7c, 0xe4, 0x27, 0x30, 0xb7, 0x27, 0x9d, 0xce, 0x2f, 0xd0, 0x29, 0xd3,
0x19, 0xbe, 0xe9, 0x76, 0x9f, 0x7d, 0x7b, 0x6e, 0x77, 0x4f, 0xe0, 0xcc, 0xc2, 0xab, 0xf6, 0x34,
0x4d, 0x78, 0x42, 0xcc, 0x59, 0x78, 0xe5, 0x6f, 0x41, 0x63, 0x10, 0x5f, 0x25, 0x94, 0xfd, 0x30,
0x63, 0x19, 0xf7, 0xff, 0x30, 0x60, 0x53, 0x9e, 0xb3, 0x69, 0x12, 0x67, 0x8c, 0xbc, 0x0f, 0xd6,
0x8f, 0x41, 0xcc, 0x33, 0xcf, 0xd8, 0x37, 0x5a, 0xdb, 0x9d, 0xad, 0xb6, 0xb0, 0xef, 0x85, 0x63,
0x36, 0x9c, 0x4f, 0x19, 0x95, 0x3a, 0xf2, 0x01, 0xd8, 0xd3, 0x34, 0xb9, 0x89, 0x42, 0x96, 0x79,
0x1b, 0xeb, 0x70, 0x4a, 0x4d, 0x1e, 0x42, 0x3d, 0x99, 0xf2, 0x28, 0x89, 0x33, 0xcf, 0xdc, 0x37,
0x5b, 0x8d, 0xce, 0x5d, 0x44, 0xea, 0x31, 0xdb, 0xe7, 0x12, 0xd0, 0x8b, 0x79, 0x3a, 0xa7, 0x05,
0x7c, 0xef, 0x09, 0x6c, 0xea, 0x0a, 0xe2, 0x82, 0xf9, 0x8a, 0xcd, 0x31, 0x2f, 0x87, 0x8a, 0x4f,
0x72, 0x00, 0xd6, 0x4d, 0x30, 0x99, 0x31, 0xcc, 0xa1, 0xd1, 0xd9, 0x41, 0xcf, 0xd2, 0x06, 0xfd,
0x4b, 0xed, 0x97, 0x1b, 0x0f, 0x0d, 0xff, 0x11, 0x40, 0xa9, 0x20, 0x6d, 0x00, 0x54, 0x89, 0x5c,
0x45, 0xa5, 0x66, 0x6b, 0xbb, 0xb3, 0x8d, 0xd6, 0x2f, 0x0a, 0x31, 0xd5, 0x10, 0xfe, 0x67, 0x82,
0xb4, 0x88, 0xe7, 0xa4, 0x91, 0x83, 0xb2, 0x26, 0x03, 0x6b, 0x6a, 0x68, 0x91, 0x55, 0x01, 0xfe,
0x63, 0xa8, 0x49, 0x11, 0x21, 0x50, 0x8d, 0x83, 0x6b, 0x96, 0xe7, 0x8e, 0xdf, 0xa4, 0x05, 0x35,
0x8c, 0x20, 0x18, 0x14, 0x3e, 0x5c, 0xcd, 0x07, 0x66, 0x41, 0x73, 0xbd, 0xff, 0xa7, 0x01, 0x0d,
0x4d, 0x4e, 0x7c, 0xa8, 0xf2, 0xf9, 0x94, 0xe5, 0x37, 0xb4, 0x9c, 0x37, 0xea, 0xc8, 0x5d, 0x70,
0x46, 0x49, 0x32, 0x79, 0xa1, 0xe8, 0xb1, 0xfb, 0x15, 0x5a, 0x8a, 0xc8, 0x1d, 0xb0, 0xa3, 0x98,
0x4b, 0xb5, 0xb9, 0x6f, 0xb4, 0xcc, 0x7e, 0x85, 0x2a, 0x09, 0xf1, 0xa1, 0x11, 0x26, 0xb3, 0xd1,
0x84, 0x49, 0x40, 0x75, 0xdf, 0x68, 0x19, 0xfd, 0x0a, 0xd5, 0x85, 0x02, 0x93, 0xf1, 0x34, 0x8a,
0xc7, 0x12, 0x63, 0x89, 0xd2, 0x04, 0x46, 0x13, 0x92, 0x43, 0xd8, 0x0a, 0x67, 0x69, 0xa0, 0x52,
0xf7, 0x6a, 0x79, 0xa8, 0x45, 0x71, 0xb7, 0x9e, 0x5f, 0xa4, 0xff, 0x95, 0xe8, 0x46, 0x41, 0x74,
0xde, 0x8d, 0x1e, 0xd4, 0xb3, 0xd9, 0xe5, 0x25, 0xcb, 0x64, 0x3f, 0xda, 0xb4, 0x38, 0x92, 0x5d,
0xb0, 0x58, 0x9a, 0x26, 0x29, 0x16, 0xe7, 0x50, 0x79, 0xf0, 0x9b, 0xb0, 0x73, 0x11, 0x07, 0xd3,
0xec, 0x65, 0x52, 0x5c, 0x96, 0xdf, 0x06, 0xb7, 0x14, 0xe5, 0x6e, 0xf7, 0xc0, 0xce, 0x72, 0x19,
0xfa, 0xdd, 0xa4, 0xea, 0xec, 0x7f, 0x04, 0xdb, 0x94, 0x65, 0x3c, 0x49, 0x59, 0x71, 0xdd, 0xff,
0x86, 0x3e, 0x81, 0x1d, 0x85, 0xfe, 0x8f, 0x39, 0x1f, 0x82, 0xfb, 0x84, 0xb1, 0x69, 0x30, 0x89,
0x6e, 0x54, 0x48, 0x02, 0x55, 0x1e, 0xe5, 0x0d, 0x63, 0x52, 0xfc, 0xf6, 0x8f, 0xa0, 0xa9, 0xe1,
0xf2, 0x60, 0xeb, 0x80, 0x07, 0xb0, 0xd5, 0x13, 0x9e, 0x15, 0x48, 0xc5, 0x35, 0xf4, 0xb8, 0xbf,
0x1b, 0x00, 0x5d, 0x36, 0x8e, 0xe2, 0x6e, 0xc0, 0x2f, 0x5f, 0xae, 0xed, 0xd1, 0x5d, 0xb0, 0xc6,
0x69, 0x32, 0x9b, 0x16, 0x09, 0xe3, 0x81, 0x7c, 0x0c, 0x55, 0x1e, 0x8c, 0x8b, 0x79, 0x7e, 0x0f,
0xfb, 0xaf, 0x74, 0xd4, 0x1e, 0x06, 0xe3, 0x7c, 0x94, 0x11, 0x26, 0x1c, 0x67, 0xd1, 0xcf, 0xb2,
0x8b, 0x4c, 0x8a, 0xdf, 0xe4, 0x36, 0xd4, 0x46, 0xf3, 0x33, 0x11, 0xce, 0x42, 0x8a, 0xf2, 0xd3,
0xde, 0x03, 0x70, 0x94, 0xf9, 0x9a, 0x81, 0xdf, 0xd5, 0x07, 0xde, 0xd1, 0xe7, 0xfb, 0x57, 0x0b,
0xac, 0x67, 0x49, 0x14, 0xaf, 0xa5, 0x4e, 0xd5, 0xb6, 0xa1, 0xd5, 0xb6, 0x07, 0x76, 0x18, 0xf0,
0x60, 0x14, 0x64, 0x72, 0x02, 0x1c, 0xaa, 0xce, 0xa4, 0x05, 0x3b, 0x29, 0xe3, 0x2c, 0x16, 0x1d,
0xfa, 0x2c, 0x99, 0x44, 0x97, 0x73, 0xcc, 0xde, 0xa1, 0xcb, 0xe2, 0x92, 0x21, 0x4b, 0x67, 0xe8,
0x2e, 0x40, 0x18, 0x5d, 0xb3, 0x38, 0xc3, 0x1d, 0x51, 0xdb, 0x37, 0x5b, 0x0e, 0xd5, 0x24, 0xa4,
0x95, 0x33, 0x58, 0x47, 0x06, 0x77, 0x91, 0x41, 0xcc, 0x7e, 0x85, 0xbc, 0x6f, 0x60, 0xf3, 0x2a,
0x62, 0x93, 0x30, 0x3b, 0xc5, 0xd1, 0xf3, 0x6c, 0xb4, 0xb8, 0xa3, 0x59, 0x3c, 0xd6, 0xd4, 0xd2,
0x72, 0xc1, 0x82, 0x3c, 0x00, 0x47, 0x9e, 0x07, 0x31, 0xf7, 0x1c, 0xed, 0xca, 0x74, 0xf3, 0x41,
0xcc, 0xa5, 0x6d, 0x89, 0x2d, 0x43, 0x5f, 0xe0, 0x44, 0x7b, 0xf0, 0x0f, 0xa1, 0xa5, 0x7a, 0x21,
0xb4, 0x14, 0x69, 0xb7, 0xdc, 0x78, 0x2b, 0xb7, 0xbc, 0xf7, 0x35, 0x34, 0x57, 0xca, 0x7d, 0x9d,
0x03, 0x43, 0x77, 0xf0, 0x08, 0xb6, 0x17, 0x0b, 0x7e, 0x9d, 0xb5, 0xb9, 0x36, 0xbc, 0x56, 0xf2,
0x1b, 0x75, 0xe9, 0x6f, 0x06, 0xd8, 0xbd, 0x38, 0x7c, 0xd3, 0x81, 0x13, 0x2d, 0x7d, 0x1d, 0xfc,
0x24, 0x17, 0x35, 0xc5, 0x6f, 0xf2, 0x61, 0xde, 0x42, 0x55, 0xbc, 0x95, 0x77, 0xe5, 0xf3, 0x9b,
0xbb, 0x5e, 0xe9, 0xa2, 0xb7, 0x3e, 0x6e, 0x7f, 0x6d, 0x40, 0xbd, 0xd8, 0x55, 0x87, 0x50, 0x8d,
0xe2, 0xab, 0x04, 0x0d, 0x8b, 0x67, 0x4c, 0xfb, 0xc5, 0xe8, 0x57, 0x28, 0xea, 0x25, 0x2e, 0xe2,
0xf9, 0x63, 0x5d, 0xe0, 0xd4, 0xab, 0x2a, 0x71, 0x11, 0x27, 0x9f, 0x83, 0xf3, 0xaa, 0xd8, 0x73,
0x58, 0x72, 0xa3, 0x73, 0x0b, 0xc1, 0xcb, 0x5b, 0x52, 0xbc, 0x68, 0x0a, 0x49, 0x3a, 0xda, 0x96,
0xae, 0xa2, 0x95, 0x9c, 0xab, 0xa5, 0xf7, 0x40, 0xbc, 0x73, 0x05, 0x8e, 0x7c, 0x02, 0xf5, 0x54,
0x6e, 0x6f, 0x24, 0xa6, 0xd1, 0x79, 0x07, 0x4d, 0x16, 0xf7, 0x7f, 0xbf, 0x42, 0x0b, 0x14, 0x39,
0x02, 0x6b, 0x24, 0x36, 0x9d, 0xe7, 0x6a, 0x7f, 0x1c, 0xe5, 0xee, 0xeb, 0x57, 0xa8, 0xd4, 0x13,
0x1f, 0xac, 0xa9, 0x98, 0x11, 0xaf, 0x89, 0x40, 0x28, 0xa7, 0x46, 0x60, 0x50, 0x45, 0xee, 0x81,
0xc9, 0xe2, 0xd0, 0x23, 0x88, 0xd8, 0x5a, 0xb8, 0xc1, 0x7e, 0x85, 0x0a, 0x5d, 0xd7, 0x81, 0xfa,
0x35, 0xcb, 0xb2, 0x60, 0xcc, 0xfc, 0x5f, 0x4c, 0xb0, 0xd5, 0x46, 0x3f, 0x5a, 0xe0, 0xbc, 0xb9,
0xf2, 0x4b, 0xa5, 0x48, 0x3f, 0x5a, 0x20, 0xbd, 0xa9, 0x91, 0xae, 0x03, 0x23, 0x4e, 0xbe, 0x58,
0x65, 0xfd, 0xf6, 0x32, 0xeb, 0xca, 0x44, 0xa3, 0xfd, 0xfe, 0x0a, 0xed, 0xb7, 0x96, 0x68, 0x57,
0x56, 0x25, 0xef, 0x9f, 0x2e, 0xf3, 0xbe, 0xbb, 0xc8, 0xbb, 0x32, 0x51, 0xc4, 0x1f, 0x17, 0x4f,
0x58, 0x0d, 0xf1, 0x44, 0xb2, 0xa5, 0xbf, 0x72, 0x82, 0x57, 0x84, 0xfc, 0x8f, 0x97, 0x74, 0x7c,
0x0f, 0xec, 0xe2, 0x1f, 0x98, 0x00, 0xd4, 0x2e, 0x86, 0xb4, 0x77, 0xf2, 0xbd, 0x5b, 0x21, 0x0e,
0x58, 0xdd, 0x93, 0xe1, 0xb7, 0x7d, 0xd7, 0x38, 0x3e, 0x05, 0x47, 0xfd, 0xac, 0x11, 0x1b, 0xaa,
0xdd, 0xf3, 0xf3, 0xa7, 0x6e, 0x85, 0xd4, 0xc1, 0x1c, 0x9c, 0x0d, 0x5d, 0x43, 0x98, 0x9d, 0x9e,
0x3f, 0xef, 0x3e, 0xed, 0xb9, 0x1b, 0xb9, 0x8b, 0xc1, 0xd9, 0x77, 0xae, 0x49, 0x36, 0xc1, 0x3e,
0x7d, 0x4e, 0x4f, 0x86, 0x83, 0xf3, 0x33, 0xb7, 0x3a, 0xaa, 0xe1, 0x2f, 0xfd, 0xfd, 0xbf, 0x03,
0x00, 0x00, 0xff, 0xff, 0x29, 0x62, 0x27, 0x91, 0xdf, 0x0b, 0x00, 0x00,
// 1099 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x57, 0xdd, 0x72, 0xdb, 0x44,
0x14, 0xae, 0x22, 0xcb, 0x96, 0x8e, 0x9d, 0x44, 0xd9, 0x86, 0x54, 0x04, 0x26, 0x13, 0x04, 0x6d,
0x93, 0x50, 0x0c, 0x98, 0xbf, 0xd2, 0x61, 0x60, 0x62, 0x62, 0x88, 0x87, 0x36, 0xee, 0x28, 0x6e,
0xef, 0xe5, 0x68, 0xe3, 0x6a, 0xea, 0x48, 0x46, 0x5a, 0x07, 0xcc, 0x15, 0xef, 0xc3, 0x2d, 0x0f,
0xc1, 0x05, 0x4f, 0xc2, 0x0c, 0xef, 0xc0, 0xfe, 0x69, 0xb5, 0xb2, 0x0d, 0x9d, 0x32, 0x9d, 0xe9,
0x9d, 0x77, 0xcf, 0x77, 0xce, 0xd9, 0x73, 0xbe, 0xf3, 0x23, 0x83, 0x33, 0x8b, 0x2e, 0xdb, 0xd3,
0x2c, 0x25, 0x29, 0xb2, 0xc2, 0x31, 0x4e, 0x88, 0xbf, 0x0e, 0xcd, 0x7e, 0x72, 0x99, 0x06, 0xf8,
0xc7, 0x19, 0xce, 0x89, 0xff, 0xb7, 0x01, 0x2d, 0x71, 0xce, 0xa7, 0x69, 0x92, 0x63, 0x74, 0x1b,
0xac, 0x9f, 0xc2, 0x84, 0xe4, 0x9e, 0xb1, 0x6f, 0x1c, 0x6c, 0x74, 0x36, 0xdb, 0x5c, 0xad, 0xdd,
0x8b, 0xc6, 0x78, 0x38, 0x9f, 0xe2, 0x40, 0x48, 0xd1, 0xfb, 0x60, 0x53, 0xb3, 0xd7, 0x71, 0x84,
0x73, 0x6f, 0x6d, 0x35, 0x52, 0x01, 0xd0, 0x03, 0x68, 0xa4, 0x53, 0x12, 0x53, 0xfb, 0x9e, 0xb9,
0x6f, 0x1e, 0x34, 0x3b, 0xfb, 0x12, 0xab, 0x7b, 0x6e, 0x0f, 0x04, 0xa4, 0x97, 0x90, 0x6c, 0x1e,
0x14, 0x0a, 0xbb, 0x8f, 0xa0, 0xa5, 0x0b, 0x90, 0x0b, 0xe6, 0x73, 0x3c, 0xe7, 0xaf, 0x73, 0x02,
0xf6, 0x13, 0xdd, 0x05, 0xeb, 0x3a, 0x9c, 0xcc, 0x30, 0x7f, 0x47, 0xb3, 0xb3, 0x25, 0x6d, 0x0b,
0x2d, 0xee, 0x41, 0xc8, 0x1f, 0xac, 0xdd, 0x37, 0xfc, 0xaf, 0x01, 0x4a, 0x01, 0xfa, 0x08, 0x80,
0x8b, 0xd8, 0x7b, 0x59, 0xc4, 0x26, 0x8d, 0xc3, 0x95, 0xfa, 0x4f, 0x0b, 0x41, 0xa0, 0x61, 0xfc,
0xcf, 0x59, 0xfa, 0x62, 0x22, 0xd3, 0x47, 0x7d, 0xab, 0xc8, 0x0c, 0x1e, 0xd9, 0x7a, 0xc5, 0xbb,
0x0a, 0xc3, 0x3f, 0x85, 0xba, 0xb8, 0x42, 0x08, 0x6a, 0x49, 0x78, 0x85, 0x65, 0x04, 0xfc, 0x37,
0x3a, 0x82, 0x3a, 0xf7, 0xc1, 0x72, 0xc9, 0xac, 0xa0, 0x8a, 0x15, 0xfe, 0x92, 0x40, 0x22, 0xfc,
0xbf, 0x0c, 0x68, 0x6a, 0xf7, 0xe8, 0x3d, 0xa8, 0x11, 0xfa, 0x34, 0xc9, 0xd7, 0xf2, 0xeb, 0xb9,
0x14, 0xed, 0x81, 0x33, 0x4a, 0xd3, 0xc9, 0x53, 0x95, 0x28, 0xfb, 0xf4, 0x46, 0x50, 0x5e, 0xa1,
0xb7, 0xc1, 0x8e, 0x13, 0x22, 0xc4, 0x26, 0x15, 0x9b, 0x54, 0xac, 0x6e, 0x90, 0x0f, 0xcd, 0x28,
0x9d, 0x8d, 0x26, 0x58, 0x00, 0x6a, 0x14, 0x60, 0x50, 0x80, 0x7e, 0xc9, 0x30, 0x39, 0xc9, 0xe2,
0x64, 0x2c, 0x30, 0x16, 0x0b, 0x8f, 0x61, 0xb4, 0x4b, 0x74, 0x07, 0xd6, 0xa3, 0x59, 0x16, 0xaa,
0xc7, 0x7b, 0x75, 0xe9, 0xaa, 0x7a, 0xdd, 0x6d, 0x48, 0x4a, 0x29, 0x5d, 0x2d, 0x91, 0x6e, 0x59,
0x9d, 0x1e, 0x34, 0xf2, 0xd9, 0xc5, 0x05, 0xce, 0x45, 0x7d, 0xda, 0x41, 0x71, 0x44, 0xdb, 0x60,
0xe1, 0x2c, 0x4b, 0x33, 0x1e, 0x9c, 0x13, 0x88, 0x83, 0xbf, 0x05, 0x9b, 0xe7, 0x49, 0x38, 0xcd,
0x9f, 0xa5, 0x05, 0x65, 0x7e, 0x1b, 0xdc, 0xf2, 0x4a, 0x9a, 0xdd, 0x05, 0x3b, 0x97, 0x77, 0xdc,
0x6e, 0x2b, 0x50, 0x67, 0xff, 0x1e, 0x6c, 0x50, 0x1c, 0x49, 0x33, 0x5c, 0x90, 0xfe, 0x5f, 0xe8,
0x63, 0xd8, 0x54, 0xe8, 0xff, 0xf9, 0xe6, 0x3b, 0xe0, 0xfe, 0x80, 0xf1, 0x34, 0x9c, 0xc4, 0xd7,
0xca, 0x25, 0x2d, 0x1a, 0x12, 0xcb, 0xa2, 0x31, 0x03, 0xfe, 0xdb, 0xbf, 0x0b, 0x5b, 0x1a, 0x4e,
0x3a, 0x5b, 0x05, 0xbc, 0x0d, 0xeb, 0x3d, 0x66, 0x59, 0x81, 0x94, 0x5f, 0x43, 0xf7, 0xfb, 0xa7,
0x01, 0xd0, 0xc5, 0xe3, 0x38, 0xe9, 0x86, 0xe4, 0xe2, 0xd9, 0xca, 0x3a, 0xa5, 0x8a, 0xe3, 0x2c,
0x9d, 0x4d, 0x8b, 0x07, 0xf3, 0x03, 0xfa, 0x90, 0xfa, 0x0c, 0xc7, 0x45, 0x6f, 0xbf, 0x25, 0x2b,
0xb0, 0x34, 0xd5, 0x1e, 0x52, 0xa9, 0x68, 0x6b, 0x0e, 0x64, 0xa6, 0xf3, 0xf8, 0x17, 0x51, 0x47,
0xf4, 0x91, 0xec, 0x37, 0xda, 0x81, 0xfa, 0x68, 0x7e, 0xc6, 0x1c, 0x5a, 0x3c, 0x49, 0xf2, 0xb4,
0xfb, 0x05, 0x38, 0x4a, 0x7d, 0x45, 0xf3, 0x6f, 0xeb, 0xcd, 0xef, 0xe8, 0x9d, 0xfe, 0x9b, 0x05,
0xd6, 0xe3, 0x94, 0x96, 0xf0, 0xaa, 0x9c, 0xa8, 0xe8, 0xd6, 0xb4, 0xe8, 0x28, 0xaf, 0x51, 0x48,
0xc2, 0x51, 0x98, 0x8b, 0x1e, 0x70, 0x02, 0x75, 0x46, 0x07, 0xb0, 0x99, 0x61, 0x42, 0xe3, 0xa2,
0x35, 0xfa, 0x38, 0x9d, 0xc4, 0x17, 0x73, 0xfe, 0x7a, 0x27, 0x58, 0xbc, 0x2e, 0x73, 0x64, 0xe9,
0x39, 0xda, 0x03, 0x88, 0xa8, 0xdf, 0x24, 0xe7, 0xb3, 0xa2, 0x4e, 0x33, 0xe5, 0x04, 0xda, 0x0d,
0x9d, 0x00, 0x22, 0x87, 0x0d, 0x9e, 0xc3, 0x1d, 0x99, 0x43, 0xfe, 0xfe, 0xa5, 0xf4, 0x75, 0xa1,
0x75, 0x19, 0xe3, 0x49, 0x94, 0x9f, 0xf0, 0xf6, 0xf3, 0x6c, 0xae, 0xb3, 0x57, 0xd1, 0xf9, 0x4e,
0x03, 0x08, 0xdd, 0x8a, 0x0e, 0xfa, 0x12, 0x1c, 0x71, 0xee, 0x27, 0xc4, 0x73, 0x2a, 0xc4, 0xe9,
0x06, 0xa8, 0x54, 0x68, 0x97, 0xe8, 0xd2, 0xfd, 0x39, 0xef, 0x6c, 0x0f, 0xfe, 0xd5, 0xbd, 0x00,
0x54, 0xdc, 0x8b, 0x2b, 0x8d, 0xed, 0xe6, 0x2b, 0x61, 0x7b, 0xf7, 0x1b, 0xd8, 0x5a, 0x0a, 0xf9,
0x45, 0x06, 0x0c, 0xdd, 0xc0, 0x57, 0xb0, 0x51, 0x0d, 0xf9, 0x45, 0xda, 0xe6, 0x4a, 0xf7, 0x5a,
0xc8, 0x2f, 0x55, 0xad, 0x7f, 0x18, 0x60, 0xf7, 0x92, 0xe8, 0x65, 0x5b, 0x8f, 0x95, 0xf6, 0x55,
0xf8, 0xb3, 0x18, 0xd9, 0x01, 0xff, 0x8d, 0x3e, 0x90, 0xa5, 0x54, 0xe3, 0xbc, 0xbc, 0x59, 0xac,
0x65, 0x69, 0x7c, 0xa9, 0x9a, 0x5e, 0x79, 0xe3, 0xfd, 0x6a, 0x42, 0xa3, 0x98, 0x5b, 0x07, 0x50,
0x8b, 0xe9, 0xa2, 0xe5, 0x8a, 0xe5, 0x5a, 0xd3, 0x3e, 0x40, 0xe8, 0xec, 0xe7, 0x08, 0x81, 0x8c,
0x89, 0x5c, 0xe2, 0x25, 0x52, 0xed, 0x5a, 0x81, 0x8c, 0x09, 0xa2, 0x0f, 0x7b, 0x5e, 0xcc, 0x3d,
0x1e, 0x78, 0xb3, 0x73, 0x4b, 0xc2, 0x17, 0xe7, 0x26, 0xdb, 0x71, 0x0a, 0x8b, 0x3e, 0xd5, 0xe6,
0x76, 0x8d, 0xeb, 0x15, 0x7d, 0xb6, 0xb0, 0x23, 0xd8, 0xee, 0x2b, 0x90, 0xe8, 0x63, 0x68, 0x64,
0x62, 0xa2, 0xf3, 0x04, 0x35, 0x3b, 0x6f, 0x48, 0xa5, 0xea, 0x56, 0xa0, 0x3a, 0x05, 0x0e, 0x1d,
0x82, 0x35, 0x62, 0xd3, 0xcf, 0x73, 0x2b, 0x5f, 0x24, 0xe5, 0x44, 0xa4, 0x60, 0x81, 0xa0, 0xdb,
0xdb, 0x9a, 0xb2, 0x8e, 0xf1, 0xb6, 0x38, 0xb4, 0xa5, 0x77, 0x11, 0x43, 0x71, 0x21, 0x7a, 0x17,
0x4c, 0x9c, 0x44, 0x1e, 0xe2, 0x98, 0xcd, 0x05, 0x46, 0x29, 0x8c, 0x49, 0xbb, 0x0e, 0x34, 0xae,
0xe8, 0x56, 0xa1, 0x42, 0xff, 0x77, 0x13, 0x6c, 0x35, 0xed, 0x0f, 0x2b, 0x1c, 0xdc, 0x5c, 0xf1,
0xe9, 0xa5, 0x48, 0x38, 0xac, 0x90, 0x70, 0xb3, 0x42, 0x82, 0x0e, 0xa5, 0x2c, 0xdc, 0x5f, 0x66,
0xc1, 0x5b, 0x66, 0x41, 0x29, 0x69, 0x34, 0x7c, 0xb6, 0x44, 0xc3, 0xad, 0x25, 0x1a, 0x94, 0x5e,
0xc9, 0x43, 0x67, 0x91, 0x87, 0x9d, 0x45, 0x1e, 0x94, 0x92, 0x22, 0xe2, 0x5e, 0xb1, 0xe8, 0xea,
0x5c, 0x63, 0xbb, 0xc8, 0x9c, 0xbe, 0x0d, 0x59, 0x96, 0x39, 0xe8, 0xb5, 0xd3, 0x76, 0xf4, 0x0e,
0x9d, 0x01, 0xf2, 0xeb, 0x19, 0x01, 0xd4, 0xcf, 0x87, 0x41, 0xef, 0xf8, 0x91, 0x7b, 0x03, 0x39,
0x60, 0x75, 0x8f, 0x87, 0xdf, 0x9e, 0xba, 0xc6, 0xd1, 0x09, 0x38, 0xea, 0xd3, 0x0e, 0xd9, 0x50,
0xeb, 0x0e, 0x06, 0x0f, 0x29, 0xa2, 0x01, 0x66, 0xff, 0x6c, 0xe8, 0x1a, 0x4c, 0xed, 0x64, 0xf0,
0xa4, 0xfb, 0xb0, 0xe7, 0xae, 0x49, 0x13, 0xfd, 0xb3, 0xef, 0x5d, 0x13, 0xb5, 0xc0, 0x3e, 0x79,
0x12, 0x1c, 0x0f, 0xfb, 0x83, 0x33, 0xb7, 0x36, 0xaa, 0xf3, 0xbf, 0x04, 0x9f, 0xfc, 0x13, 0x00,
0x00, 0xff, 0xff, 0x8b, 0x8a, 0xa6, 0x74, 0x1f, 0x0c, 0x00, 0x00,
}

View File

@ -1,6 +1,6 @@
syntax = "proto3";
package udf;
package agent;
//------------------------------------------------------
// RPC Messages for Kapacitor to communicate with

Some files were not shown because too many files have changed in this diff Show More