diff --git a/CHANGELOG.md b/CHANGELOG.md index 979cc7bf2..53dc021df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,55 @@ ### Release Notes +>**Breaking changes may require special upgrade steps, please read below.** + +#### Upgrade Steps + +Changes to how and where task data is stored have been made. +In order to safely upgrade to version 0.13 you need to follow these steps: + +1. Upgrade the Kapacitor binary. +2. Configure new database location. By default the location `/var/lib/kapacitor/kapacitor.db` is chosen for package installs or `./kapacitor.db` for manual installs. +Do **not** remove the configuration for the location of the old task.db database file since it is still needed to do the migration. + + ``` + [storage] + boltdb = "/var/lib/kapacitor/kapacitor.db" + ``` + +3. Restart Kapacitor. At this point Kapacitor will migrate all existing data to the new database file. +If any errors occur Kapacitor will log them and fail to start up. This way if Kapacitor starts up you can be sure the migration was a success and can continue normal operation. +The old database is opened in read only mode so that existing data cannot be corrupted. +It's recommended to start Kapacitor in debug logging mode for the migration so you can follow the details of the migration process. + +At this point you may remove the configuration for the old `task` `dir` and restart Kapacitor to ensure everything is working. +Kapacitor will attempt the migration on every startup while the old configuration and db file exist, but will skip any data that was already migrated. + + +#### API Changes + +With this release the API has been updated to what we believe will be the stable version for a 1.0 release. +Small changes may still be made but the significant work to create a RESTful HTTP API is complete. +Many breaking changes were introduced, see the [client/API.md](http://github.com/influxdata/kapacitor/blob/master/client/API.md) doc for details on how the API works now. 
+ +#### CLI Changes + +Along with the API changes, breaking changes were also made to the `kapacitor` CLI command. +Here is a breakdown of the CLI changes: + +* Everything has an ID now: tasks, recordings, even replays. + The `name` used before to define a task is now its `ID`. + As such instead of using `-name` and `-id` to refer to tasks and recordings, + the flags have been changed to `-task` and `-recording` accordingly. +* Replays can be listed and deleted like tasks and recordings. +* Replays default to `fast` clock mode. +* The record and replay commands now have a `-no-wait` option to start but not wait for the recording/replay to complete. +* Listing recordings and replays displays the status of the respective action. +* Record and Replay command now have an optional flag `-replay-id`/`-recording-id` to specify the ID of the replay or recording. + If not set then a random ID will be chosen like the previous behavior. + +#### Notable features + UDF can now be managed externally to Kapacitor via Unix sockets. A process or container can be launched independent of Kapacitor exposing a socket. On startup Kapacitor will connect to the socket and begin communication. @@ -55,6 +104,7 @@ For example, let's say we want to store all data that triggered an alert in Infl - [#486](https://github.com/influxdata/kapacitor/pull/486): Default config file location. - [#461](https://github.com/influxdata/kapacitor/pull/461): Make Alerta `event` property configurable. - [#491](https://github.com/influxdata/kapacitor/pull/491): BREAKING: Rewriting stateful expression in order to improve performance, the only breaking change is: short circuit evaluation for booleans - for example: ``lambda: "bool_value" && (count() > 100)`` if "bool_value" is false, we won't evaluate "count". +- [#504](https://github.com/influxdata/kapacitor/pull/504): BREAKING: Many changes to the API and underlying storage system. This release requires a special upgrade process. 
### Bugfixes diff --git a/README.md b/README.md index 42dabe9b9..f59e6009f 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,7 @@ Place the above script into a file `cpu_alert.tick` then run these commands to s ```sh # Define the task (assumes cpu data is in db 'telegraf') kapacitor define \ - -name cpu_alert \ + cpu_alert \ -type stream \ -dbrp telegraf.default \ -tick ./cpu_alert.tick diff --git a/alert.go b/alert.go index 1dc8c65a8..8ef38117a 100644 --- a/alert.go +++ b/alert.go @@ -671,7 +671,7 @@ func (a *AlertNode) renderID(name string, group models.GroupID, tags models.Tags } info := idInfo{ Name: name, - TaskName: a.et.Task.Name, + TaskName: a.et.Task.ID, Group: g, Tags: tags, } @@ -693,7 +693,7 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group minfo := messageInfo{ idInfo: idInfo{ Name: name, - TaskName: a.et.Task.Name, + TaskName: a.et.Task.ID, Group: g, Tags: tags, }, diff --git a/client/API.md b/client/API.md index ce51c7d3f..c86e987c2 100644 --- a/client/API.md +++ b/client/API.md @@ -1,11 +1,20 @@ # Kapacitor API Reference Documentation +* [General Information](#general-information) +* [Writing Data](#writing-data) +* [Tasks](#tasks) +* [Recordings](#recordings) +* [Replays](#replays) +* [Miscellaneous](#miscellaneous) + +## General Information + Kapacitor provides an HTTP API on port 9092 by default. With the API you can control which tasks are executing, query status of tasks and manage recordings etc. Each section below defines the available API endpoints and there inputs and outputs. -All requests are versioned and namespaced using the root path `/kapacitor/v1/`. +All requests are versioned and namespaced using the base path `/kapacitor/v1/`. ### Response Codes @@ -13,7 +22,7 @@ All requests can return these response codes: | HTTP Response Code | Meaning | | ------------------ | ------- | -| 2xx | The request was a success the content is dependent on the request. 
| +| 2xx | The request was a success, content is dependent on the request. | | 4xx | Invalid request, refer to error for what it wrong with the request. Repeating the request will continue to return the same error. | | 5xx | The server was unable to process the request, refer to the error for a reason. Repeating the request may result in a success if the server issue has been resolved. | @@ -36,6 +45,12 @@ Query parameters are used only for GET requests and all other requests expect pa >NOTE: The /kapacitor/v1/write endpoint is the one exception to this rule since Kapacitor is compatible with the InfluxDB /write endpoint. + +### Links + +When creating resources in Kapacitor the API server will return a `link` object with an `href` of the resource. +Clients should not need to perform path manipulation in most cases and can use the links provided from previous calls. + ## Writing Data Kapacitor can accept writes over HTTP using the line protocol. @@ -46,7 +61,8 @@ This endpoint is identical in nature to the InfluxDB write endpoint. | db | Database name for the writes. | | rp | Retention policy name for the writes. | ->NOTE: Kapacitor scopes all points by their database and retention policy. This means you MUST specify the `rp` for writes or Kapacitor will not know which retention policy to use. +>NOTE: Kapacitor scopes all points by their database and retention policy. +This means you MUST specify the `rp` for writes or Kapacitor will not know which retention policy to use. #### Example @@ -104,8 +120,16 @@ Response with task id and link. ``` { + "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", - "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"} + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream\n |from()\n .measurement('cpu')\n", + "dot" : "digraph TASK_ID { ... 
}", + "status" : "enabled", + "executing" : true, + "error" : "", + "stats" : {} } ``` @@ -162,10 +186,11 @@ Response with task id and link. #### Response -| Code | Meaning | -| ---- | ------- | -| 200 | Success, contains id and link for new task. | -| 404 | Task does not exist | +| Code | Meaning | +| ---- | ------- | +| 200 | Task created, contains task information. | +| 204 | Task updated, no content | +| 404 | Task does not exist | ### Get Task @@ -248,9 +273,9 @@ DELETE /kapacitor/v1/tasks/TASK_ID #### Response -| Code | Meaning | -| ---- | ------- | -| 204 | Success | +| Code | Meaning | +| ---- | ------- | +| 204 | Success | >NOTE: Deleting a non-existent task is not an error and will return a 204 success. @@ -262,7 +287,7 @@ To get information about several tasks make a GET request to the `/kapacitor/v1/ | Query Parameter | Default | Purpose | | --------------- | ------- | ------- | | pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | -| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. | +| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. | | dot-view | attributes | One of `labels` or `attributes`. Labels is less readable but will correctly render with all the information contained in labels. | | script-format | formatted | One of `formatted` or `raw`. Raw will return the script identical to how it was defined. Formatted will first format the script. | | offset | 0 | Offset count for paginating through tasks. 
| @@ -280,7 +305,7 @@ GET /kapacitor/v1/tasks { "tasks" : [ { - "link" : "/kapacitor/v1/tasks/TASK_ID", + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", "type" : "stream", "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], @@ -292,7 +317,7 @@ GET /kapacitor/v1/tasks "stats" : {} }, { - "link" : "/kapacitor/v1/tasks/ANOTHER_TASK_ID", + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/ANOTHER_TASK_ID"}, "id" : "ANOTHER_TASK_ID", "type" : "stream", "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], @@ -317,7 +342,7 @@ GET /kapacitor/v1/task?pattern=TASK* { "tasks" : [ { - "link" : "/kapacitor/v1/tasks/TASK_ID", + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", "type" : "stream", "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], @@ -342,14 +367,14 @@ GET /kapacitor/v1/tasks?fields=status&fields=executing&fields=error { "tasks" : [ { - "link" : "/kapacitor/v1/tasks/TASK_ID", + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/TASK_ID"}, "id" : "TASK_ID", "status" : "enabled", "executing" : true, "error" : "", }, { - "link" : "/kapacitor/v1/tasks/ANOTHER_TASK_ID", + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/ANOTHER_TASK_ID"}, "id" : "ANOTHER_TASK_ID", "status" : "disabled", "executing" : true, @@ -361,12 +386,61 @@ GET /kapacitor/v1/tasks?fields=status&fields=executing&fields=error #### Response -| Code | Meaning | -| ---- | ------- | -| 200 | Success | +| Code | Meaning | +| ---- | ------- | +| 200 | Success | >NOTE: If the pattern does not match any tasks an empty list will be returned, with a 200 success. +### Custom Task HTTP Endpoints + +In TICKscript it is possible to expose a cache of recent data via the [HTTPOut](https://docs.influxdata.com/kapacitor/latest/nodes/http_out_node/) node. +The data is available at the path `/kapacitor/v1/tasks/TASK_ID/ENDPOINT_NAME`. 
+ +### Example + +For the TICKscript: + +```go +stream + |from() + .measurement('cpu') + |window() + .period(60s) + .every(60s) + |httpOut('mycustom_endpoint') +``` + +``` +GET /kapacitor/v1/tasks/TASK_ID/mycustom_endpoint +``` + +``` +{ + "series": [ + { + "name": "cpu", + "columns": [ + "time", + "value" + ], + "values": [ + [ + "2015-01-29T21:55:43.702900257Z", + 55 + ], + [ + "2015-01-29T21:56:43.702900257Z", + 42 + ], + ] + } + ] +} +``` + +The output is the same as a query for data to [InfluxDB](https://docs.influxdata.com/influxdb/latest/guides/querying_data/). + ## Recordings Kapacitor can save recordings of data and replay them against a specified task. @@ -476,7 +550,13 @@ All recordings are assigned an ID which is returned in this format with a link. ``` { "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, - "id" : "e24db07d-1646-4bb3-a445-828f5049bea0" + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 0, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "", + "status" : "running", + "progress" : 0 } ``` @@ -490,15 +570,13 @@ In order to determine when a recording has finished you must make a GET request A recording has these read only properties. -| Property | Description | -| -------- | ----------- | -| id | A unique identifier for the recording. | -| type | One of `stream` or `batch`. A recording cannot be replayed to a task of a different type. | -| size | Size of the recording on disk in bytes. | -| date | Date the recording finished. | -| error | Any error encountered when creating the recording. | -| status | One of `recording` or `finished`. | -| progress | Number between 0 and 1 indicating the approximate progress of the recording. | +| Property | Description | +| -------- | ----------- | +| size | Size of the recording on disk in bytes. | +| date | Date the recording finished. | +| error | Any error encountered when creating the recording. 
| +| status | One of `recording` or `finished`. | +| progress | Number between 0 and 1 indicating the approximate progress of the recording. | #### Example @@ -539,11 +617,30 @@ GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 } ``` +Or if the recording fails. + +``` +GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 +``` + +``` +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 1980353, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "error message explaining failure", + "status" : "failed", + "progress" : 1 +} +``` + #### Response | Code | Meaning | | ---- | ------- | -| 200 | Success, the recording is finished. | +| 200 | Success, the recording is no longer running. | | 202 | Success, the recording exists but is not finished. | | 404 | No such recording exists. | @@ -557,9 +654,9 @@ DELETE /kapacitor/v1/recordings/RECORDING_ID #### Response -| Code | Meaning | -| ---- | ------- | -| 204 | Success | +| Code | Meaning | +| ---- | ------- | +| 204 | Success | >NOTE: Deleting a non-existent recording is not an error and will return a 204 success. @@ -568,10 +665,12 @@ DELETE /kapacitor/v1/recordings/RECORDING_ID To list all recordings make a GET request to the `/kapacitor/v1/recordings` endpoint. Recordings are sorted by date. -| Query Parameter | Default | Purpose | -| --------------- | ------- | ------- | -| offset | 0 | Offset count for paginating through tasks. | -| limit | 100 | Maximum number of tasks to return. | +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | +| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. 
| +| offset | 0 | Offset count for paginating through tasks. | +| limit | 100 | Maximum number of tasks to return. | #### Example @@ -621,7 +720,7 @@ To replay a recording make a POST request to `/kapacitor/v1/replays/` | Parameter | Default | Purpose | | ---------- | ------- | ------- | -| id | | Unique identifier for the replay. If empty a random ID is chosen. | +| id | random | Unique identifier for the replay. If empty a random ID is chosen. | | task | | ID of task. | | recording | | ID of recording. | | recording-time | false | If true, use the times in the recording, otherwise adjust times relative to the current time. | @@ -651,7 +750,6 @@ POST /kapacitor/v1/replays/ } ``` - Replay a recording using a custom ID. ``` @@ -671,13 +769,19 @@ The request returns once the replay is started and provides a replay ID and link { "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "fast", + "recording-time" : false, + "status" : "running", + "progress" : 0, + "error" : "" } ``` -| Code | Meaning | -| ---- | ------- | -| 201 | Success, replay has started. | -| 404 | The specified task or recording does not exist. | +| Code | Meaning | +| ---- | ------- | +| 201 | Success, replay has started. | ### Waiting for a Replay @@ -734,14 +838,61 @@ GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c } ``` +Or if the replay fails. 
+ +``` +GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c +``` + +``` +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, + "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "fast", + "recording-time" : false, + "status" : "failed", + "progress" : 1, + "error" : "error message explaining failure" +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success, replay is no longer running. | +| 202 | Success, the replay exists but is not finished. | +| 404 | No such replay exists. | + +### Delete Replay + +To delete a replay make a DELETE request to the `/kapacitor/v1/replays/REPLAY_ID` endpoint. + +``` +DELETE /kapacitor/v1/replays/REPLAY_ID +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 204 | Success | + +>NOTE: Deleting a non-existent replay is not an error and will return a 204 success. + + ### List Replays You can list replays for a given recording by making a GET request to `/kapacitor/v1/replays`. -| Query Parameter | Default | Purpose | -| --------------- | ------- | ------- | -| offset | 0 | Offset count for paginating through tasks. | -| limit | 100 | Maximum number of tasks to return. | +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | +| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. | +| offset | 0 | Offset count for paginating through tasks. | +| limit | 100 | Maximum number of tasks to return. | #### Example @@ -812,3 +963,21 @@ These can be accessed at the `/kapacitor/v1/debug/vars` endpoint. 
``` GET /kapacitor/v1/debug/vars ``` + +### Debug Pprof + +Kapacitor also exposes the standard Go [net/http/pprof](https://golang.org/pkg/net/http/pprof/) endpoints. + +``` +GET /kapacitor/v1/debug/pprof/... +``` + +>NOTE: Not all of these endpoints return JSON content. + +### Routes + +Displays available routes for the API. + +``` +GET /kapacitor/v1/:routes +``` diff --git a/client/v1/client.go b/client/v1/client.go index 68f30c636..0207f80fe 100644 --- a/client/v1/client.go +++ b/client/v1/client.go @@ -7,13 +7,11 @@ import ( "encoding/json" "errors" "fmt" - "io" "io/ioutil" "net/http" "net/url" - "sort" + "path" "strconv" - "strings" "time" "github.com/influxdata/influxdb/influxql" @@ -21,6 +19,22 @@ import ( const DefaultUserAgent = "KapacitorClient" +// These are the constant endpoints for the API. +// The server will always return a `link` to resources, +// so path manipulation should not be necessary. +// The only exception is if you only have an ID for a resource +// then use the appropriate *Link methods. + +const basePath = "/kapacitor/v1" +const pingPath = basePath + "/ping" +const logLevelPath = basePath + "/loglevel" +const tasksPath = basePath + "/tasks" +const recordingsPath = basePath + "/recordings" +const recordStreamPath = basePath + "/recordings/stream" +const recordBatchPath = basePath + "/recordings/batch" +const recordQueryPath = basePath + "/recordings/query" +const replaysPath = basePath + "/replays" + // HTTP configuration for connecting to Kapacitor type Config struct { // The URL of the Kapacitor server. 
@@ -82,6 +96,54 @@ func New(conf Config) (*Client, error) { }, nil } +type Relation int + +const ( + Self Relation = iota + Next + Previous +) + +func (r Relation) MarshalText() ([]byte, error) { + switch r { + case Self: + return []byte("self"), nil + case Next: + return []byte("next"), nil + case Previous: + return []byte("prev"), nil + default: + return nil, fmt.Errorf("unknown Relation %d", r) + } +} + +func (r *Relation) UnmarshalText(text []byte) error { + switch s := string(text); s { + case "self": + *r = Self + case "next": + *r = Next + case "prev": + *r = Previous + default: + return fmt.Errorf("unknown Relation %s", s) + } + return nil +} + +func (r Relation) String() string { + s, err := r.MarshalText() + if err != nil { + return err.Error() + } + return string(s) +} + +type Link struct { + Relation Relation `json:"rel"` + Href string `json:"href"` +} + type DBRP struct { Database string `json:"db"` RetentionPolicy string `json:"rp"` @@ -94,49 +156,202 @@ func (d DBRP) String() string { // Statistics about the execution of a task. 
type ExecutionStats struct { // Summary stats about the entire task - TaskStats map[string]float64 + TaskStats map[string]interface{} `json:"task-stats"` // Stats for each node in the task - NodeStats map[string]map[string]float64 + NodeStats map[string]map[string]interface{} `json:"node-stats"` +} + +type TaskType int + +const ( + StreamTask TaskType = 1 + BatchTask TaskType = 2 +) + +func (tt TaskType) MarshalText() ([]byte, error) { + switch tt { + case StreamTask: + return []byte("stream"), nil + case BatchTask: + return []byte("batch"), nil + default: + return nil, fmt.Errorf("unknown TaskType %d", tt) + } +} + +func (tt *TaskType) UnmarshalText(text []byte) error { + switch s := string(text); s { + case "stream": + *tt = StreamTask + case "batch": + *tt = BatchTask + default: + return fmt.Errorf("unknown TaskType %s", s) + } + return nil +} +func (tt TaskType) String() string { + s, err := tt.MarshalText() + if err != nil { + return err.Error() + } + return string(s) +} + +type TaskStatus int + +const ( + Disabled TaskStatus = 1 + Enabled TaskStatus = 2 +) + +func (ts TaskStatus) MarshalText() ([]byte, error) { + switch ts { + case Disabled: + return []byte("disabled"), nil + case Enabled: + return []byte("enabled"), nil + default: + return nil, fmt.Errorf("unknown TaskStatus %d", ts) + } +} + +func (ts *TaskStatus) UnmarshalText(text []byte) error { + switch s := string(text); s { + case "enabled": + *ts = Enabled + case "disabled": + *ts = Disabled + default: + return fmt.Errorf("unknown TaskStatus %s", s) + } + return nil } -// Summary information about a task -type TaskSummary struct { - Name string - Type string - DBRPs []DBRP - Enabled bool - Executing bool - ExecutionStats ExecutionStats +func (ts TaskStatus) String() string { + s, err := ts.MarshalText() + if err != nil { + return err.Error() + } + return string(s) +} + +type Status int + +const ( + Failed Status = iota + Running + Finished +) + +func (s Status) MarshalText() ([]byte, error) { + 
switch s { + case Running: + return []byte("running"), nil + case Finished: + return []byte("finished"), nil + default: + return nil, fmt.Errorf("unknown Status %d", s) + } +} + +func (s *Status) UnmarshalText(text []byte) error { + switch t := string(text); t { + case "running": + *s = Running + case "finished": + *s = Finished + default: + return fmt.Errorf("unknown Status %s", t) + } + return nil +} + +func (s Status) String() string { + t, err := s.MarshalText() + if err != nil { + return err.Error() + } + return string(t) +} + +type Clock int + +const ( + Fast Clock = iota + Real +) + +func (c Clock) MarshalText() ([]byte, error) { + switch c { + case Fast: + return []byte("fast"), nil + case Real: + return []byte("real"), nil + default: + return nil, fmt.Errorf("unknown Clock %d", c) + } +} + +func (c *Clock) UnmarshalText(text []byte) error { + switch s := string(text); s { + case "fast": + *c = Fast + case "real": + *c = Real + default: + return fmt.Errorf("unknown Clock %s", s) + } + return nil +} + +func (c Clock) String() string { + s, err := c.MarshalText() + if err != nil { + return err.Error() + } + return string(s) } -// Complete information about a task +// A Task plus its read-only attributes. type Task struct { - Name string - Type string - DBRPs []DBRP - TICKscript string - Dot string - Enabled bool - Executing bool - Error string - ExecutionStats ExecutionStats -} - -// Information about a recording -type Recording struct { - ID string - Type string - Size int64 - Created time.Time - Error string + Link Link `json:"link"` + ID string `json:"id"` + Type TaskType `json:"type"` + DBRPs []DBRP `json:"dbrps"` + TICKscript string `json:"script"` + Dot string `json:"dot"` + Status TaskStatus `json:"status"` + Executing bool `json:"executing"` + Error string `json:"error"` + ExecutionStats ExecutionStats `json:"stats"` } -// Set of recordings sorted by created date. -type Recordings []Recording +// Information about a recording. 
+type Recording struct { + Link Link `json:"link"` + ID string `json:"id"` + Type TaskType `json:"type"` + Size int64 `json:"size"` + Date time.Time `json:"date"` + Error string `json:"error"` + Status Status `json:"status"` + Progress float64 `json:"progress"` +} -func (r Recordings) Len() int { return len(r) } -func (r Recordings) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r Recordings) Less(i, j int) bool { return r[i].Created.Before(r[j].Created) } +// Information about a replay. +type Replay struct { + Link Link `json:"link"` + ID string `json:"id"` + Task string `json:"task"` + Recording string `json:"recording"` + RecordingTime bool `json:"recording-time"` + Clock Clock `json:"clock"` + Date time.Time `json:"date"` + Error string `json:"error"` + Status Status `json:"status"` + Progress float64 `json:"progress"` +} // Perform the request. // If result is not nil the response body is JSON decoded into result. @@ -162,7 +377,7 @@ func (c *Client) do(req *http.Request, result interface{}, codes ...int) (*http. return nil, err } type errResp struct { - Error string `json:"Error"` + Error string `json:"error"` } d := json.NewDecoder(bytes.NewReader(body)) rp := errResp{} @@ -170,11 +385,14 @@ func (c *Client) do(req *http.Request, result interface{}, codes ...int) (*http. if rp.Error != "" { return nil, errors.New(rp.Error) } - return nil, fmt.Errorf("invalid repsonse: code %d: body: %s", resp.StatusCode, string(body)) + return nil, fmt.Errorf("invalid response: code %d: body: %s", resp.StatusCode, string(body)) } if result != nil { d := json.NewDecoder(resp.Body) - d.Decode(result) + err := d.Decode(result) + if err != nil { + return nil, fmt.Errorf("failed to decode JSON: %v", err) + } } return resp, nil } @@ -184,7 +402,7 @@ func (c *Client) do(req *http.Request, result interface{}, codes ...int) (*http. 
func (c *Client) Ping() (time.Duration, string, error) { now := time.Now() u := *c.url - u.Path = "ping" + u.Path = pingPath req, err := http.NewRequest("GET", u.String(), nil) if err != nil { @@ -199,57 +417,116 @@ func (c *Client) Ping() (time.Duration, string, error) { return time.Since(now), version, nil } -// Get summary information about tasks. -// If names list is empty all tasks are returned. -func (c *Client) ListTasks(names []string) ([]TaskSummary, error) { - tasks := strings.Join(names, ",") - v := url.Values{} - v.Add("tasks", tasks) +func (c *Client) TaskLink(id string) Link { + return Link{Relation: Self, Href: path.Join(tasksPath, id)} +} + +type CreateTaskOptions struct { + ID string `json:"id,omitempty"` + Type TaskType `json:"type,omitempty"` + DBRPs []DBRP `json:"dbrps,omitempty"` + TICKscript string `json:"script,omitempty"` + Status TaskStatus `json:"status,omitempty"` +} + +// Create a new task. +// Errors if the task already exists. +func (c *Client) CreateTask(opt CreateTaskOptions) (Task, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return Task{}, err + } u := *c.url - u.Path = "tasks" - u.RawQuery = v.Encode() + u.Path = tasksPath - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), &buf) if err != nil { - return nil, err + return Task{}, err } - // Response type - type response struct { - Error string `json:"Error"` - Tasks []TaskSummary `json:"Tasks"` + t := Task{} + _, err = c.do(req, &t, http.StatusOK) + return t, err +} + +type UpdateTaskOptions struct { + Type TaskType `json:"type,omitempty"` + DBRPs []DBRP `json:"dbrps,omitempty"` + TICKscript string `json:"script,omitempty"` + Status TaskStatus `json:"status,omitempty"` +} + +// Update an existing task. +// Only fields that are not their default value will be updated. 
+func (c *Client) UpdateTask(link Link, opt UpdateTaskOptions) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) } - r := &response{} + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return err + } - _, err = c.do(req, r, http.StatusOK) + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("PATCH", u.String(), &buf) if err != nil { - return nil, err + return err } - return r.Tasks, nil + + _, err = c.do(req, nil, http.StatusNoContent) + if err != nil { + return err + } + return nil } -// Get detailed information about a task. -// If dotLabels is true then the DOT string returned -// will use label attributes for the stats on the nodes and edges -// making it more useful to graph. -// Using skipFormat will skip the formatting step when returning the TICKscript contents. -func (c *Client) Task(name string, dotLabels, skipFormat bool) (Task, error) { - task := Task{} +type TaskOptions struct { + DotView string + ScriptFormat string +} + +func (o *TaskOptions) Default() { + if o.DotView == "" { + o.DotView = "attributes" + } + if o.ScriptFormat == "" { + o.ScriptFormat = "formatted" + } +} - v := url.Values{} - v.Add("name", name) - if dotLabels { - v.Add("labels", "true") +func (o *TaskOptions) Values() *url.Values { + v := &url.Values{} + v.Set("dot-view", o.DotView) + v.Set("script-format", o.ScriptFormat) + return v +} + +// Get information about a task. +// Options can be nil and the default options will be used. +// By default the DOT content will use attributes for stats. Use DotView="labels" to generate a purley labels based DOT content, which can accurately be rendered but is less readable. +// By default the TICKscript contents are formatted, use ScriptFormat="raw" to return the TICKscript unmodified. 
+func (c *Client) Task(link Link, opt *TaskOptions) (Task, error) { + task := Task{} + if link.Href == "" { + return task, fmt.Errorf("invalid link %v", link) } - if skipFormat { - v.Add("skip-format", "true") + + if opt == nil { + opt = new(TaskOptions) } + opt.Default() u := *c.url - u.Path = "task" - u.RawQuery = v.Encode() + u.Path = link.Href + u.RawQuery = opt.Values().Encode() req, err := http.NewRequest("GET", u.String(), nil) if err != nil { @@ -263,25 +540,69 @@ func (c *Client) Task(name string, dotLabels, skipFormat bool) (Task, error) { return task, nil } -// Get information about recordings. -// If rids is empty than all recordings are returned. -func (c *Client) ListRecordings(rids []string) (Recordings, error) { - ids := strings.Join(rids, ",") - v := url.Values{} - v.Add("rids", ids) +// Delete a task. +func (c *Client) DeleteTask(link Link) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) + } u := *c.url - u.Path = "recordings" - u.RawQuery = v.Encode() + u.Path = link.Href + + req, err := http.NewRequest("DELETE", u.String(), nil) + if err != nil { + return err + } + + _, err = c.do(req, nil, http.StatusNoContent) + return err +} + +type ListTasksOptions struct { + TaskOptions + Pattern string + Fields []string + Offset int + Limit int +} + +func (o *ListTasksOptions) Default() { + o.TaskOptions.Default() + if o.Limit == 0 { + o.Limit = 100 + } +} + +func (o *ListTasksOptions) Values() *url.Values { + v := o.TaskOptions.Values() + v.Set("pattern", o.Pattern) + for _, field := range o.Fields { + v.Add("fields", field) + } + v.Set("offset", strconv.FormatInt(int64(o.Offset), 10)) + v.Set("limit", strconv.FormatInt(int64(o.Limit), 10)) + return v +} + +// Get tasks. 
+func (c *Client) ListTasks(opt *ListTasksOptions) ([]Task, error) { + if opt == nil { + opt = new(ListTasksOptions) + } + opt.Default() + + u := *c.url + u.Path = tasksPath + u.RawQuery = opt.Values().Encode() req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return nil, err } - // Decode valid response + + // Response type type response struct { - Error string `json:"Error"` - Recordings Recordings `json:"Recordings"` + Tasks []Task `json:"tasks"` } r := &response{} @@ -290,223 +611,296 @@ func (c *Client) ListRecordings(rids []string) (Recordings, error) { if err != nil { return nil, err } - sort.Sort(r.Recordings) - return r.Recordings, nil + return r.Tasks, nil } -// Perform the record requests. -func (c *Client) doRecord(v url.Values) (string, error) { +func (c *Client) TaskOutput(link Link, name string) (*influxql.Result, error) { u := *c.url - u.Path = "record" - u.RawQuery = v.Encode() + u.Path = path.Join(link.Href, name) - req, err := http.NewRequest("POST", u.String(), nil) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return "", err + return nil, err } + r := &influxql.Result{} + _, err = c.do(req, r, http.StatusOK) + if err != nil { + return nil, err + } + return r, nil +} - // Decode valid response - type response struct { - RecordingID string `json:"RecordingID"` - Error string `json:"Error"` +// Get information about a recording. 
+func (c *Client) Recording(link Link) (Recording, error) { + r := Recording{} + if link.Href == "" { + return r, fmt.Errorf("invalid link %v", link) } - r := &response{} + u := *c.url + u.Path = link.Href - _, err = c.do(req, r, http.StatusOK) + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return r, err + } + + _, err = c.do(req, &r, http.StatusOK, http.StatusAccepted) if err != nil { - return "", err + return r, err } - return r.RecordingID, nil + return r, nil +} +func (c *Client) RecordingLink(id string) Link { + return Link{Relation: Self, Href: path.Join(recordingsPath, id)} +} + +type RecordStreamOptions struct { + ID string `json:"id,omitempty"` + Task string `json:"task"` + Stop time.Time `json:"stop"` } // Record the stream for a task. // Returns once the recording is started. -func (c *Client) RecordStream(name string, duration time.Duration) (string, error) { - v := url.Values{} - v.Add("type", "stream") - v.Add("name", name) - v.Add("duration", influxql.FormatDuration(duration)) +func (c *Client) RecordStream(opt RecordStreamOptions) (Recording, error) { + r := Recording{} + + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return r, err + } + + u := *c.url + u.Path = recordStreamPath + + req, err := http.NewRequest("POST", u.String(), &buf) + if err != nil { + return r, err + } + + _, err = c.do(req, &r, http.StatusCreated) + if err != nil { + return r, err + } + return r, nil +} - return c.doRecord(v) +type RecordBatchOptions struct { + ID string `json:"id,omitempty"` + Task string `json:"task"` + Start time.Time `json:"start"` + Stop time.Time `json:"stop"` + Cluster string `json:"cluster,omitempty"` } // Record the batch queries for a task. // Returns once the recording is started. 
-func (c *Client) RecordBatch(name, cluster string, start, stop time.Time, past time.Duration) (string, error) { - v := url.Values{} - v.Add("type", "batch") - v.Add("name", name) - v.Add("cluster", cluster) - if !start.IsZero() { - v.Add("start", start.Format(time.RFC3339Nano)) +func (c *Client) RecordBatch(opt RecordBatchOptions) (Recording, error) { + r := Recording{} + + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return r, err } - if !stop.IsZero() { - v.Add("stop", stop.Format(time.RFC3339Nano)) + + u := *c.url + u.Path = recordBatchPath + + req, err := http.NewRequest("POST", u.String(), &buf) + if err != nil { + return r, err } - v.Add("past", past.String()) - return c.doRecord(v) + _, err = c.do(req, &r, http.StatusCreated) + if err != nil { + return r, err + } + return r, nil +} + +type RecordQueryOptions struct { + ID string `json:"id,omitempty"` + Query string `json:"query"` + Type TaskType `json:"type"` + Cluster string `json:"cluster,omitempty"` } // Record the results of a query. // The recordingType must be one of "stream", or "batch". // Returns once the recording is started. -func (c *Client) RecordQuery(query, recordingType, cluster string) (string, error) { - v := url.Values{} - v.Add("type", "query") - v.Add("query", query) - v.Add("cluster", cluster) - v.Add("ttype", recordingType) - - return c.doRecord(v) -} - -// Get information about a recording. -// If the recording is currently being recorded then -// this method blocks until it is finished. 
-func (c *Client) Recording(rid string) (Recording, error) { +func (c *Client) RecordQuery(opt RecordQueryOptions) (Recording, error) { r := Recording{} - v := url.Values{} - v.Add("id", rid) + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return r, err + } u := *c.url - u.Path = "record" - u.RawQuery = v.Encode() + u.Path = recordQueryPath - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), &buf) if err != nil { return r, err } - _, err = c.do(req, &r, http.StatusOK) + _, err = c.do(req, &r, http.StatusCreated) if err != nil { return r, err } return r, nil } -// Replay a recording for a task. -func (c *Client) Replay(name, rid string, recordingTime, fast bool) error { - v := url.Values{} - v.Add("name", name) - v.Add("id", rid) - v.Add("rec-time", strconv.FormatBool(recordingTime)) - if fast { - v.Add("clock", "fast") +// Delete a recording. +func (c *Client) DeleteRecording(link Link) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) } - u := *c.url - u.Path = "replay" - u.RawQuery = v.Encode() + u.Path = link.Href - req, err := http.NewRequest("POST", u.String(), nil) + req, err := http.NewRequest("DELETE", u.String(), nil) if err != nil { return err } _, err = c.do(req, nil, http.StatusNoContent) - if err != nil { - return err + return err +} + +type ListRecordingsOptions struct { + Pattern string + Fields []string + Offset int + Limit int +} + +func (o *ListRecordingsOptions) Default() { + if o.Limit == 0 { + o.Limit = 100 } - return nil } -// Define a task. -// Name is always required. -// The other options are only modified if not empty or nil. 
-func (c *Client) Define(name, taskType string, dbrps []DBRP, tickScript io.Reader, reload bool) error { - v := url.Values{} - v.Add("name", name) - v.Add("type", taskType) - if len(dbrps) > 0 { - b, err := json.Marshal(dbrps) - if err != nil { - return err - } - v.Add("dbrps", string(b)) +func (o *ListRecordingsOptions) Values() *url.Values { + v := &url.Values{} + v.Set("pattern", o.Pattern) + for _, field := range o.Fields { + v.Add("fields", field) } + v.Set("offset", strconv.FormatInt(int64(o.Offset), 10)) + v.Set("limit", strconv.FormatInt(int64(o.Limit), 10)) + return v +} +// Get information about recordings. +// If rids is empty than all recordings are returned. +func (c *Client) ListRecordings(opt *ListRecordingsOptions) ([]Recording, error) { + if opt == nil { + opt = new(ListRecordingsOptions) + } + opt.Default() u := *c.url - u.Path = "task" - u.RawQuery = v.Encode() + u.Path = recordingsPath + u.RawQuery = opt.Values().Encode() - req, err := http.NewRequest("POST", u.String(), tickScript) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return err + return nil, err + } + // Decode valid response + type response struct { + Recordings []Recording `json:"recordings"` } - _, err = c.do(req, nil, http.StatusNoContent) + r := &response{} + + _, err = c.do(req, r, http.StatusOK) if err != nil { - return err - } - if reload { - tasks, err := c.ListTasks([]string{name}) - if err != nil { - return err - } - if len(tasks) == 1 && tasks[0].Enabled { - return c.Reload(name) - } + return nil, err } - return nil + return r.Recordings, nil +} + +func (c *Client) ReplayLink(id string) Link { + return Link{Relation: Self, Href: path.Join(replaysPath, id)} +} + +type CreateReplayOptions struct { + ID string `json:"id"` + Recording string `json:"recording"` + Task string `json:"task"` + RecordingTime bool `json:"recording-time"` + Clock Clock `json:"clock"` +} + +func (o *CreateReplayOptions) Default() { } -// Enable a task. 
-func (c *Client) Enable(name string) error { - v := url.Values{} - v.Add("name", name) +// Replay a recording for a task. +func (c *Client) CreateReplay(opt CreateReplayOptions) (Replay, error) { + r := Replay{} + + opt.Default() u := *c.url - u.Path = "enable" - u.RawQuery = v.Encode() + u.Path = replaysPath - req, err := http.NewRequest("POST", u.String(), nil) + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) if err != nil { - return err + return r, err } - _, err = c.do(req, nil, http.StatusNoContent) - return err + req, err := http.NewRequest("POST", u.String(), &buf) + if err != nil { + return r, err + } + + _, err = c.do(req, &r, http.StatusCreated) + if err != nil { + return r, err + } + return r, nil } -// Disable a task. -func (c *Client) Disable(name string) error { - v := url.Values{} - v.Add("name", name) +// Return the replay information +func (c *Client) Replay(link Link) (Replay, error) { + r := Replay{} + if link.Href == "" { + return r, fmt.Errorf("invalid link %v", link) + } u := *c.url - u.Path = "disable" - u.RawQuery = v.Encode() + u.Path = link.Href - req, err := http.NewRequest("POST", u.String(), nil) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return err + return r, err } - _, err = c.do(req, nil, http.StatusNoContent) - return err -} - -// Reload a task, aka disable/enable. -func (c *Client) Reload(name string) error { - err := c.Disable(name) + _, err = c.do(req, &r, http.StatusOK, http.StatusAccepted) if err != nil { - return err + return r, err } - return c.Enable(name) + return r, nil } -// Delete a task. -func (c *Client) DeleteTask(name string) error { - v := url.Values{} - v.Add("name", name) - +// Delete a replay. This will cancel a running replay. 
+func (c *Client) DeleteReplay(link Link) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) + } u := *c.url - u.Path = "task" - u.RawQuery = v.Encode() + u.Path = link.Href req, err := http.NewRequest("DELETE", u.String(), nil) if err != nil { @@ -514,38 +908,84 @@ func (c *Client) DeleteTask(name string) error { } _, err = c.do(req, nil, http.StatusNoContent) - return err + if err != nil { + return err + } + return nil } -// Delete a recording. -func (c *Client) DeleteRecording(rid string) error { - v := url.Values{} - v.Add("rid", rid) +type ListReplaysOptions struct { + Pattern string + Fields []string + Offset int + Limit int +} +func (o *ListReplaysOptions) Default() { + if o.Limit == 0 { + o.Limit = 100 + } +} + +func (o *ListReplaysOptions) Values() *url.Values { + v := &url.Values{} + v.Set("pattern", o.Pattern) + for _, field := range o.Fields { + v.Add("fields", field) + } + v.Set("offset", strconv.FormatInt(int64(o.Offset), 10)) + v.Set("limit", strconv.FormatInt(int64(o.Limit), 10)) + return v +} + +// Get information about replays. +// If rids is empty than all replays are returned. +func (c *Client) ListReplays(opt *ListReplaysOptions) ([]Replay, error) { + if opt == nil { + opt = new(ListReplaysOptions) + } + opt.Default() u := *c.url - u.Path = "recording" - u.RawQuery = v.Encode() + u.Path = replaysPath + u.RawQuery = opt.Values().Encode() - req, err := http.NewRequest("DELETE", u.String(), nil) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { - return err + return nil, err + } + // Decode valid response + type response struct { + Replays []Replay `json:"replays"` } - _, err = c.do(req, nil, http.StatusNoContent) - return err + r := &response{} + + _, err = c.do(req, r, http.StatusOK) + if err != nil { + return nil, err + } + return r.Replays, nil +} + +type LogLevelOptions struct { + Level string `json:"level"` } // Set the logging level. 
// Level must be one of DEBUG, INFO, WARN, ERROR, or OFF func (c *Client) LogLevel(level string) error { - v := url.Values{} - v.Add("level", level) - u := *c.url - u.Path = "loglevel" - u.RawQuery = v.Encode() + u.Path = logLevelPath + + opt := LogLevelOptions{Level: level} + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return err + } - req, err := http.NewRequest("POST", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), &buf) if err != nil { return err } diff --git a/client/v1/client_test.go b/client/v1/client_test.go index 3df8e0087..981166841 100644 --- a/client/v1/client_test.go +++ b/client/v1/client_test.go @@ -1,15 +1,17 @@ package client_test import ( + "encoding/json" "fmt" "io/ioutil" "net/http" "net/http/httptest" "reflect" - "strings" "testing" "time" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" "github.com/influxdata/kapacitor/client/v1" ) @@ -31,7 +33,6 @@ func Test_NewClient_Error(t *testing.T) { } func Test_ReportsErrors(t *testing.T) { - testCases := []struct { name string fnc func(c *client.Client) error @@ -44,100 +45,107 @@ func Test_ReportsErrors(t *testing.T) { }, }, { - name: "ListTasks", + name: "CreateTask", fnc: func(c *client.Client) error { - _, err := c.ListTasks(nil) + _, err := c.CreateTask(client.CreateTaskOptions{}) return err }, }, { - name: "Task", + name: "UpdateTask", fnc: func(c *client.Client) error { - _, err := c.Task("", false, false) + err := c.UpdateTask(c.TaskLink(""), client.UpdateTaskOptions{}) return err }, }, { - name: "ListRecordings", + name: "DeleteTask", fnc: func(c *client.Client) error { - _, err := c.ListRecordings(nil) + err := c.DeleteTask(c.TaskLink("")) return err }, }, { - name: "RecordStream", + name: "Task", fnc: func(c *client.Client) error { - _, err := c.RecordStream("", 0) + _, err := c.Task(c.TaskLink(""), nil) return err }, }, { - name: "RecordBatch", + name: "ListTasks", fnc: 
func(c *client.Client) error { - _, err := c.RecordBatch("", "", time.Time{}, time.Time{}, 0) + _, err := c.ListTasks(nil) return err }, }, { - name: "RecordQuery", + name: "RecordStream", fnc: func(c *client.Client) error { - _, err := c.RecordQuery("", "", "") + _, err := c.RecordStream(client.RecordStreamOptions{}) return err }, }, { - name: "Recording", + name: "RecordBatch", fnc: func(c *client.Client) error { - _, err := c.Recording("") + _, err := c.RecordBatch(client.RecordBatchOptions{}) return err }, }, { - name: "Replay", + name: "RecordQuery", fnc: func(c *client.Client) error { - err := c.Replay("", "", false, false) + _, err := c.RecordQuery(client.RecordQueryOptions{Type: client.StreamTask}) return err }, }, { - name: "Define", + name: "Recording", fnc: func(c *client.Client) error { - err := c.Define("", "", nil, nil, false) + _, err := c.Recording(c.RecordingLink("")) return err }, }, { - name: "Enable", + name: "ListRecordings", fnc: func(c *client.Client) error { - err := c.Enable("") + _, err := c.ListRecordings(nil) return err }, }, { - name: "Disable", + name: "DeleteRecording", fnc: func(c *client.Client) error { - err := c.Disable("") + err := c.DeleteRecording(c.RecordingLink("")) return err }, }, { - name: "Reload", + name: "CreateReplay", fnc: func(c *client.Client) error { - err := c.Reload("") + _, err := c.CreateReplay(client.CreateReplayOptions{}) return err }, }, { - name: "DeleteTask", + name: "DeleteReplay", fnc: func(c *client.Client) error { - err := c.DeleteTask("") + err := c.DeleteReplay(c.ReplayLink("")) return err }, }, { - name: "DeleteRecording", + name: "Replay", + fnc: func(c *client.Client) error { + _, err := c.Replay(c.ReplayLink("")) + return err + }, + }, + { + name: "ListReplay", fnc: func(c *client.Client) error { - err := c.DeleteRecording("") + _, err := c.ListReplays(nil) return err }, }, @@ -184,7 +192,7 @@ func Test_ReportsErrors(t *testing.T) { func Test_PingVersion(t *testing.T) { s, c, err := 
newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/ping" && r.Method == "GET" { + if r.URL.Path == "/kapacitor/v1/ping" && r.Method == "GET" { w.Header().Set("X-Kapacitor-Version", "versionStr") w.WriteHeader(http.StatusNoContent) } else { @@ -206,104 +214,21 @@ func Test_PingVersion(t *testing.T) { } } -func Test_ListTasks(t *testing.T) { - s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/tasks" && r.Method == "GET" && r.URL.Query().Get("tasks") == "t1,t2" { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, `{ -"Tasks":[ - { - "Name":"t1", - "Type":"stream", - "DBRPs":[{"db":"db","rp":"rp"}], - "Enabled" : false, - "Executing" : false - }, - { - "Name":"t2", - "Type":"batch", - "DBRPs":[{"db":"db","rp":"rp"}], - "Enabled" : true, - "Executing" : true, - "ExecutionStats": { - "TaskStats" : { - "throughput" : 5.6 - }, - "NodeStats" : { - "stream1" : { - "processed" : 1500, - "avg_exec_time_ns": 2345.83 - } - } - } - } -]}`) - } else { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "request: %v", r) - } - })) - if err != nil { - t.Fatal(err) - } - defer s.Close() - - tasks, err := c.ListTasks([]string{"t1", "t2"}) - if err != nil { - t.Fatal(err) - } - exp := []client.TaskSummary{ - { - Name: "t1", - Type: "stream", - DBRPs: []client.DBRP{{ - Database: "db", - RetentionPolicy: "rp", - }}, - Enabled: false, - Executing: false, - ExecutionStats: client.ExecutionStats{}, - }, - { - Name: "t2", - Type: "batch", - DBRPs: []client.DBRP{{ - Database: "db", - RetentionPolicy: "rp", - }}, - Enabled: true, - Executing: true, - ExecutionStats: client.ExecutionStats{ - TaskStats: map[string]float64{ - "throughput": 5.6, - }, - NodeStats: map[string]map[string]float64{ - "stream1": map[string]float64{ - "processed": 1500.0, - "avg_exec_time_ns": 2345.83, - }, - }, - }, - }, - } - if !reflect.DeepEqual(exp, tasks) { - t.Errorf("unexpected task list: got:\n%v\nexp:\n%v", 
tasks, exp) - } -} - func Test_Task(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/task" && r.Method == "GET" && r.URL.Query().Get("name") == "t1" { + if r.URL.Path == "/kapacitor/v1/tasks/t1" && r.Method == "GET" && + r.URL.Query().Get("dot-view") == "attributes" && + r.URL.Query().Get("script-format") == "formatted" { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, `{ - "Name":"t1", - "Type":"stream", - "DBRPs":[{"db":"db","rp":"rp"}], - "TICKscript":"stream\n |from()\n .measurement('cpu')\n", - "Dot": "digraph t1 {\n}", - "Enabled" : true, - "Executing" : false, - "Error": "" + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t1"}, + "type":"stream", + "dbrps":[{"db":"db","rp":"rp"}], + "script":"stream\n |from()\n .measurement('cpu')\n", + "dot": "digraph t1 {}", + "status" : "enabled", + "executing" : false, + "error": "" }`) } else { w.WriteHeader(http.StatusBadRequest) @@ -315,13 +240,13 @@ func Test_Task(t *testing.T) { } defer s.Close() - task, err := c.Task("t1", false, false) + task, err := c.Task(c.TaskLink("t1"), nil) if err != nil { t.Fatal(err) } exp := client.Task{ - Name: "t1", - Type: "stream", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t1"}, + Type: client.StreamTask, DBRPs: []client.DBRP{{ Database: "db", RetentionPolicy: "rp", @@ -330,8 +255,8 @@ func Test_Task(t *testing.T) { |from() .measurement('cpu') `, - Dot: "digraph t1 {\n}", - Enabled: true, + Dot: "digraph t1 {}", + Status: client.Enabled, Executing: false, Error: "", ExecutionStats: client.ExecutionStats{}, @@ -343,19 +268,19 @@ func Test_Task(t *testing.T) { func Test_Task_Labels(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/task" && r.Method == "GET" && - r.URL.Query().Get("name") == "t1" && - r.URL.Query().Get("labels") == "true" { + if r.URL.Path == "/kapacitor/v1/tasks/t1" && r.Method == "GET" && + 
r.URL.Query().Get("dot-view") == "labels" && + r.URL.Query().Get("script-format") == "formatted" { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, `{ - "Name":"t1", - "Type":"stream", - "DBRPs":[{"db":"db","rp":"rp"}], - "TICKscript":"stream\n |from()\n .measurement('cpu')\n", - "Dot": "digraph t1 {\n}", - "Enabled" : true, - "Executing" : false, - "Error": "" + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t1"}, + "type":"stream", + "dbrps":[{"db":"db","rp":"rp"}], + "script":"stream\n |from()\n .measurement('cpu')\n", + "dot": "digraph t1 {\n}", + "status" : "enabled", + "executing" : false, + "error": "" }`) } else { w.WriteHeader(http.StatusBadRequest) @@ -367,13 +292,13 @@ func Test_Task_Labels(t *testing.T) { } defer s.Close() - task, err := c.Task("t1", true, false) + task, err := c.Task(c.TaskLink("t1"), &client.TaskOptions{DotView: "labels"}) if err != nil { t.Fatal(err) } exp := client.Task{ - Name: "t1", - Type: "stream", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t1"}, + Type: client.StreamTask, DBRPs: []client.DBRP{{ Database: "db", RetentionPolicy: "rp", @@ -383,7 +308,7 @@ func Test_Task_Labels(t *testing.T) { .measurement('cpu') `, Dot: "digraph t1 {\n}", - Enabled: true, + Status: client.Enabled, Executing: false, Error: "", ExecutionStats: client.ExecutionStats{}, @@ -393,21 +318,21 @@ func Test_Task_Labels(t *testing.T) { } } -func Test_Task_SkipFormat(t *testing.T) { +func Test_Task_RawFormat(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/task" && r.Method == "GET" && - r.URL.Query().Get("name") == "t1" && - r.URL.Query().Get("skip-format") == "true" { + if r.URL.Path == "/kapacitor/v1/tasks/t1" && r.Method == "GET" && + r.URL.Query().Get("dot-view") == "attributes" && + r.URL.Query().Get("script-format") == "raw" { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, `{ - "Name":"t1", - "Type":"stream", - "DBRPs":[{"db":"db","rp":"rp"}], - 
"TICKscript":"stream|from().measurement('cpu')", - "Dot": "digraph t1 {\n}", - "Enabled" : true, - "Executing" : false, - "Error": "" + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t1"}, + "type":"stream", + "dbrps":[{"db":"db","rp":"rp"}], + "script":"stream|from().measurement('cpu')", + "dot": "digraph t1 {\n}", + "status" : "enabled", + "executing" : false, + "error": "" }`) } else { w.WriteHeader(http.StatusBadRequest) @@ -419,20 +344,20 @@ func Test_Task_SkipFormat(t *testing.T) { } defer s.Close() - task, err := c.Task("t1", false, true) + task, err := c.Task(c.TaskLink("t1"), &client.TaskOptions{ScriptFormat: "raw"}) if err != nil { t.Fatal(err) } exp := client.Task{ - Name: "t1", - Type: "stream", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t1"}, + Type: client.StreamTask, DBRPs: []client.DBRP{{ Database: "db", RetentionPolicy: "rp", }}, TICKscript: "stream|from().measurement('cpu')", Dot: "digraph t1 {\n}", - Enabled: true, + Status: client.Enabled, Executing: false, Error: "", ExecutionStats: client.ExecutionStats{}, @@ -442,25 +367,222 @@ func Test_Task_SkipFormat(t *testing.T) { } } -func Test_ListRecordings(t *testing.T) { +func Test_CreateTask(t *testing.T) { + tickScript := "stream|from().measurement('cpu')" + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var task client.CreateTaskOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &task) + + if r.URL.Path == "/kapacitor/v1/tasks" && r.Method == "POST" { + exp := client.CreateTaskOptions{ + ID: "taskname", + Type: client.StreamTask, + DBRPs: []client.DBRP{{Database: "dbname", RetentionPolicy: "rpname"}}, + TICKscript: tickScript, + Status: client.Disabled, + } + if !reflect.DeepEqual(exp, task) { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "unexpected CreateTask body: got:\n%v\nexp:\n%v\n", task, exp) + } else { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, `{"link": {"rel":"self", 
"href":"/kapacitor/v1/tasks/taskname"}}`) + } + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + task, err := c.CreateTask(client.CreateTaskOptions{ + ID: "taskname", + Type: client.StreamTask, + DBRPs: []client.DBRP{{Database: "dbname", RetentionPolicy: "rpname"}}, + TICKscript: tickScript, + Status: client.Disabled, + }) + if got, exp := string(task.Link.Href), "/kapacitor/v1/tasks/taskname"; got != exp { + t.Errorf("unexpected task link got %s exp %s", got, exp) + } + if err != nil { + t.Fatal(err) + } +} + +func Test_UpdateTask(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var task client.UpdateTaskOptions + task.Status = client.Enabled + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &task) + + if r.URL.Path == "/kapacitor/v1/tasks/taskname" && r.Method == "PATCH" { + exp := client.UpdateTaskOptions{ + DBRPs: []client.DBRP{{Database: "newdb", RetentionPolicy: "rpname"}}, + Status: client.Enabled, + } + if !reflect.DeepEqual(exp, task) { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "unexpected UpdateTask body: got:\n%v\nexp:\n%v\n", task, exp) + } else { + w.WriteHeader(http.StatusNoContent) + } + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + err = c.UpdateTask( + c.TaskLink("taskname"), + client.UpdateTaskOptions{ + DBRPs: []client.DBRP{{Database: "newdb", RetentionPolicy: "rpname"}}, + }, + ) + if err != nil { + t.Fatal(err) + } +} + +func Test_UpdateTask_Enable(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var task client.UpdateTaskOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &task) + + if r.URL.Path == "/kapacitor/v1/tasks/taskname" && r.Method == "PATCH" { + exp := 
client.UpdateTaskOptions{ + Status: client.Enabled, + } + if !reflect.DeepEqual(exp, task) { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "unexpected UpdateTask body: got:\n%v\nexp:\n%v\n", task, exp) + } else { + w.WriteHeader(http.StatusNoContent) + } + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + err = c.UpdateTask( + c.TaskLink("taskname"), + client.UpdateTaskOptions{ + Status: client.Enabled, + }, + ) + if err != nil { + t.Fatal(err) + } +} + +func Test_UpdateTask_Disable(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var task client.UpdateTaskOptions + task.Status = client.Enabled + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &task) + + if r.URL.Path == "/kapacitor/v1/tasks/taskname" && r.Method == "PATCH" { + exp := client.UpdateTaskOptions{ + Status: client.Disabled, + } + if !reflect.DeepEqual(exp, task) { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "unexpected UpdateTask body: got:\n%v\nexp:\n%v\n", task, exp) + } else { + w.WriteHeader(http.StatusNoContent) + } + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + err = c.UpdateTask( + c.TaskLink("taskname"), + client.UpdateTaskOptions{ + Status: client.Disabled, + }) + if err != nil { + t.Fatal(err) + } +} + +func Test_DeleteTask(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/kapacitor/v1/tasks/taskname" && r.Method == "DELETE" { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + err = c.DeleteTask(c.TaskLink("taskname")) + if err != nil { + t.Fatal(err) + } +} + +func Test_ListTasks(t 
*testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/recordings" && r.Method == "GET" { + if r.URL.Path == "/kapacitor/v1/tasks" && r.Method == "GET" && + r.URL.Query().Get("pattern") == "" && + r.URL.Query().Get("fields") == "" && + r.URL.Query().Get("dot-view") == "attributes" && + r.URL.Query().Get("script-format") == "formatted" && + r.URL.Query().Get("offset") == "0" && + r.URL.Query().Get("limit") == "100" { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, `{ -"Recordings":[ +"tasks":[ { - "ID":"rid1", - "Type":"batch", - "Size": 42, - "Created" : "2016-03-31T11:24:55.526388889Z", - "Error": "" + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t1"}, + "type":"stream", + "dbrps":[{"db":"db","rp":"rp"}], + "status" : "disabled", + "executing" : false }, { - "ID":"rid2", - "Type":"stream", - "Size": 4200, - "Created" : "2016-03-31T10:24:55.526388889Z", - "Error": "" + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t2"}, + "type":"batch", + "dbrps":[{"db":"db","rp":"rp"}], + "status" : "enabled", + "executing" : true, + "stats": { + "task-stats" : { + "throughput" : 5.6 + }, + "node-stats" : { + "stream1" : { + "processed" : 1500, + "avg_exec_time_ns": 2345.83 + } + } + } } ]}`) } else { @@ -473,113 +595,290 @@ func Test_ListRecordings(t *testing.T) { } defer s.Close() - tasks, err := c.ListRecordings([]string{"rid1", "rid2"}) + tasks, err := c.ListTasks(nil) if err != nil { t.Fatal(err) } - exp := client.Recordings{ + exp := []client.Task{ { - ID: "rid2", - Type: "stream", - Size: 4200, - Created: time.Date(2016, 3, 31, 10, 24, 55, 526388889, time.UTC), + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t1"}, + Type: client.StreamTask, + DBRPs: []client.DBRP{{ + Database: "db", + RetentionPolicy: "rp", + }}, + Status: client.Disabled, + Executing: false, + ExecutionStats: client.ExecutionStats{}, }, { - ID: "rid1", - Type: "batch", - Size: 42, - Created: time.Date(2016, 3, 
31, 11, 24, 55, 526388889, time.UTC), + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t2"}, + Type: client.BatchTask, + DBRPs: []client.DBRP{{ + Database: "db", + RetentionPolicy: "rp", + }}, + Status: client.Enabled, + Executing: true, + ExecutionStats: client.ExecutionStats{ + TaskStats: map[string]interface{}{ + "throughput": 5.6, + }, + NodeStats: map[string]map[string]interface{}{ + "stream1": map[string]interface{}{ + "processed": 1500.0, + "avg_exec_time_ns": 2345.83, + }, + }, + }, }, } if !reflect.DeepEqual(exp, tasks) { - t.Errorf("unexpected recording list:\ngot:\n%v\nexp:\n%v", tasks, exp) + t.Errorf("unexpected task list: got:\n%v\nexp:\n%v", tasks, exp) } } -func Test_Record(t *testing.T) { - testCases := []struct { - name string - fnc func(c *client.Client) (string, error) - checkRequest func(r *http.Request) bool - }{ +func Test_ListTasks_Options(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/kapacitor/v1/tasks" && r.Method == "GET" && + r.URL.Query().Get("pattern") == "t*" && + len(r.URL.Query()["fields"]) == 3 && + r.URL.Query()["fields"][0] == "status" && + r.URL.Query()["fields"][1] == "error" && + r.URL.Query()["fields"][2] == "executing" && + r.URL.Query().Get("dot-view") == "attributes" && + r.URL.Query().Get("script-format") == "formatted" && + r.URL.Query().Get("offset") == "100" && + r.URL.Query().Get("limit") == "100" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ +"tasks":[ + { + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t1"}, + "status" : "enabled", + "executing" : false, + "error": "failed" + }, + { + "link": {"rel":"self", "href":"/kapacitor/v1/tasks/t2"}, + "status" : "enabled", + "executing" : true, + "error": "" + } +]}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + tasks, err := 
c.ListTasks(&client.ListTasksOptions{ + Pattern: "t*", + Fields: []string{"status", "error", "executing"}, + Offset: 100, + }) + if err != nil { + t.Fatal(err) + } + exp := []client.Task{ { - name: "RecordStream", - fnc: func(c *client.Client) (string, error) { - return c.RecordStream("taskname", time.Minute) - }, - checkRequest: func(r *http.Request) bool { - return r.URL.Query().Get("type") == "stream" && - r.URL.Query().Get("name") == "taskname" && - r.URL.Query().Get("duration") == "1m" - }, + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t1"}, + Status: client.Enabled, + Executing: false, + Error: "failed", }, { - name: "RecordBatch", - fnc: func(c *client.Client) (string, error) { - return c.RecordBatch( - "taskname", - "cluster", - time.Date(2016, 3, 31, 10, 24, 55, 526388889, time.UTC), - time.Date(2016, 3, 31, 11, 24, 55, 526388889, time.UTC), - time.Hour*5, - ) - }, - checkRequest: func(r *http.Request) bool { - return r.URL.Query().Get("type") == "batch" && - r.URL.Query().Get("name") == "taskname" && - r.URL.Query().Get("cluster") == "cluster" && - r.URL.Query().Get("start") == "2016-03-31T10:24:55.526388889Z" && - r.URL.Query().Get("stop") == "2016-03-31T11:24:55.526388889Z" && - r.URL.Query().Get("past") == "5h0m0s" - }, + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/tasks/t2"}, + Status: client.Enabled, + Executing: true, + Error: "", }, - { - name: "RecordQuery", - fnc: func(c *client.Client) (string, error) { - return c.RecordQuery("queryStr", "stream", "cluster") - }, - checkRequest: func(r *http.Request) bool { - return r.URL.Query().Get("type") == "query" && - r.URL.Query().Get("ttype") == "stream" && - r.URL.Query().Get("cluster") == "cluster" + } + if !reflect.DeepEqual(exp, tasks) { + t.Errorf("unexpected task list: got:\n%v\nexp:\n%v", tasks, exp) + } +} + +func Test_TaskOutput(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == 
"/kapacitor/v1/tasks/taskname/cpu" && r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "series": [ + { + "name": "cpu", + "columns": [ + "time", + "value" + ], + "values": [ + [ + "2015-01-29T21:55:43.702900257Z", + 55 + ], + [ + "2015-01-29T21:56:43.702900257Z", + 42 + ] + ] + } + ] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + r, err := c.TaskOutput(c.TaskLink("taskname"), "cpu") + if err != nil { + t.Fatal(err) + } + exp := &influxql.Result{ + Series: models.Rows{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + "2015-01-29T21:55:43.702900257Z", + 55.0, + }, + { + "2015-01-29T21:56:43.702900257Z", + 42.0, + }, }, - }, + }}, } - for _, tc := range testCases { - s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/record" && r.Method == "POST" && tc.checkRequest(r) { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, `{"RecordingID":"rid1"}`) - } else { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "request: %v", r) - } - })) - if err != nil { - t.Fatal(err) + if !reflect.DeepEqual(exp, r) { + t.Errorf("unexpected task output: \ngot\n%v\nexp\n%v\n", r, exp) + t.Errorf("unexpected task output: \ngot.Series\n%v\nexp.Series\n%v\n", r.Series[0], exp.Series[0]) + } +} + +func Test_RecordStream(t *testing.T) { + stop := time.Now().Add(time.Minute).UTC() + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var opts client.RecordStreamOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &opts) + if r.URL.Path == "/kapacitor/v1/recordings/stream" && r.Method == "POST" && + opts.Task == "taskname" && + opts.Stop == stop { + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{"link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}}`) + } else { + w.WriteHeader(http.StatusBadRequest) 
+ fmt.Fprintf(w, "request: %v", r) } - defer s.Close() + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() - rid, err := tc.fnc(c) - if err != nil { - t.Fatal(tc.name, err) + r, err := c.RecordStream(client.RecordStreamOptions{ + Task: "taskname", + Stop: stop, + }) + if err != nil { + t.Fatal(err) + } + if exp, got := "/kapacitor/v1/recordings/rid1", string(r.Link.Href); got != exp { + t.Errorf("unexpected recording id for test: got: %s exp: %s", got, exp) + } +} +func Test_RecordBatch(t *testing.T) { + stop := time.Now().UTC() + start := stop.Add(-24 * time.Hour) + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var opts client.RecordBatchOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &opts) + if r.URL.Path == "/kapacitor/v1/recordings/batch" && r.Method == "POST" && + opts.Task == "taskname" && + opts.Start == start && + opts.Stop == stop && + opts.Cluster == "" { + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{"link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) } - if exp, got := "rid1", rid; got != exp { - t.Errorf("unexpected recording id for test %s: got: %s exp: %s", tc.name, got, exp) + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + r, err := c.RecordBatch(client.RecordBatchOptions{ + Task: "taskname", + Start: start, + Stop: stop, + }) + if err != nil { + t.Fatal(err) + } + if exp, got := "/kapacitor/v1/recordings/rid1", string(r.Link.Href); got != exp { + t.Errorf("unexpected recording id for test: got: %s exp: %s", got, exp) + } +} + +func Test_RecordQuery(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var opts client.RecordQueryOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &opts) + if r.URL.Path == "/kapacitor/v1/recordings/query" && r.Method == "POST" && + opts.Query == "SELECT * 
FROM allthetings" && + opts.Type == client.StreamTask && + opts.Cluster == "mycluster" { + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{"link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v body: %s", r, string(body)) } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + r, err := c.RecordQuery(client.RecordQueryOptions{ + Query: "SELECT * FROM allthetings", + Cluster: "mycluster", + Type: client.StreamTask, + }) + if err != nil { + t.Fatal(err) + } + if exp, got := "/kapacitor/v1/recordings/rid1", string(r.Link.Href); got != exp { + t.Errorf("unexpected recording id for test: got: %s exp: %s", got, exp) } } func Test_Recording(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/record" && r.Method == "GET" { + if r.URL.Path == "/kapacitor/v1/recordings/rid1" && r.Method == "GET" { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, `{ - "ID":"rid1", - "Type":"batch", - "Size": 42, - "Created" : "2016-03-31T11:24:55.526388889Z", - "Error": "" + "link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}, + "type":"batch", + "size": 42, + "date" : "2016-03-31T11:24:55.526388889Z", + "error": "", + "status": "finished", + "progress": 1.0 }`) } else { w.WriteHeader(http.StatusBadRequest) @@ -591,29 +890,36 @@ func Test_Recording(t *testing.T) { } defer s.Close() - tasks, err := c.Recording("rid1") + recordings, err := c.Recording(c.RecordingLink("rid1")) if err != nil { t.Fatal(err) } exp := client.Recording{ - ID: "rid1", - Type: "batch", - Size: 42, - Created: time.Date(2016, 3, 31, 11, 24, 55, 526388889, time.UTC), + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/recordings/rid1"}, + Type: client.BatchTask, + Size: 42, + Date: time.Date(2016, 3, 31, 11, 24, 55, 526388889, time.UTC), + Status: client.Finished, + Progress: 1.0, } - if !reflect.DeepEqual(exp, tasks) { - 
t.Errorf("unexpected recording list:\ngot:\n%v\nexp:\n%v", tasks, exp) + if !reflect.DeepEqual(exp, recordings) { + t.Errorf("unexpected recording list:\ngot:\n%v\nexp:\n%v", recordings, exp) } } -func Test_Replay(t *testing.T) { +func Test_RecordingRunning(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/replay" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" && - r.URL.Query().Get("id") == "rid1" && - r.URL.Query().Get("rec-time") == "false" && - r.URL.Query().Get("clock") == "fast" { - w.WriteHeader(http.StatusNoContent) + if r.URL.Path == "/kapacitor/v1/recordings/rid1" && r.Method == "GET" { + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{ + "link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}, + "type":"batch", + "size": 42, + "date" : "2016-03-31T11:24:55.526388889Z", + "error": "", + "status": "running", + "progress": 0.42 +}`) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintf(w, "request: %v", r) @@ -624,23 +930,51 @@ func Test_Replay(t *testing.T) { } defer s.Close() - err = c.Replay("taskname", "rid1", false, true) + recordings, err := c.Recording(c.RecordingLink("rid1")) if err != nil { t.Fatal(err) } + exp := client.Recording{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/recordings/rid1"}, + Type: client.BatchTask, + Size: 42, + Date: time.Date(2016, 3, 31, 11, 24, 55, 526388889, time.UTC), + Status: client.Running, + Progress: 0.42, + } + if !reflect.DeepEqual(exp, recordings) { + t.Errorf("unexpected recording list:\ngot:\n%v\nexp:\n%v", recordings, exp) + } } -func Test_Define(t *testing.T) { - tickScript := "stream|from().measurement('cpu')" +func Test_ListRecordings(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - script, _ := ioutil.ReadAll(r.Body) - - if r.URL.Path == "/task" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" && - 
r.URL.Query().Get("type") == "stream" && - r.URL.Query().Get("dbrps") == `[{"db":"dbname","rp":"rpname"}]` && - string(script) == tickScript { - w.WriteHeader(http.StatusNoContent) + if r.URL.Path == "/kapacitor/v1/recordings" && r.Method == "GET" && + r.URL.Query().Get("pattern") == "" && + r.URL.Query().Get("offset") == "0" && + r.URL.Query().Get("limit") == "100" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ +"recordings":[ + { + "link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}, + "type":"batch", + "size": 42, + "date" : "2016-03-31T11:24:55.526388889Z", + "error": "", + "status": "running", + "progress": 0.67 + }, + { + "link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid2"}, + "type":"stream", + "size": 4200, + "date" : "2016-03-31T10:24:55.526388889Z", + "error": "", + "status": "finished", + "progress": 1.0 + } +]}`) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintf(w, "request: %v", r) @@ -651,87 +985,86 @@ func Test_Define(t *testing.T) { } defer s.Close() - err = c.Define( - "taskname", - "stream", - []client.DBRP{{Database: "dbname", RetentionPolicy: "rpname"}}, - strings.NewReader(tickScript), - false, - ) + tasks, err := c.ListRecordings(nil) if err != nil { t.Fatal(err) } + exp := []client.Recording{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/recordings/rid1"}, + Type: client.BatchTask, + Size: 42, + Date: time.Date(2016, 3, 31, 11, 24, 55, 526388889, time.UTC), + Status: client.Running, + Progress: 0.67, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/recordings/rid2"}, + Type: client.StreamTask, + Size: 4200, + Date: time.Date(2016, 3, 31, 10, 24, 55, 526388889, time.UTC), + Status: client.Finished, + Progress: 1.0, + }, + } + if !reflect.DeepEqual(exp, tasks) { + t.Errorf("unexpected recording list:\ngot:\n%v\nexp:\n%v", tasks, exp) + } } -func Test_Define_Reload(t *testing.T) { - requestCount := 0 - tickScript := "stream|from().measurement('cpu')" + +func 
Test_ListRecordings_Filter(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestCount++ - switch requestCount { - case 1: - script, _ := ioutil.ReadAll(r.Body) - if r.URL.Path == "/task" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" && - r.URL.Query().Get("type") == "stream" && - r.URL.Query().Get("dbrps") == `[{"db":"dbname","rp":"rpname"}]` && - string(script) == tickScript { - w.WriteHeader(http.StatusNoContent) - return - } - case 2: - if r.URL.Path == "/tasks" && r.Method == "GET" && - r.URL.Query().Get("tasks") == "taskname" { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, `{ -"Tasks":[ + if r.URL.Path == "/kapacitor/v1/recordings" && r.Method == "GET" && + r.URL.Query().Get("pattern") == "rid1" && + len(r.URL.Query()["fields"]) == 3 && + r.URL.Query()["fields"][0] == "status" && + r.URL.Query()["fields"][1] == "error" && + r.URL.Query()["fields"][2] == "progress" && + r.URL.Query().Get("offset") == "0" && + r.URL.Query().Get("limit") == "1" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ +"recordings":[ { - "Name":"taskname", - "Type":"stream", - "DBRPs":[{"db":"db","rp":"rp"}], - "Enabled": true, - "Executing": true + "link": {"rel":"self", "href":"/kapacitor/v1/recordings/rid1"}, + "error": "", + "status": "running", + "progress": 0.67 } ]}`) - return - } - case 3: - if r.URL.Path == "/disable" && r.Method == "POST" { - w.WriteHeader(http.StatusNoContent) - return - } - case 4: - if r.URL.Path == "/enable" && r.Method == "POST" { - w.WriteHeader(http.StatusNoContent) - return - } + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) } - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "request: %v", r) })) if err != nil { t.Fatal(err) } defer s.Close() - err = c.Define( - "taskname", - "stream", - []client.DBRP{{Database: "dbname", RetentionPolicy: "rpname"}}, - strings.NewReader(tickScript), - true, - ) + tasks, err := 
c.ListRecordings(&client.ListRecordingsOptions{ + Pattern: "rid1", + Fields: []string{"status", "error", "progress"}, + Limit: 1, + }) if err != nil { t.Fatal(err) } - if exp, got := 4, requestCount; got != exp { - t.Errorf("unexpected request count: got %d exp %d", got, exp) + exp := []client.Recording{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/recordings/rid1"}, + Status: client.Running, + Progress: 0.67, + }, + } + if !reflect.DeepEqual(exp, tasks) { + t.Errorf("unexpected recording list:\ngot:\n%v\nexp:\n%v", tasks, exp) } } -func Test_Enable(t *testing.T) { +func Test_DeleteRecording(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/enable" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" { + if r.URL.Path == "/kapacitor/v1/recordings/rid1" && r.Method == "DELETE" { w.WriteHeader(http.StatusNoContent) } else { w.WriteHeader(http.StatusBadRequest) @@ -743,17 +1076,25 @@ func Test_Enable(t *testing.T) { } defer s.Close() - err = c.Enable("taskname") + err = c.DeleteRecording(c.RecordingLink("rid1")) if err != nil { t.Fatal(err) } } - -func Test_Disable(t *testing.T) { +func Test_Replay(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/disable" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" { - w.WriteHeader(http.StatusNoContent) + if r.URL.Path == "/kapacitor/v1/replays/replayid" && r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link": {"rel":"self", "href":"/kapacitor/v1/replays/replayid"}, + "task": "taskid", + "recording": "recordingid", + "recording-time":false, + "clock": "fast", + "error": "", + "status": "finished", + "progress": 1.0 +}`) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintf(w, "request: %v", r) @@ -764,69 +1105,106 @@ func Test_Disable(t *testing.T) { } defer s.Close() - err = c.Disable("taskname") 
+ replay, err := c.Replay(c.ReplayLink("replayid")) if err != nil { t.Fatal(err) } + exp := client.Replay{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/replays/replayid"}, + Task: "taskid", + Recording: "recordingid", + RecordingTime: false, + Clock: client.Fast, + Error: "", + Status: client.Finished, + Progress: 1.0, + } + if !reflect.DeepEqual(exp, replay) { + t.Errorf("unexpected replay got: %v exp %v", replay, exp) + } } -func Test_Reload(t *testing.T) { - requestCount := 0 +func Test_ReplayRunning(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestCount++ - switch requestCount { - case 1: - if r.URL.Path == "/disable" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" { - w.WriteHeader(http.StatusNoContent) - return - } - case 2: - if r.URL.Path == "/enable" && r.Method == "POST" && - r.URL.Query().Get("name") == "taskname" { - w.WriteHeader(http.StatusNoContent) - return - } - + if r.URL.Path == "/kapacitor/v1/replays/replayid" && r.Method == "GET" { + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{ + "link": {"rel":"self", "href":"/kapacitor/v1/replays/replayid"}, + "task": "taskid", + "recording": "recordingid", + "recording-time":false, + "clock": "fast", + "error": "", + "status": "running", + "progress": 0.67 +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) } - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "request: %v", r) })) if err != nil { t.Fatal(err) } defer s.Close() - err = c.Reload("taskname") + replay, err := c.Replay(c.ReplayLink("replayid")) if err != nil { t.Fatal(err) } + exp := client.Replay{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/replays/replayid"}, + Task: "taskid", + Recording: "recordingid", + RecordingTime: false, + Clock: client.Fast, + Error: "", + Status: client.Running, + Progress: 0.67, + } + if !reflect.DeepEqual(exp, replay) { + t.Errorf("unexpected 
replay got: %v exp %v", replay, exp) + } } -func Test_Reload_SkipEnable(t *testing.T) { - requestCount := 0 +func Test_CreateReplay(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requestCount++ - w.WriteHeader(http.StatusInternalServerError) + var opts client.CreateReplayOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &opts) + if r.URL.Path == "/kapacitor/v1/replays" && r.Method == "POST" && + opts.Task == "taskname" && + opts.Recording == "recording" && + opts.RecordingTime == false && + opts.Clock == client.Fast { + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{"link":{"rel":"self","href":"/kapacitor/v1/replays/replayid"}}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } })) if err != nil { t.Fatal(err) } defer s.Close() - err = c.Reload("taskname") - if err == nil { - t.Error("expected error from Reload") + replay, err := c.CreateReplay(client.CreateReplayOptions{ + Task: "taskname", + Recording: "recording", + Clock: client.Fast, + }) + if err != nil { + t.Fatal(err) } - if exp, got := 1, requestCount; got != exp { - t.Errorf("unexpected request count: got %d exp %d", got, exp) + if exp, got := "/kapacitor/v1/replays/replayid", string(replay.Link.Href); exp != got { + t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp) } } -func Test_DeleteTask(t *testing.T) { +func Test_DeleteReplay(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/task" && r.Method == "DELETE" && - r.URL.Query().Get("name") == "taskname" { + if r.URL.Path == "/kapacitor/v1/replays/replayid" && r.Method == "DELETE" { w.WriteHeader(http.StatusNoContent) } else { w.WriteHeader(http.StatusBadRequest) @@ -838,17 +1216,42 @@ func Test_DeleteTask(t *testing.T) { } defer s.Close() - err = c.DeleteTask("taskname") + err = c.DeleteReplay(c.ReplayLink("replayid")) if err != nil { t.Fatal(err) } 
} -func Test_DeleteRecording(t *testing.T) { +func Test_ListReplays(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/recording" && r.Method == "DELETE" && - r.URL.Query().Get("rid") == "rid1" { - w.WriteHeader(http.StatusNoContent) + if r.URL.Path == "/kapacitor/v1/replays" && r.Method == "GET" && + r.URL.Query().Get("pattern") == "" && + r.URL.Query().Get("offset") == "0" && + r.URL.Query().Get("limit") == "100" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ +"replays":[ + { + "link": {"rel":"self", "href":"/kapacitor/v1/replays/rpid1"}, + "task": "taskid", + "recording" : "recordingid", + "clock": "fast", + "recording-time": true, + "error": "", + "status": "running", + "progress": 0.67 + }, + { + "link": {"rel":"self", "href":"/kapacitor/v1/replays/rpid2"}, + "task": "taskid2", + "recording" : "recordingid2", + "clock": "real", + "recording-time": false, + "error": "", + "status": "finished", + "progress": 1.0 + } +]}`) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintf(w, "request: %v", r) @@ -859,16 +1262,93 @@ func Test_DeleteRecording(t *testing.T) { } defer s.Close() - err = c.DeleteRecording("rid1") + tasks, err := c.ListReplays(nil) if err != nil { t.Fatal(err) } + exp := []client.Replay{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/replays/rpid1"}, + Task: "taskid", + Recording: "recordingid", + Clock: client.Fast, + RecordingTime: true, + Status: client.Running, + Progress: 0.67, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/replays/rpid2"}, + Task: "taskid2", + Recording: "recordingid2", + Clock: client.Real, + RecordingTime: false, + Status: client.Finished, + Progress: 1.0, + }, + } + if !reflect.DeepEqual(exp, tasks) { + t.Errorf("unexpected replay list:\ngot:\n%v\nexp:\n%v", tasks, exp) + } +} + +func Test_ListReplays_Filter(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + if r.URL.Path == "/kapacitor/v1/replays" && r.Method == "GET" && + r.URL.Query().Get("pattern") == "rpid1" && + len(r.URL.Query()["fields"]) == 3 && + r.URL.Query()["fields"][0] == "status" && + r.URL.Query()["fields"][1] == "error" && + r.URL.Query()["fields"][2] == "progress" && + r.URL.Query().Get("offset") == "0" && + r.URL.Query().Get("limit") == "1" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ +"replays":[ + { + "link": {"rel":"self", "href":"/kapacitor/v1/replays/rpid1"}, + "error": "", + "status": "running", + "progress": 0.67 + } +]}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + tasks, err := c.ListReplays(&client.ListReplaysOptions{ + Pattern: "rpid1", + Fields: []string{"status", "error", "progress"}, + Limit: 1, + }) + if err != nil { + t.Fatal(err) + } + exp := []client.Replay{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1/replays/rpid1"}, + Status: client.Running, + Progress: 0.67, + }, + } + if !reflect.DeepEqual(exp, tasks) { + t.Errorf("unexpected replay list:\ngot:\n%v\nexp:\n%v", tasks, exp) + } } func Test_LogLevel(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/loglevel" && r.Method == "POST" && - r.URL.Query().Get("level") == "DEBUG" { + var opts client.LogLevelOptions + body, _ := ioutil.ReadAll(r.Body) + json.Unmarshal(body, &opts) + + if r.URL.Path == "/kapacitor/v1/loglevel" && r.Method == "POST" && + opts.Level == "DEBUG" { w.WriteHeader(http.StatusNoContent) } else { w.WriteHeader(http.StatusBadRequest) diff --git a/client/v1/swagger.yml b/client/v1/swagger.yml index cbd2d6a44..2f18cfbba 100644 --- a/client/v1/swagger.yml +++ b/client/v1/swagger.yml @@ -100,10 +100,8 @@ paths: schema: $ref: '#/definitions/Task' responses: - '200': - description: Task ID - schema: - $ref: '#/definitions/Task' + '204': + 
description: Update succeeded default: description: A processing or an unexpected error. schema: @@ -220,8 +218,8 @@ paths: - task - stop responses: - '200': - description: Recording ID + '201': + description: Recording Started schema: $ref: '#/definitions/Recording' default: @@ -257,8 +255,8 @@ paths: - task - start responses: - '200': - description: Recording ID + '201': + description: Recording Started schema: $ref: '#/definitions/Recording' default: @@ -291,8 +289,8 @@ paths: - type - query responses: - '200': - description: Recording ID + '201': + description: Recording Started schema: $ref: '#/definitions/Recording' default: @@ -327,7 +325,11 @@ paths: required: true responses: '200': - description: Recording information + description: Recording finished + schema: + $ref: '#/definitions/Recording' + '202': + description: Recording running schema: $ref: '#/definitions/Recording' default: @@ -406,8 +408,12 @@ paths: - task - recording responses: - '200': - description: Replay ID + '201': + description: Replay Started + schema: + $ref: '#/definitions/Replay' + '202': + description: Replay running schema: $ref: '#/definitions/Replay' default: @@ -442,7 +448,7 @@ paths: required: true responses: '200': - description: Task information + description: Replay information schema: $ref: '#/definitions/Replay' default: @@ -533,6 +539,9 @@ definitions: type: string type: type: string + size: + type: integer + format: int64 date: type: string format: dateTime @@ -593,6 +602,7 @@ definitions: error: type: string description: Detailed description of an error + required: [error] Link: type: object readOnly: true diff --git a/cmd/kapacitor/main.go b/cmd/kapacitor/main.go index 8e9bbda54..820494cc4 100644 --- a/cmd/kapacitor/main.go +++ b/cmd/kapacitor/main.go @@ -2,10 +2,9 @@ package main import ( "bytes" - "errors" "flag" "fmt" - "io" + "io/ioutil" "log" "os" "strings" @@ -14,6 +13,7 @@ import ( "github.com/dustin/go-humanize" "github.com/influxdata/influxdb/influxql" 
"github.com/influxdata/kapacitor/client/v1" + "github.com/pkg/errors" ) // These variables are populated via the Go linker. @@ -26,7 +26,7 @@ var ( var defaultURL = "http://localhost:9092" var mainFlags = flag.NewFlagSet("main", flag.ExitOnError) -var kapacitordURL = mainFlags.String("url", "", "the URL http(s)://host:port of the kapacitord server. Defaults to the KAPACITOR_URL environment variable or "+defaultURL+" if not set.") +var kapacitordURL = mainFlags.String("url", "", "The URL http(s)://host:port of the kapacitord server. Defaults to the KAPACITOR_URL environment variable or "+defaultURL+" if not set.") var l = log.New(os.Stderr, "[run] ", log.LstdFlags) @@ -37,19 +37,19 @@ Usage: kapacitor [options] [command] [args] Commands: - record record the result of a query or a snapshot of the current stream data. - define create/update a task. - replay replay a recording to a task. - enable enable and start running a task with live data. - disable stop running a task. - reload reload a running task with an updated task definition. - push publish a task definition to another Kapacitor instance. Not implemented yet. - delete delete a task or a recording. - list list information about tasks or recordings. - show display detailed information about a task. - help get help for a command. - level sets the logging level on the kapacitord server. - version displays the Kapacitor version info. + record Record the result of a query or a snapshot of the current stream data. + define Create/update a task. + replay Replay a recording to a task. + enable Enable and start running a task with live data. + disable Stop running a task. + reload Reload a running task with an updated task definition. + push Publish a task definition to another Kapacitor instance. Not implemented yet. + delete Delete a task or a recording. + list List information about tasks or recordings. + show Display detailed information about a task. + help Prints help for a command. 
+ level Sets the logging level on the kapacitord server. + version Displays the Kapacitor version info. Options: ` @@ -102,8 +102,7 @@ func main() { commandArgs = args commandF = doRecord case "define": - defineFlags.Parse(args) - commandArgs = defineFlags.Args() + commandArgs = args commandF = doDefine case "replay": replayFlags.Parse(args) @@ -219,20 +218,26 @@ func doHelp(args []string) error { // Record var ( recordStreamFlags = flag.NewFlagSet("record-stream", flag.ExitOnError) - rsname = recordStreamFlags.String("name", "", "the name of a task. Uses the dbrp value for the task.") - rsdur = recordStreamFlags.String("duration", "", "how long to record the data stream.") + rstask = recordStreamFlags.String("task", "", "The ID of a task. Uses the dbrp value for the task.") + rsdur = recordStreamFlags.String("duration", "", "How long to record the data stream.") + rsnowait = recordStreamFlags.Bool("no-wait", false, "Do not wait for the recording to finish.") + rsid = recordStreamFlags.String("recording-id", "", "The ID to give to this recording. If not set an random ID is chosen.") recordBatchFlags = flag.NewFlagSet("record-batch", flag.ExitOnError) - rbname = recordBatchFlags.String("name", "", "the name of a task. Uses the queries contained in the task.") - rbstart = recordBatchFlags.String("start", "", "the start time for the set of queries.") - rbstop = recordBatchFlags.String("stop", "", "the stop time for the set of queries (default now).") - rbpast = recordBatchFlags.String("past", "", "set start time via 'now - past'.") - rbcluster = recordBatchFlags.String("cluster", "", "optional named InfluxDB cluster from configuration.") + rbtask = recordBatchFlags.String("task", "", "The ID of a task. 
Uses the queries contained in the task.") + rbstart = recordBatchFlags.String("start", "", "The start time for the set of queries.") + rbstop = recordBatchFlags.String("stop", "", "The stop time for the set of queries (default now).") + rbpast = recordBatchFlags.String("past", "", "Set start time via 'now - past'.") + rbcluster = recordBatchFlags.String("cluster", "", "Optional named InfluxDB cluster from configuration.") + rbnowait = recordBatchFlags.Bool("no-wait", false, "Do not wait for the recording to finish.") + rbid = recordBatchFlags.String("recording-id", "", "The ID to give to this recording. If not set an random ID is chosen.") recordQueryFlags = flag.NewFlagSet("record-query", flag.ExitOnError) - rqquery = recordQueryFlags.String("query", "", "the query to record.") - rqtype = recordQueryFlags.String("type", "", "the type of the recording to save (stream|batch).") - rqcluster = recordQueryFlags.String("cluster", "", "optional named InfluxDB cluster from configuration.") + rqquery = recordQueryFlags.String("query", "", "The query to record.") + rqtype = recordQueryFlags.String("type", "", "The type of the recording to save (stream|batch).") + rqcluster = recordQueryFlags.String("cluster", "", "Optional named InfluxDB cluster from configuration.") + rqnowait = recordQueryFlags.Bool("no-wait", false, "Do not wait for the recording to finish.") + rqid = recordQueryFlags.String("recording-id", "", "The ID to give to this recording. If not set an random ID is chosen.") ) func recordUsage() { @@ -258,7 +263,7 @@ func recordStreamUsage() { Examples: - $ kapacitor record stream -name mem_free -duration 1m + $ kapacitor record stream -task mem_free -duration 1m This records the live data stream for 1 minute using the databases and retention policies from the named task. 
@@ -280,13 +285,13 @@ func recordBatchUsage() { Examples: - $ kapacitor record batch -name cpu_idle -start 2015-09-01T00:00:00Z -stop 2015-09-02T00:00:00Z + $ kapacitor record batch -task cpu_idle -start 2015-09-01T00:00:00Z -stop 2015-09-02T00:00:00Z This records the result of the query defined in task 'cpu_idle' and runs the query as many times as many times as defined by the schedule until the queries reaches the stop time. starting at time 'start' and incrementing by the schedule defined in the task. - $ kapacitor record batch -name cpu_idle -past 10h + $ kapacitor record batch -task cpu_idle -past 10h This records the result of the query defined in task 'cpu_idle' and runs the query as many times as defined by the schedule until the queries reaches the present time. @@ -322,30 +327,37 @@ Options: } func doRecord(args []string) error { - var rid string + var recording client.Recording var err error + noWait := false + switch args[0] { case "stream": recordStreamFlags.Parse(args[1:]) - if *rsname == "" || *rsdur == "" { + if *rstask == "" || *rsdur == "" { recordStreamFlags.Usage() - return errors.New("both name and duration are required") + return errors.New("both task and duration are required") } var duration time.Duration duration, err = influxql.ParseDuration(*rsdur) if err != nil { return err } - rid, err = cli.RecordStream(*rsname, duration) + noWait = *rsnowait + recording, err = cli.RecordStream(client.RecordStreamOptions{ + ID: *rsid, + Task: *rstask, + Stop: time.Now().Add(duration), + }) if err != nil { return err } case "batch": recordBatchFlags.Parse(args[1:]) - if *rbname == "" { + if *rbtask == "" { recordBatchFlags.Usage() - return errors.New("name is required") + return errors.New("task is required") } if *rbstart == "" && *rbpast == "" { recordBatchFlags.Usage() @@ -355,8 +367,7 @@ func doRecord(args []string) error { recordBatchFlags.Usage() return errors.New("cannot set both start and past flags.") } - var start, stop time.Time - var past 
time.Duration + start, stop := time.Time{}, time.Now() if *rbstart != "" { start, err = time.Parse(time.RFC3339Nano, *rbstart) if err != nil { @@ -370,12 +381,20 @@ func doRecord(args []string) error { } } if *rbpast != "" { - past, err = influxql.ParseDuration(*rbpast) + past, err := influxql.ParseDuration(*rbpast) if err != nil { return err } + start = stop.Add(-1 * past) } - rid, err = cli.RecordBatch(*rbname, *rbcluster, start, stop, past) + noWait = *rbnowait + recording, err = cli.RecordBatch(client.RecordBatchOptions{ + ID: *rbid, + Task: *rbtask, + Cluster: *rbcluster, + Start: start, + Stop: stop, + }) if err != nil { return err } @@ -385,36 +404,58 @@ func doRecord(args []string) error { recordQueryFlags.Usage() return errors.New("both query and type are required") } - rid, err = cli.RecordQuery(*rqquery, *rqtype, *rqcluster) + var typ client.TaskType + switch *rqtype { + case "stream": + typ = client.StreamTask + case "batch": + typ = client.BatchTask + } + noWait = *rqnowait + recording, err = cli.RecordQuery(client.RecordQueryOptions{ + ID: *rqid, + Query: *rqquery, + Type: typ, + Cluster: *rqcluster, + }) if err != nil { return err } default: return fmt.Errorf("Unknown record type %q, expected 'stream', 'batch' or 'query'", args[0]) } - info, err := cli.Recording(rid) - if err != nil { - return err + if noWait { + fmt.Println(recording.ID) + return nil } - if info.Error != "" { - return errors.New(info.Error) + for recording.Status == client.Running { + time.Sleep(500 * time.Millisecond) + recording, err = cli.Recording(recording.Link) + if err != nil { + return err + } + } + fmt.Println(recording.ID) + if recording.Status == client.Failed { + if recording.Error == "" { + recording.Error = "recording failed: unknown reason" + } + return errors.New(recording.Error) } - fmt.Println(rid) return nil } // Define var ( defineFlags = flag.NewFlagSet("define", flag.ExitOnError) - dname = defineFlags.String("name", "", "the task name") - dtick = 
defineFlags.String("tick", "", "path to the TICKscript") - dtype = defineFlags.String("type", "", "the task type (stream|batch)") - dnoReload = defineFlags.Bool("no-reload", false, "do not reload the task even if it is enabled") + dtick = defineFlags.String("tick", "", "Path to the TICKscript") + dtype = defineFlags.String("type", "", "The task type (stream|batch)") + dnoReload = defineFlags.Bool("no-reload", false, "Do not reload the task even if it is enabled") ddbrp = make(dbrps, 0) ) func init() { - defineFlags.Var(&ddbrp, "dbrp", `a database and retention policy pair of the form "db"."rp" the quotes are optional. The flag can be specified multiple times.`) + defineFlags.Var(&ddbrp, "dbrp", `A database and retention policy pair of the form "db"."rp" the quotes are optional. The flag can be specified multiple times.`) } type dbrps []client.DBRP @@ -478,32 +519,32 @@ func parseQuotedStr(txt string) (string, int) { } func defineUsage() { - var u = `Usage: kapacitor define [options] + var u = `Usage: kapacitor define [options] -Create or update a task. + Create or update a task. -A task is defined via a TICKscript that defines the data processing pipeline of the task. + A task is defined via a TICKscript that defines the data processing pipeline of the task. -If an option is absent it will be left unmodified. + If an option is absent it will be left unmodified. -If the task is enabled then it will be reloaded unless -no-reload is specified. + If the task is enabled then it will be reloaded unless -no-reload is specified. For example: - You can define a task for the first time with all the flags. + You can define a task for the first time with all the flags. - $ kapacitor define -name my_task -tick path/to/TICKscript -type stream -dbrp mydb.myrp + $ kapacitor define my_task -tick path/to/TICKscript -type stream -dbrp mydb.myrp - Later you can change a single property of the task by referencing its name - and only providing the single option you wish to modify. 
+ Later you can change a single property of the task by referencing its name + and only providing the single option you wish to modify. - $ kapacitor define -name my_task -tick path/to/TICKscript + $ kapacitor define my_task -tick path/to/TICKscript - or + or - $ kapacitor define -name my_task -dbrp mydb.myrp -dbrp otherdb.default + $ kapacitor define my_task -dbrp mydb.myrp -dbrp otherdb.default - NOTE: you must specify all 'dbrp' flags you desire if you wish to modify them. + NOTE: you must specify all 'dbrp' flags you desire if you wish to modify them. Options: @@ -513,37 +554,92 @@ Options: } func doDefine(args []string) error { - if *dname == "" { - fmt.Fprintln(os.Stderr, "Must always pass name flag.") + if len(args) < 1 { + fmt.Fprintln(os.Stderr, "Must provide a task ID.") defineFlags.Usage() os.Exit(2) } + defineFlags.Parse(args[1:]) + id := args[0] - var f io.Reader + var script string if *dtick != "" { file, err := os.Open(*dtick) if err != nil { return err } defer file.Close() - f = file + data, err := ioutil.ReadAll(file) + if err != nil { + return err + } + script = string(data) + } + + var ttype client.TaskType + switch *dtype { + case "stream": + ttype = client.StreamTask + case "batch": + ttype = client.BatchTask + } + + l := cli.TaskLink(id) + task, _ := cli.Task(l, nil) + var err error + if task.ID == "" { + _, err = cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: ddbrp, + TICKscript: script, + Status: client.Disabled, + }) + } else { + err = cli.UpdateTask( + l, + client.UpdateTaskOptions{ + Type: ttype, + DBRPs: ddbrp, + TICKscript: script, + }, + ) + } + if err != nil { + return err } - return cli.Define(*dname, *dtype, ddbrp, f, !*dnoReload) + if task.Error != "" { + return errors.New(task.Error) + } + + if !*dnoReload && task.Status == client.Enabled { + err := cli.UpdateTask(l, client.UpdateTaskOptions{Status: client.Disabled}) + if err != nil { + return err + } + err = cli.UpdateTask(l, 
client.UpdateTaskOptions{Status: client.Enabled}) + if err != nil { + return err + } + } + return nil } // Replay var ( replayFlags = flag.NewFlagSet("replay", flag.ExitOnError) - rtname = replayFlags.String("name", "", "the task name") - rid = replayFlags.String("id", "", "the recording ID") - rfast = replayFlags.Bool("fast", false, "If set, replay the data as fast as possible. If not set, replay the data in real time.") + rtask = replayFlags.String("task", "", "The task ID.") + rrecording = replayFlags.String("recording", "", "The recording ID.") + rreal = replayFlags.Bool("real-clock", false, "If set, replay the data in real time. If not set replay data as fast as possible.") rrec = replayFlags.Bool("rec-time", false, "If set, use the times saved in the recording instead of present times.") + rnowait = replayFlags.Bool("no-wait", false, "Do not wait for the replay to finish.") + rid = replayFlags.String("replay-id", "", "The ID to give to this replay. If not set a random ID is chosen.") ) func replayUsage() { var u = `Usage: kapacitor replay [options] -Replay a recording to a task. Waits until the task finishes. +Replay a recording to a task. Prints the replay ID. The times of the data points will either be relative to now or the exact times in the recording if the '-rec-time' flag is set. 
In either case the relative times @@ -559,46 +655,102 @@ Options: } func doReplay(args []string) error { - if *rid == "" { - return errors.New("must pass recording id") + if *rrecording == "" { + replayUsage() + return errors.New("must pass recording ID") } - if *rtname == "" { - return errors.New("must pass task name") + if *rtask == "" { + replayUsage() + return errors.New("must pass task ID") } - return cli.Replay(*rtname, *rid, *rrec, *rfast) + clk := client.Fast + if *rreal { + clk = client.Real + } + replay, err := cli.CreateReplay(client.CreateReplayOptions{ + ID: *rid, + Task: *rtask, + Recording: *rrecording, + RecordingTime: *rrec, + Clock: clk, + }) + if err != nil { + return err + } + if *rnowait { + fmt.Println(replay.ID) + return nil + } + for replay.Status == client.Running { + time.Sleep(500 * time.Millisecond) + replay, err = cli.Replay(replay.Link) + if err != nil { + return err + } + } + fmt.Println(replay.ID) + if replay.Status == client.Failed { + if replay.Error == "" { + replay.Error = "replay failed: unknown reason" + } + return errors.New(replay.Error) + } + return nil } // Enable func enableUsage() { - var u = `Usage: kapacitor enable [task name...] + var u = `Usage: kapacitor enable [task ID...] Enable and start a task running from the live data. For example: - You can enable by specific task name. + You can enable by specific task ID. 
- $ kapacitor enable cpu_alert + $ kapacitor enable cpu_alert - Or, you can enable by glob: + Or, you can enable by glob: - $ kapacitor enable *_alert + $ kapacitor enable *_alert ` fmt.Fprintln(os.Stderr, u) } func doEnable(args []string) error { if len(args) < 1 { - fmt.Fprintln(os.Stderr, "Must pass at least one task name") + fmt.Fprintln(os.Stderr, "Must pass at least one task ID") enableUsage() os.Exit(2) } - for _, name := range args { - err := cli.Enable(name) - if err != nil { - return err + limit := 100 + for _, pattern := range args { + offset := 0 + for { + tasks, err := cli.ListTasks(&client.ListTasksOptions{ + Pattern: pattern, + Fields: []string{"link"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return errors.Wrap(err, "listing tasks") + } + for _, task := range tasks { + err := cli.UpdateTask( + task.Link, + client.UpdateTaskOptions{Status: client.Enabled}, + ) + if err != nil { + return errors.Wrapf(err, "enabling task %s", task.ID) + } + } + if len(tasks) != limit { + break + } + offset += limit } } return nil @@ -607,34 +759,56 @@ func doEnable(args []string) error { // Disable func disableUsage() { - var u = `Usage: kapacitor disable [task name...] + var u = `Usage: kapacitor disable [task ID...] Disable and stop a task running. For example: - You can disable by specific task name. + You can disable by specific task ID. 
- $ kapacitor disable cpu_alert + $ kapacitor disable cpu_alert - Or, you can disable by glob: + Or, you can disable by glob: - $ kapacitor disable *_alert + $ kapacitor disable *_alert ` fmt.Fprintln(os.Stderr, u) } func doDisable(args []string) error { if len(args) < 1 { - fmt.Fprintln(os.Stderr, "Must pass at least one task name") + fmt.Fprintln(os.Stderr, "Must pass at least one task ID") disableUsage() os.Exit(2) } - for _, name := range args { - err := cli.Disable(name) - if err != nil { - return err + limit := 100 + for _, pattern := range args { + offset := 0 + for { + tasks, err := cli.ListTasks(&client.ListTasksOptions{ + Pattern: pattern, + Fields: []string{"link"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return errors.Wrap(err, "listing tasks") + } + for _, task := range tasks { + err := cli.UpdateTask( + task.Link, + client.UpdateTaskOptions{Status: client.Disabled}, + ) + if err != nil { + return errors.Wrapf(err, "disabling task %s", task.ID) + } + } + if len(tasks) != limit { + break + } + offset += limit } } return nil @@ -643,26 +817,26 @@ func doDisable(args []string) error { // Reload func reloadUsage() { - var u = `Usage: kapacitor reload [task name...] + var u = `Usage: kapacitor reload [task ID...] Disable then enable a running task. For example: - You can reload by specific task name. + You can reload by specific task ID. 
- $ kapacitor reload cpu_alert + $ kapacitor reload cpu_alert - Or, you can reload by glob: + Or, you can reload by glob: - $ kapacitor reload *_alert + $ kapacitor reload *_alert ` fmt.Fprintln(os.Stderr, u) } func doReload(args []string) error { if len(args) < 1 { - fmt.Fprintln(os.Stderr, "Must pass at least one task name") + fmt.Fprintln(os.Stderr, "Must pass at least one task ID") reloadUsage() os.Exit(2) } @@ -677,7 +851,7 @@ func doReload(args []string) error { // Show func showUsage() { - var u = `Usage: kapacitor show [task name] + var u = `Usage: kapacitor show [task ID] Show details about a specific task. ` @@ -686,20 +860,20 @@ func showUsage() { func doShow(args []string) error { if len(args) != 1 { - fmt.Fprintln(os.Stderr, "Must specify one task name") + fmt.Fprintln(os.Stderr, "Must specify one task ID") showUsage() os.Exit(2) } - ti, err := cli.Task(args[0], false, false) + ti, err := cli.Task(cli.TaskLink(args[0]), nil) if err != nil { return err } - fmt.Println("Name:", ti.Name) + fmt.Println("ID:", ti.ID) fmt.Println("Error:", ti.Error) fmt.Println("Type:", ti.Type) - fmt.Println("Enabled:", ti.Enabled) + fmt.Println("Status:", ti.Status) fmt.Println("Executing:", ti.Executing) fmt.Println("Databases Retention Policies:", ti.DBRPs) fmt.Printf("TICKscript:\n%s\n\n", ti.TICKscript) @@ -710,12 +884,11 @@ func doShow(args []string) error { // List func listUsage() { - var u = `Usage: kapacitor list (tasks|recordings) [task name|recording ID]... + var u = `Usage: kapacitor list (tasks|recordings|replays) [(task|recording|replay) ID or pattern] -List tasks or recordings and their current state. +List tasks, recordings, or replays and their current state. -If no tasks are given then all tasks are listed. Same for recordings. -If a set of task names or recordings IDs is provided only those entries will be listed. +If no ID or pattern is given then all items will be listed. 
` fmt.Fprintln(os.Stderr, u) } @@ -723,36 +896,96 @@ If a set of task names or recordings IDs is provided only those entries will be func doList(args []string) error { if len(args) == 0 { - fmt.Fprintln(os.Stderr, "Must specify 'tasks' or 'recordings'") + fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', or 'replays'") + listUsage() + os.Exit(2) + } + + if len(args) > 2 { + fmt.Fprintln(os.Stderr, "Invalid usage of list") listUsage() os.Exit(2) } + var pattern string + if len(args) == 2 { + pattern = args[1] + } + + limit := 100 + switch kind := args[0]; kind { case "tasks": - tasks, err := cli.ListTasks(args[1:]) - if err != nil { - return err - } - outFmt := "%-30s%-10v%-10v%-10v%s\n" - fmt.Fprintf(os.Stdout, outFmt, "Name", "Type", "Enabled", "Executing", "Databases and Retention Policies") - for _, t := range tasks { - fmt.Fprintf(os.Stdout, outFmt, t.Name, t.Type, t.Enabled, t.Executing, t.DBRPs) + fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Executing", "Databases and Retention Policies") + offset := 0 + for { + tasks, err := cli.ListTasks(&client.ListTasksOptions{ + Pattern: pattern, + Fields: []string{"type", "status", "executing", "dbrps"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } + + for _, t := range tasks { + fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Type, t.Status, t.Executing, t.DBRPs) + } + if len(tasks) != limit { + break + } + offset += limit } case "recordings": - recordings, err := cli.ListRecordings(args[1:]) - if err != nil { - return err + outFmt := "%-40s%-8v%-10s%-10s%-23s\n" + fmt.Fprintf(os.Stdout, outFmt, "ID", "Type", "Status", "Size", "Date") + offset := 0 + for { + recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{ + Pattern: pattern, + Fields: []string{"type", "size", "date", "status"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } + + for _, r := range recordings { + fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, r.Status, 
humanize.Bytes(uint64(r.Size)), r.Date.Local().Format(time.RFC822)) + } + if len(recordings) != limit { + break + } + offset += limit } + case "replays": + outFmt := "%-40v%-20v%-40v%-9v%-8v%-23v\n" + fmt.Fprintf(os.Stdout, outFmt, "ID", "Task", "Recording", "Status", "Clock", "Date") + offset := 0 + for { + replays, err := cli.ListReplays(&client.ListReplaysOptions{ + Pattern: pattern, + Fields: []string{"task", "recording", "status", "clock", "date"}, + Offset: offset, + Limit: limit, + }) + if err != nil { + return err + } - outFmt := "%-40s%-8v%-10s%-23s\n" - fmt.Fprintf(os.Stdout, "%-40s%-8s%-10s%-23s\n", "ID", "Type", "Size", "Created") - for _, r := range recordings { - fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Type, humanize.Bytes(uint64(r.Size)), r.Created.Local().Format(time.RFC822)) + for _, r := range replays { + fmt.Fprintf(os.Stdout, outFmt, r.ID, r.Task, r.Recording, r.Status, r.Clock, r.Date.Local().Format(time.RFC822)) + } + if len(replays) != limit { + break + } + offset += limit } default: - return fmt.Errorf("cannot list '%s' did you mean 'tasks' or 'recordings'?", kind) + return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings' or 'replays'?", kind) } return nil @@ -760,52 +993,106 @@ func doList(args []string) error { // Delete func deleteUsage() { - var u = `Usage: kapacitor delete (tasks|recordings) [task name|recording ID]... + var u = `Usage: kapacitor delete (tasks|recordings|replays) [task|recording|replay ID]... Delete a task or recording. 
If a task is enabled it will be disabled and then deleted, + For example: - You can delete task: + You can delete task: - $ kapacitor delete tasks my_task + $ kapacitor delete tasks my_task - Or you can delete tasks by glob: + Or you can delete items by glob: - $ kapacitor delete tasks *_alert + $ kapacitor delete tasks *_alert - You can delete recordings: + You can delete recordings: - $ kapacitor delete recordings b0a2ba8a-aeeb-45ec-bef9-1a2939963586 + $ kapacitor delete recordings b0a2ba8a-aeeb-45ec-bef9-1a2939963586 ` fmt.Fprintln(os.Stderr, u) } func doDelete(args []string) error { if len(args) < 2 { - fmt.Fprintln(os.Stderr, "Must pass at least one task name or recording ID") + fmt.Fprintln(os.Stderr, "Must pass at least one ID") deleteUsage() os.Exit(2) } + limit := 100 switch kind := args[0]; kind { case "tasks": - for _, task := range args[1:] { - err := cli.DeleteTask(task) - if err != nil { - return err + for _, pattern := range args[1:] { + for { + tasks, err := cli.ListTasks(&client.ListTasksOptions{ + Pattern: pattern, + Fields: []string{"link"}, + Limit: limit, + }) + if err != nil { + return err + } + for _, task := range tasks { + err := cli.DeleteTask(task.Link) + if err != nil { + return err + } + } + if len(tasks) != limit { + break + } } } case "recordings": - for _, rid := range args[1:] { - err := cli.DeleteRecording(rid) - if err != nil { - return err + for _, pattern := range args[1:] { + for { + recordings, err := cli.ListRecordings(&client.ListRecordingsOptions{ + Pattern: pattern, + Fields: []string{"link"}, + Limit: limit, + }) + if err != nil { + return err + } + for _, recording := range recordings { + err := cli.DeleteRecording(recording.Link) + if err != nil { + return err + } + } + if len(recordings) != limit { + break + } + } + } + case "replays": + for _, pattern := range args[1:] { + for { + replays, err := cli.ListReplays(&client.ListReplaysOptions{ + Pattern: pattern, + Fields: []string{"link"}, + Limit: limit, + }) + if err 
!= nil { + return err + } + for _, replay := range replays { + err := cli.DeleteReplay(replay.Link) + if err != nil { + return err + } + } + if len(replays) != limit { + break + } } } default: - return fmt.Errorf("cannot delete '%s' did you mean 'tasks' or 'recordings'?", kind) + return fmt.Errorf("cannot delete '%s' did you mean 'tasks', 'recordings' or 'replays'?", kind) } return nil } diff --git a/cmd/kapacitord/run/config.go b/cmd/kapacitord/run/config.go index b0a1180cd..ff0238d9d 100644 --- a/cmd/kapacitord/run/config.go +++ b/cmd/kapacitord/run/config.go @@ -25,6 +25,7 @@ import ( "github.com/influxdata/kapacitor/services/slack" "github.com/influxdata/kapacitor/services/smtp" "github.com/influxdata/kapacitor/services/stats" + "github.com/influxdata/kapacitor/services/storage" "github.com/influxdata/kapacitor/services/talk" "github.com/influxdata/kapacitor/services/task_store" "github.com/influxdata/kapacitor/services/udf" @@ -40,6 +41,7 @@ import ( type Config struct { HTTP httpd.Config `toml:"http"` Replay replay.Config `toml:"replay"` + Storage storage.Config `toml:"storage"` Task task_store.Config `toml:"task"` InfluxDB []influxdb.Config `toml:"influxdb"` Logging logging.Config `toml:"logging"` @@ -76,6 +78,7 @@ func NewConfig() *Config { } c.HTTP = httpd.NewConfig() + c.Storage = storage.NewConfig() c.Replay = replay.NewConfig() c.Task = task_store.NewConfig() c.Logging = logging.NewConfig() @@ -132,6 +135,7 @@ func NewDemoConfig() (*Config, error) { c.Replay.Dir = filepath.Join(homeDir, ".kapacitor", c.Replay.Dir) c.Task.Dir = filepath.Join(homeDir, ".kapacitor", c.Task.Dir) + c.Storage.BoltDBPath = filepath.Join(homeDir, ".kapacitor", c.Storage.BoltDBPath) c.DataDir = filepath.Join(homeDir, ".kapacitor", c.DataDir) return c, nil @@ -149,6 +153,10 @@ func (c *Config) Validate() error { if err != nil { return err } + err = c.Storage.Validate() + if err != nil { + return err + } err = c.Task.Validate() if err != nil { return err diff 
--git a/cmd/kapacitord/run/config_test.go b/cmd/kapacitord/run/config_test.go index b6c1b2bbe..66660470b 100644 --- a/cmd/kapacitord/run/config_test.go +++ b/cmd/kapacitord/run/config_test.go @@ -16,8 +16,8 @@ func TestConfig_Parse(t *testing.T) { [replay] dir = "/tmp/replay" -[task] -dir = "/tmp/task" +[storage] +boltdb = "/tmp/kapacitor.db" `, &c); err != nil { t.Fatal(err) } @@ -25,8 +25,8 @@ dir = "/tmp/task" // Validate configuration. if c.Replay.Dir != "/tmp/replay" { t.Fatalf("unexpected replay dir: %s", c.Replay.Dir) - } else if c.Task.Dir != "/tmp/task" { - t.Fatalf("unexpected task dir: %s", c.Task.Dir) + } else if c.Storage.BoltDBPath != "/tmp/kapacitor.db" { + t.Fatalf("unexpected storage boltdb-path: %s", c.Storage.BoltDBPath) } } @@ -41,8 +41,8 @@ urls=["http://localhost:8086"] [replay] dir = "/tmp/replay" -[task] -dir = "/tmp/task" +[storage] +boltdb = "/tmp/kapacitor.db" `, &c); err != nil { t.Fatal(err) } @@ -51,7 +51,7 @@ dir = "/tmp/task" t.Fatalf("failed to set env var: %v", err) } - if err := os.Setenv("KAPACITOR_TASK_DIR", "/var/lib/kapacitor/task"); err != nil { + if err := os.Setenv("KAPACITOR_STORAGE_BOLTDB", "/var/lib/kapacitor/kapacitor.db"); err != nil { t.Fatalf("failed to set env var: %v", err) } @@ -66,8 +66,8 @@ dir = "/tmp/task" // Validate configuration. 
if c.Replay.Dir != "/var/lib/kapacitor/replay" { t.Fatalf("unexpected replay dir: %s", c.Replay.Dir) - } else if c.Task.Dir != "/var/lib/kapacitor/task" { - t.Fatalf("unexpected task dir: %s", c.Task.Dir) + } else if c.Storage.BoltDBPath != "/var/lib/kapacitor/kapacitor.db" { + t.Fatalf("unexpected storage boltdb-path: %s", c.Storage.BoltDBPath) } else if c.InfluxDB[0].URLs[0] != "http://localhost:18086" { t.Fatalf("unexpected url 0: %s", c.InfluxDB[0].URLs[0]) } diff --git a/cmd/kapacitord/run/server.go b/cmd/kapacitord/run/server.go index 1e7364cab..7aeb95a46 100644 --- a/cmd/kapacitord/run/server.go +++ b/cmd/kapacitord/run/server.go @@ -32,6 +32,7 @@ import ( "github.com/influxdata/kapacitor/services/slack" "github.com/influxdata/kapacitor/services/smtp" "github.com/influxdata/kapacitor/services/stats" + "github.com/influxdata/kapacitor/services/storage" "github.com/influxdata/kapacitor/services/talk" "github.com/influxdata/kapacitor/services/task_store" "github.com/influxdata/kapacitor/services/udf" @@ -65,6 +66,7 @@ type Server struct { LogService logging.Interface HTTPDService *httpd.Service + StorageService *storage.Service TaskStore *task_store.Service ReplayService *replay.Service InfluxDBService *influxdb.Service @@ -128,8 +130,9 @@ func NewServer(c *Config, buildInfo *BuildInfo, logService logging.Interface) (* s.appendSMTPService(c.SMTP) s.initHTTPDService(c.HTTP) s.appendInfluxDBService(c.InfluxDB, c.defaultInfluxDB, c.Hostname) + s.appendStorageService(c.Storage) s.appendTaskStoreService(c.Task) - s.appendReplayStoreService(c.Replay) + s.appendReplayService(c.Replay) s.appendOpsGenieService(c.OpsGenie) s.appendVictorOpsService(c.VictorOps) s.appendPagerDutyService(c.PagerDuty) @@ -164,6 +167,14 @@ func NewServer(c *Config, buildInfo *BuildInfo, logService logging.Interface) (* return s, nil } +func (s *Server) appendStorageService(c storage.Config) { + l := s.LogService.NewLogger("[storage] ", log.LstdFlags) + srv := 
storage.NewService(c, l) + + s.StorageService = srv + s.Services = append(s.Services, srv) +} + func (s *Server) appendSMTPService(c smtp.Config) { if c.Enabled { l := s.LogService.NewLogger("[smtp] ", log.LstdFlags) @@ -206,6 +217,7 @@ func (s *Server) appendHTTPDService() { func (s *Server) appendTaskStoreService(c task_store.Config) { l := s.LogService.NewLogger("[task_store] ", log.LstdFlags) srv := task_store.NewService(c, l) + srv.StorageService = s.StorageService srv.HTTPDService = s.HTTPDService srv.TaskMaster = s.TaskMaster @@ -214,9 +226,10 @@ func (s *Server) appendTaskStoreService(c task_store.Config) { s.Services = append(s.Services, srv) } -func (s *Server) appendReplayStoreService(c replay.Config) { +func (s *Server) appendReplayService(c replay.Config) { l := s.LogService.NewLogger("[replay] ", log.LstdFlags) srv := replay.NewService(c, l) + srv.StorageService = s.StorageService srv.TaskStore = s.TaskStore srv.HTTPDService = s.HTTPDService srv.InfluxDBService = s.InfluxDBService @@ -317,9 +330,10 @@ func (s *Server) appendCollectdService(c collectd.Config) { if !c.Enabled { return } - l := s.LogService.NewStaticLevelLogger("[collectd] ", log.LstdFlags, wlog.INFO) srv := collectd.NewService(c) - srv.SetLogger(l) + w := s.LogService.NewStaticLevelWriter(wlog.INFO) + srv.SetLogOutput(w) + srv.MetaClient = s.MetaClient srv.PointsWriter = s.TaskMaster s.Services = append(s.Services, srv) @@ -329,12 +343,13 @@ func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { if !c.Enabled { return nil } - l := s.LogService.NewStaticLevelLogger("[opentsdb] ", log.LstdFlags, wlog.INFO) srv, err := opentsdb.NewService(c) if err != nil { return err } - srv.SetLogger(l) + w := s.LogService.NewStaticLevelWriter(wlog.INFO) + srv.SetLogOutput(w) + srv.PointsWriter = s.TaskMaster srv.MetaClient = s.MetaClient s.Services = append(s.Services, srv) @@ -345,12 +360,12 @@ func (s *Server) appendGraphiteService(c graphite.Config) error { if !c.Enabled { return nil } - l 
:= s.LogService.NewStaticLevelLogger("[graphite] ", log.LstdFlags, wlog.INFO) srv, err := graphite.NewService(c) if err != nil { return err } - srv.SetLogger(l) + w := s.LogService.NewStaticLevelWriter(wlog.INFO) + srv.SetLogOutput(w) srv.PointsWriter = s.TaskMaster srv.MetaClient = s.MetaClient diff --git a/cmd/kapacitord/run/server_helper_test.go b/cmd/kapacitord/run/server_helper_test.go index e95902684..77d18699f 100644 --- a/cmd/kapacitord/run/server_helper_test.go +++ b/cmd/kapacitord/run/server_helper_test.go @@ -79,7 +79,7 @@ func Client(s *Server) *client.Client { func (s *Server) Close() { s.Server.Close() os.RemoveAll(s.Config.Replay.Dir) - os.RemoveAll(s.Config.Task.Dir) + os.Remove(s.Config.Storage.BoltDBPath) os.RemoveAll(s.Config.DataDir) } @@ -159,16 +159,18 @@ func NewConfig() *run.Config { c.PostInit() c.Reporting.Enabled = false c.Replay.Dir = MustTempFile() - c.Task.Dir = MustTempFile() + c.Storage.BoltDBPath = MustTempFile() c.DataDir = MustTempFile() c.HTTP.BindAddress = "127.0.0.1:0" + //c.HTTP.BindAddress = "127.0.0.1:9092" + //c.HTTP.GZIP = false c.InfluxDB[0].Enabled = false return c } // MustTempFile returns a path to a temporary file. 
func MustTempFile() string { - f, err := ioutil.TempFile("", "influxd-") + f, err := ioutil.TempFile("", "kapacitord-") if err != nil { panic(err) } @@ -195,6 +197,10 @@ func (l *LogService) NewStaticLevelLogger(prefix string, flag int, level wlog.Le return log.New(wlog.NewStaticLevelWriter(os.Stderr, level), prefix, flag) } +func (l *LogService) NewStaticLevelWriter(level wlog.Level) io.Writer { + return wlog.NewStaticLevelWriter(os.Stderr, level) +} + type queryFunc func(q string) *iclient.Response type InfluxDB struct { diff --git a/cmd/kapacitord/run/server_test.go b/cmd/kapacitord/run/server_test.go index c017c892a..a717712fa 100644 --- a/cmd/kapacitord/run/server_test.go +++ b/cmd/kapacitord/run/server_test.go @@ -28,6 +28,7 @@ import ( func TestServer_Ping(t *testing.T) { s, cli := OpenDefaultServer() + t.Log(s.URL()) defer s.Close() _, version, err := cli.Ping() if err != nil { @@ -38,12 +39,12 @@ func TestServer_Ping(t *testing.T) { } } -func TestServer_DefineTask(t *testing.T) { +func TestServer_CreateTask(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testTaskName" - ttype := "stream" + id := "testTaskID" + ttype := client.StreamTask dbrps := []client.DBRP{ { Database: "mydb", @@ -58,12 +59,18 @@ func TestServer_DefineTask(t *testing.T) { |from() .measurement('test') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - ti, err := cli.Task(name, false, false) + ti, err := cli.Task(task.Link, nil) if err != nil { t.Fatal(err) } @@ -71,14 +78,14 @@ func TestServer_DefineTask(t *testing.T) { if ti.Error != "" { t.Fatal(ti.Error) } - if ti.Name != name { - t.Fatalf("unexpected name got %s exp %s", ti.Name, name) + if ti.ID != id { + t.Fatalf("unexpected id got %s exp %s", ti.ID, id) } - if ti.Type != "stream" { - t.Fatalf("unexpected 
type got %s exp %s", ti.Type, "stream") + if ti.Type != client.StreamTask { + t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask) } - if ti.Enabled != false { - t.Fatalf("unexpected enabled got %v exp %v", ti.Enabled, false) + if ti.Status != client.Disabled { + t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled) } if !reflect.DeepEqual(ti.DBRPs, dbrps) { t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps) @@ -86,9 +93,9 @@ func TestServer_DefineTask(t *testing.T) { if ti.TICKscript != tick { t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick) } - dot := "digraph testTaskName {\nstream0 -> from1;\n}" + dot := "digraph testTaskID {\nstream0 -> from1;\n}" if ti.Dot != dot { - t.Fatalf("unexpected dot got %s exp %s", ti.Dot, dot) + t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot) } } @@ -96,8 +103,8 @@ func TestServer_EnableTask(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testTaskName" - ttype := "stream" + id := "testTaskID" + ttype := client.StreamTask dbrps := []client.DBRP{ { Database: "mydb", @@ -112,17 +119,25 @@ func TestServer_EnableTask(t *testing.T) { |from() .measurement('test') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - ti, err := cli.Task(name, false, false) + ti, err := cli.Task(task.Link, nil) if err != nil { t.Fatal(err) } @@ -130,14 +145,17 @@ func TestServer_EnableTask(t *testing.T) { if ti.Error != "" { t.Fatal(ti.Error) } - if ti.Name != name { - t.Fatalf("unexpected name got %s exp %s", ti.Name, name) + if ti.ID != id { + t.Fatalf("unexpected id got %s exp %s", ti.ID, id) } - if ti.Type != 
"stream" { - t.Fatalf("unexpected type got %s exp %s", ti.Type, "stream") + if ti.Type != client.StreamTask { + t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask) } - if ti.Enabled != true { - t.Fatalf("unexpected enabled got %v exp %v", ti.Enabled, true) + if ti.Status != client.Enabled { + t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled) + } + if ti.Executing != true { + t.Fatalf("unexpected executing got %v exp %v", ti.Executing, true) } if !reflect.DeepEqual(ti.DBRPs, dbrps) { t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps) @@ -145,7 +163,7 @@ func TestServer_EnableTask(t *testing.T) { if ti.TICKscript != tick { t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick) } - dot := `digraph testTaskName { + dot := `digraph testTaskID { graph [throughput="0.00 points/s"]; stream0 [avg_exec_time_ns="0" ]; @@ -154,7 +172,77 @@ stream0 -> from1 [processed="0"]; from1 [avg_exec_time_ns="0" ]; }` if ti.Dot != dot { - t.Fatalf("unexpected dot got\n%s exp\n%s", ti.Dot, dot) + t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot) + } +} + +func TestServer_EnableTaskOnCreate(t *testing.T) { + s, cli := OpenDefaultServer() + defer s.Close() + + id := "testTaskID" + ttype := client.StreamTask + dbrps := []client.DBRP{ + { + Database: "mydb", + RetentionPolicy: "myrp", + }, + { + Database: "otherdb", + RetentionPolicy: "default", + }, + } + tick := `stream + |from() + .measurement('test') +` + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Enabled, + }) + if err != nil { + t.Fatal(err) + } + + ti, err := cli.Task(task.Link, nil) + if err != nil { + t.Fatal(err) + } + + if ti.Error != "" { + t.Fatal(ti.Error) + } + if ti.ID != id { + t.Fatalf("unexpected id got %s exp %s", ti.ID, id) + } + if ti.Type != client.StreamTask { + t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask) + } + if ti.Status != 
client.Enabled { + t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Enabled) + } + if ti.Executing != true { + t.Fatalf("unexpected executing got %v exp %v", ti.Executing, true) + } + if !reflect.DeepEqual(ti.DBRPs, dbrps) { + t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps) + } + if ti.TICKscript != tick { + t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick) + } + dot := `digraph testTaskID { +graph [throughput="0.00 points/s"]; + +stream0 [avg_exec_time_ns="0" ]; +stream0 -> from1 [processed="0"]; + +from1 [avg_exec_time_ns="0" ]; +}` + if ti.Dot != dot { + t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot) } } @@ -162,8 +250,8 @@ func TestServer_DisableTask(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testTaskName" - ttype := "stream" + id := "testTaskID" + ttype := client.StreamTask dbrps := []client.DBRP{ { Database: "mydb", @@ -178,22 +266,32 @@ func TestServer_DisableTask(t *testing.T) { |from() .measurement('test') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - err = cli.Disable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - ti, err := cli.Task(name, false, false) + ti, err := cli.Task(task.Link, nil) if err != nil { t.Fatal(err) } @@ -201,14 +299,14 @@ func TestServer_DisableTask(t *testing.T) { if ti.Error != "" { t.Fatal(ti.Error) } - if ti.Name != name { - t.Fatalf("unexpected name got %s exp %s", ti.Name, name) + if ti.ID != id { + t.Fatalf("unexpected id got %s exp %s", ti.ID, id) } - if ti.Type != "stream" { - t.Fatalf("unexpected type got 
%s exp %s", ti.Type, "stream") + if ti.Type != client.StreamTask { + t.Fatalf("unexpected type got %v exp %v", ti.Type, client.StreamTask) } - if ti.Enabled != false { - t.Fatalf("unexpected enabled got %v exp %v", ti.Enabled, false) + if ti.Status != client.Disabled { + t.Fatalf("unexpected status got %v exp %v", ti.Status, client.Disabled) } if !reflect.DeepEqual(ti.DBRPs, dbrps) { t.Fatalf("unexpected dbrps got %s exp %s", ti.DBRPs, dbrps) @@ -216,9 +314,9 @@ func TestServer_DisableTask(t *testing.T) { if ti.TICKscript != tick { t.Fatalf("unexpected TICKscript got %s exp %s", ti.TICKscript, tick) } - dot := "digraph testTaskName {\nstream0 -> from1;\n}" + dot := "digraph testTaskID {\nstream0 -> from1;\n}" if ti.Dot != dot { - t.Fatalf("unexpected dot got %s exp %s", ti.Dot, dot) + t.Fatalf("unexpected dot\ngot\n%s\nexp\n%s\n", ti.Dot, dot) } } @@ -226,8 +324,8 @@ func TestServer_DeleteTask(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testTaskName" - ttype := "stream" + id := "testTaskID" + ttype := client.StreamTask dbrps := []client.DBRP{ { Database: "mydb", @@ -242,17 +340,23 @@ func TestServer_DeleteTask(t *testing.T) { |from() .measurement('test') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.DeleteTask(name) + err = cli.DeleteTask(task.Link) if err != nil { t.Fatal(err) } - ti, err := cli.Task(name, false, false) + ti, err := cli.Task(task.Link, nil) if err == nil { t.Fatal("unexpected task:", ti) } @@ -263,11 +367,12 @@ func TestServer_ListTasks(t *testing.T) { defer s.Close() count := 10 - ttype := "stream" + ttype := client.StreamTask tick := `stream |from() .measurement('test') ` + dbrps := []client.DBRP{ { Database: "mydb", @@ -279,18 +384,21 @@ func TestServer_ListTasks(t *testing.T) { }, } for i := 0; i < 
count; i++ { - name := fmt.Sprintf("testTaskName%d", i) - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + id := fmt.Sprintf("testTaskID%d", i) + status := client.Disabled + if i%2 == 0 { + status = client.Enabled + } + _, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: status, + }) if err != nil { t.Fatal(err) } - - if i%2 == 0 { - err = cli.Enable(name) - if err != nil { - t.Fatal(err) - } - } } tasks, err := cli.ListTasks(nil) if err != nil { @@ -300,21 +408,109 @@ func TestServer_ListTasks(t *testing.T) { t.Fatalf("unexpected number of tasks: exp:%d got:%d", exp, got) } for i, task := range tasks { - if exp, got := fmt.Sprintf("testTaskName%d", i), task.Name; exp != got { - t.Errorf("unexpected task.Name i:%d exp:%s got:%s", i, exp, got) + if exp, got := fmt.Sprintf("testTaskID%d", i), task.ID; exp != got { + t.Errorf("unexpected task.ID i:%d exp:%s got:%s", i, exp, got) } - if exp, got := "stream", task.Type; exp != got { + if exp, got := client.StreamTask, task.Type; exp != got { t.Errorf("unexpected task.Type i:%d exp:%v got:%v", i, exp, got) } if !reflect.DeepEqual(task.DBRPs, dbrps) { t.Fatalf("unexpected dbrps i:%d exp:%s got:%s", i, dbrps, task.DBRPs) } - if exp, got := i%2 == 0, task.Enabled; exp != got { - t.Errorf("unexpected task.Enabled i:%d exp:%v got:%v", i, exp, got) + exp := client.Disabled + if i%2 == 0 { + exp = client.Enabled + } + if got := task.Status; exp != got { + t.Errorf("unexpected task.Status i:%d exp:%v got:%v", i, exp, got) } if exp, got := i%2 == 0, task.Executing; exp != got { t.Errorf("unexpected task.Executing i:%d exp:%v got:%v", i, exp, got) } + if exp, got := true, len(task.Dot) != 0; exp != got { + t.Errorf("unexpected task.Dot i:%d exp:\n%v\ngot:\n%v\n", i, exp, got) + } + if exp, got := tick, task.TICKscript; exp != got { + t.Errorf("unexpected task.TICKscript i:%d exp:%v got:%v", i, exp, got) + } + if exp, got := "", task.Error; 
exp != got { + t.Errorf("unexpected task.Error i:%d exp:%v got:%v", i, exp, got) + } + } + +} + +func TestServer_ListTasks_Fields(t *testing.T) { + s, cli := OpenDefaultServer() + defer s.Close() + count := 100 + + ttype := client.StreamTask + tick := `stream + |from() + .measurement('test') +` + dbrps := []client.DBRP{ + { + Database: "mydb", + RetentionPolicy: "myrp", + }, + { + Database: "otherdb", + RetentionPolicy: "default", + }, + } + for i := 0; i < count; i++ { + id := fmt.Sprintf("testTaskID%d", i) + _, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Enabled, + }) + if err != nil { + t.Fatal(err) + } + } + tasks, err := cli.ListTasks(&client.ListTasksOptions{ + Pattern: "testTaskID1*", + Fields: []string{"type", "status"}, + Offset: 1, + Limit: 5, + }) + if err != nil { + t.Fatal(err) + } + if exp, got := 5, len(tasks); exp != got { + t.Fatalf("unexpected number of tasks: exp:%d got:%d", exp, got) + } + for i, task := range tasks { + if exp, got := fmt.Sprintf("testTaskID1%d", i), task.ID; exp != got { + t.Errorf("unexpected task.ID i:%d exp:%s got:%s", i, exp, got) + } + if exp, got := client.StreamTask, task.Type; exp != got { + t.Errorf("unexpected task.Type i:%d exp:%v got:%v", i, exp, got) + } + if exp, got := client.Enabled, task.Status; exp != got { + t.Errorf("unexpected task.Status i:%d exp:%v got:%v", i, exp, got) + } + // We didn't request these fields so they should be default zero values + if exp, got := 0, len(task.DBRPs); exp != got { + t.Fatalf("unexpected dbrps i:%d exp:%d got:%d", i, exp, got) + } + if exp, got := false, task.Executing; exp != got { + t.Errorf("unexpected task.Executing i:%d exp:%v got:%v", i, exp, got) + } + if exp, got := "", task.Dot; exp != got { + t.Errorf("unexpected task.Dot i:%d exp:%v got:%v", i, exp, got) + } + if exp, got := "", task.TICKscript; exp != got { + t.Errorf("unexpected task.TICKscript i:%d exp:%v got:%v", i, exp, got) + } 
+ if exp, got := "", task.Error; exp != got { + t.Errorf("unexpected task.Error i:%d exp:%v got:%v", i, exp, got) + } } } @@ -323,8 +519,8 @@ func TestServer_StreamTask(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testStreamTask" - ttype := "stream" + id := "testStreamTask" + ttype := client.StreamTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -339,20 +535,28 @@ func TestServer_StreamTask(t *testing.T) { |httpOut('count') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) + endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id) // Request data before any writes and expect null responses - nullResponse := `{"Series":null,"Messages":null,"Err":null}` + nullResponse := `{}` err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -380,7 +584,7 @@ test value=1 0000000011 v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - exp := `{"Series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}],"Messages":null,"Err":null}` + exp := `{"series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -391,8 +595,8 @@ func TestServer_StreamTask_AllMeasurements(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testStreamTask" - ttype := "stream" + id := "testStreamTask" + ttype := client.StreamTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -406,20 +610,28 
@@ func TestServer_StreamTask_AllMeasurements(t *testing.T) { |httpOut('count') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) + endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id) // Request data before any writes and expect null responses - nullResponse := `{"Series":null,"Messages":null,"Err":null}` + nullResponse := `{}` err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -447,7 +659,7 @@ test0 value=1 0000000011 v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - exp := `{"Series":[{"name":"test0","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}],"Messages":null,"Err":null}` + exp := `{"series":[{"name":"test0","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}]}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -487,8 +699,8 @@ func TestServer_BatchTask(t *testing.T) { defer s.Close() cli := Client(s) - name := "testBatchTask" - ttype := "batch" + id := "testBatchTask" + ttype := client.BatchTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -501,24 +713,34 @@ func TestServer_BatchTask(t *testing.T) { |httpOut('count') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) 
if err != nil { t.Fatal(err) } - endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) + endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id) - exp := `{"Series":[{"name":"cpu","columns":["time","count"],"values":[["1971-01-01T00:00:01.002Z",2]]}],"Messages":null,"Err":null}` + exp := `{"series":[{"name":"cpu","columns":["time","count"],"values":[["1971-01-01T00:00:01.002Z",2]]}]}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) } - err = cli.Disable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } @@ -539,8 +761,8 @@ func TestServer_InvalidBatchTask(t *testing.T) { defer s.Close() cli := Client(s) - name := "testInvalidBatchTask" - ttype := "batch" + id := "testInvalidBatchTask" + ttype := client.BatchTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -553,18 +775,26 @@ func TestServer_InvalidBatchTask(t *testing.T) { |httpOut('count') ` - err := cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) expErr := `batch query is not allowed to request data from "unknowndb"."unknownrp"` if err != nil && err.Error() != expErr { t.Fatalf("unexpected err: got %v exp %s", err, expErr) } - err = cli.DeleteTask(name) + err = cli.DeleteTask(task.Link) if err != nil { t.Fatal(err) } @@ -574,8 +804,8 @@ func TestServer_RecordReplayStream(t *testing.T) { s, cli := OpenDefaultServer() defer s.Close() - name := "testStreamTask" - ttype := "stream" + id := "testStreamTask" + ttype := client.StreamTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -600,10 +830,28 @@ func TestServer_RecordReplayStream(t 
*testing.T) { .log('` + tmpDir + `/alert.log') ` - err = cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) + if err != nil { + t.Fatal(err) + } + recording, err := cli.RecordStream(client.RecordStreamOptions{ + ID: "recordingid", + Task: task.ID, + Stop: time.Date(1970, 1, 1, 0, 0, 10, 0, time.UTC), + }) if err != nil { t.Fatal(err) } + if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got { + t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp) + } + points := `test value=1 0000000000 test value=1 0000000001 test value=1 0000000001 @@ -623,27 +871,58 @@ test value=1 0000000010 test value=1 0000000011 test value=1 0000000012 ` - rid := make(chan string, 1) - started := make(chan struct{}) - go func() { - id, err := cli.RecordStream(name, 10*time.Second) - close(started) - _, err = cli.Recording(id) - if err != nil { - t.Fatal(err) - } - rid <- id - }() - <-started v := url.Values{} v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - id := <-rid - err = cli.Replay(name, id, true, true) + retry := 0 + for recording.Status == client.Running { + time.Sleep(100 * time.Millisecond) + recording, err = cli.Recording(recording.Link) + if err != nil { + t.Fatal(err) + } + retry++ + if retry > 10 { + t.Fatal("failed to finish recording") + } + } + if recording.Status != client.Finished || recording.Error != "" { + t.Errorf("recording failed: %s", recording.Error) + } + + replay, err := cli.CreateReplay(client.CreateReplayOptions{ + ID: "replayid", + Task: id, + Recording: recording.ID, + Clock: client.Fast, + RecordingTime: true, + }) if err != nil { t.Fatal(err) } + if exp, got := "/kapacitor/v1/replays/replayid", replay.Link.Href; exp != got { + t.Errorf("unexpected replay.Link.Href got %s exp %s", got, exp) + } + if exp, got := id, replay.Task; exp != got 
{ + t.Errorf("unexpected replay.Task got %s exp %s", got, exp) + } + + retry = 0 + for replay.Status == client.Running { + time.Sleep(100 * time.Millisecond) + replay, err = cli.Replay(replay.Link) + if err != nil { + t.Fatal(err) + } + retry++ + if retry > 10 { + t.Fatal("failed to finish replay") + } + } + if replay.Status != client.Finished || replay.Error != "" { + t.Errorf("replay failed: %s", replay.Error) + } f, err := os.Open(path.Join(tmpDir, "alert.log")) if err != nil { @@ -683,6 +962,36 @@ test value=1 0000000012 if !reflect.DeepEqual(exp, got) { t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp) } + + recordings, err := cli.ListRecordings(nil) + if exp, got := 1, len(recordings); exp != got { + t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + } + + err = cli.DeleteRecording(recordings[0].Link) + if err != nil { + t.Error(err) + } + + recordings, err = cli.ListRecordings(nil) + if exp, got := 0, len(recordings); exp != got { + t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + } + + replays, err := cli.ListReplays(nil) + if exp, got := 1, len(replays); exp != got { + t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp) + } + + err = cli.DeleteReplay(replays[0].Link) + if err != nil { + t.Error(err) + } + + replays, err = cli.ListReplays(nil) + if exp, got := 0, len(replays); exp != got { + t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp) + } } func TestServer_RecordReplayBatch(t *testing.T) { @@ -719,8 +1028,8 @@ func TestServer_RecordReplayBatch(t *testing.T) { defer s.Close() cli := Client(s) - name := "testBatchTask" - ttype := "batch" + id := "testBatchTask" + ttype := client.BatchTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -742,25 +1051,322 @@ func TestServer_RecordReplayBatch(t *testing.T) { .log('` + tmpDir + `/alert.log') ` - err = cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + _, err = cli.CreateTask(client.CreateTaskOptions{ + 
ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - id, err := cli.RecordBatch(name, "", time.Time{}, time.Time{}, time.Second*8) + recording, err := cli.RecordBatch(client.RecordBatchOptions{ + ID: "recordingid", + Task: id, + Start: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), + Stop: time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC), + }) if err != nil { t.Fatal(err) } + if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got { + t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp) + } // Wait for recording to finish. - _, err = cli.Recording(id) + retry := 0 + for recording.Status == client.Running { + time.Sleep(100 * time.Millisecond) + recording, err = cli.Recording(recording.Link) + if err != nil { + t.Fatal(err) + } + retry++ + if retry > 10 { + t.Fatal("failed to perfom recording") + } + } + + replay, err := cli.CreateReplay(client.CreateReplayOptions{ + Task: id, + Recording: recording.ID, + Clock: client.Fast, + RecordingTime: true, + }) if err != nil { t.Fatal(err) } + if exp, got := id, replay.Task; exp != got { + t.Errorf("unexpected replay.Task got %s exp %s", got, exp) + } - err = cli.Replay(name, id, true, true) + // Wait for replay to finish. 
+ retry = 0 + for replay.Status == client.Running { + time.Sleep(100 * time.Millisecond) + replay, err = cli.Replay(replay.Link) + if err != nil { + t.Fatal(err) + } + retry++ + if retry > 10 { + t.Fatal("failed to perfom replay") + } + } + + f, err := os.Open(path.Join(tmpDir, "alert.log")) if err != nil { t.Fatal(err) } + defer f.Close() + type response struct { + ID string `json:"id"` + Message string `json:"message"` + Time time.Time `json:"time"` + Level string `json:"level"` + Data influxql.Result `json:"data"` + } + exp := []response{ + { + ID: "test-batch", + Message: "test-batch got: 3", + Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC), + Level: "CRITICAL", + Data: influxql.Result{ + Series: models.Rows{ + { + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano), + 2.0, + }, + { + time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano), + 3.0, + }, + }, + }, + }, + }, + }, + { + ID: "test-batch", + Message: "test-batch got: 4", + Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), + Level: "CRITICAL", + Data: influxql.Result{ + Series: models.Rows{ + { + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano), + 4.0, + }, + { + time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano), + 5.0, + }, + }, + }, + }, + }, + }, + } + scanner := bufio.NewScanner(f) + got := make([]response, 0) + g := response{} + for scanner.Scan() { + json.Unmarshal(scanner.Bytes(), &g) + got = append(got, g) + } + if !reflect.DeepEqual(exp, got) { + t.Errorf("unexpected alert log:\ngot %v\nexp %v", got, exp) + t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0]) + t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0]) + } + + recordings, err := cli.ListRecordings(nil) + if 
exp, got := 1, len(recordings); exp != got { + t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + } + + err = cli.DeleteRecording(recordings[0].Link) + if err != nil { + t.Error(err) + } + + recordings, err = cli.ListRecordings(nil) + if exp, got := 0, len(recordings); exp != got { + t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + } + + replays, err := cli.ListReplays(nil) + if exp, got := 1, len(replays); exp != got { + t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp) + } + + err = cli.DeleteReplay(replays[0].Link) + if err != nil { + t.Error(err) + } + + replays, err = cli.ListReplays(nil) + if exp, got := 0, len(replays); exp != got { + t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp) + } +} +func TestServer_RecordReplayQuery(t *testing.T) { + c := NewConfig() + c.InfluxDB[0].Enabled = true + db := NewInfluxDB(func(q string) *iclient.Response { + if len(q) > 6 && q[:6] == "SELECT" { + r := &iclient.Response{ + Results: []iclient.Result{{ + Series: []models.Row{ + { + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano), + 0.0, + }, + { + time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC).Format(time.RFC3339Nano), + 1.0, + }, + }, + }, + { + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC).Format(time.RFC3339Nano), + 2.0, + }, + { + time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC).Format(time.RFC3339Nano), + 3.0, + }, + }, + }, + { + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC).Format(time.RFC3339Nano), + 4.0, + }, + { + time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC).Format(time.RFC3339Nano), + 5.0, + }, + }, + }, + }, + }}, + } + return r + } + return nil + }) + c.InfluxDB[0].URLs = []string{db.URL()} + s := OpenServer(c) + defer s.Close() + cli := 
Client(s) + + id := "testBatchTask" + ttype := client.BatchTask + dbrps := []client.DBRP{{ + Database: "mydb", + RetentionPolicy: "myrp", + }} + + tmpDir, err := ioutil.TempDir("", "testBatchTaskRecording") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tick := `batch + |query('SELECT value from mydb.myrp.cpu') + .period(2s) + .every(2s) + |alert() + .id('test-batch') + .message('{{ .ID }} got: {{ index .Fields "value" }}') + .crit(lambda: "value" > 2.0) + .log('` + tmpDir + `/alert.log') +` + + _, err = cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) + if err != nil { + t.Fatal(err) + } + + recording, err := cli.RecordQuery(client.RecordQueryOptions{ + ID: "recordingid", + Query: "SELECT value from mydb.myrp.cpu", + Type: client.BatchTask, + }) + if err != nil { + t.Fatal(err) + } + if exp, got := "/kapacitor/v1/recordings/recordingid", recording.Link.Href; exp != got { + t.Errorf("unexpected recording.Link.Href got %s exp %s", got, exp) + } + // Wait for recording to finish. + retry := 0 + for recording.Status == client.Running { + time.Sleep(100 * time.Millisecond) + recording, err = cli.Recording(recording.Link) + if err != nil { + t.Fatal(err) + } + retry++ + if retry > 10 { + t.Fatal("failed to perfom recording") + } + } + + replay, err := cli.CreateReplay(client.CreateReplayOptions{ + Task: id, + Recording: recording.ID, + Clock: client.Fast, + RecordingTime: true, + }) + if err != nil { + t.Fatal(err) + } + if exp, got := id, replay.Task; exp != got { + t.Errorf("unexpected replay.Task got %s exp %s", got, exp) + } + + // Wait for replay to finish. 
+ retry = 0 + for replay.Status == client.Running { + time.Sleep(100 * time.Millisecond) + replay, err = cli.Replay(replay.Link) + if err != nil { + t.Fatal(err) + } + retry++ + if retry > 10 { + t.Fatal("failed to perfom replay") + } + } f, err := os.Open(path.Join(tmpDir, "alert.log")) if err != nil { @@ -836,6 +1442,36 @@ func TestServer_RecordReplayBatch(t *testing.T) { t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[0].Data.Series[0], exp[0].Data.Series[0]) t.Errorf("unexpected alert log:\ngot %v\nexp %v", got[1].Data.Series[0], exp[1].Data.Series[0]) } + + recordings, err := cli.ListRecordings(nil) + if exp, got := 1, len(recordings); exp != got { + t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + } + + err = cli.DeleteRecording(recordings[0].Link) + if err != nil { + t.Error(err) + } + + recordings, err = cli.ListRecordings(nil) + if exp, got := 0, len(recordings); exp != got { + t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + } + + replays, err := cli.ListReplays(nil) + if exp, got := 1, len(replays); exp != got { + t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp) + } + + err = cli.DeleteReplay(replays[0].Link) + if err != nil { + t.Error(err) + } + + replays, err = cli.ListReplays(nil) + if exp, got := 0, len(replays); exp != got { + t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp) + } } // If this test fails due to missing python dependencies, run 'INSTALL_PREFIX=/usr/local ./install-deps.sh' from the root directory of the @@ -921,8 +1557,8 @@ func testStreamAgent(t *testing.T, c *run.Config) { defer s.Close() cli := Client(s) - name := "testUDFTask" - ttype := "stream" + id := "testUDFTask" + ttype := client.StreamTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -942,20 +1578,28 @@ func testStreamAgent(t *testing.T, c *run.Config) { |httpOut('moving_avg') ` - err = cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := 
cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - endpoint := fmt.Sprintf("%s/task/%s/moving_avg", s.URL(), name) + endpoint := fmt.Sprintf("%s/tasks/%s/moving_avg", s.URL(), id) // Request data before any writes and expect null responses - nullResponse := `{"Series":null,"Messages":null,"Err":null}` + nullResponse := `{}` err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -990,7 +1634,7 @@ test,group=b value=0 0000000011 v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - exp := `{"Series":[{"name":"test","tags":{"group":"a"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",0.9]]},{"name":"test","tags":{"group":"b"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",1.9]]}],"Messages":null,"Err":null}` + exp := `{"series":[{"name":"test","tags":{"group":"a"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",0.9]]},{"name":"test","tags":{"group":"b"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",1.9]]}]}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -1095,8 +1739,8 @@ func testStreamAgentSocket(t *testing.T, c *run.Config) { defer s.Close() cli := Client(s) - name := "testUDFTask" - ttype := "stream" + id := "testUDFTask" + ttype := client.StreamTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -1113,20 +1757,28 @@ func testStreamAgentSocket(t *testing.T, c *run.Config) { |httpOut('count') ` - err = cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { 
t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) + endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id) // Request data before any writes and expect null responses - nullResponse := `{"Series":null,"Messages":null,"Err":null}` + nullResponse := `{}` err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -1149,7 +1801,7 @@ test,group=a value=0 0000000011 v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - exp := `{"Series":[{"name":"test","tags":{"group":"a"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}` + exp := `{"series":[{"name":"test","tags":{"group":"a"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",10]]}]}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -1299,8 +1951,8 @@ func testBatchAgent(t *testing.T, c *run.Config) { defer s.Close() cli := Client(s) - name := "testUDFTask" - ttype := "batch" + id := "testUDFTask" + ttype := client.BatchTask dbrps := []client.DBRP{{ Database: "mydb", RetentionPolicy: "myrp", @@ -1317,23 +1969,33 @@ func testBatchAgent(t *testing.T, c *run.Config) { |httpOut('count') ` - err = cli.Define(name, ttype, dbrps, strings.NewReader(tick), false) + task, err := cli.CreateTask(client.CreateTaskOptions{ + ID: id, + Type: ttype, + DBRPs: dbrps, + TICKscript: tick, + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } - err = cli.Enable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Enabled, + }) if err != nil { t.Fatal(err) } - endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) - exp := 
`{"Series":[{"name":"cpu","tags":{"count":"1"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]},{"name":"cpu","tags":{"count":"0"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]}],"Messages":null,"Err":null}` + endpoint := fmt.Sprintf("%s/tasks/%s/count", s.URL(), id) + exp := `{"series":[{"name":"cpu","tags":{"count":"1"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]},{"name":"cpu","tags":{"count":"0"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]}]}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*50) if err != nil { t.Error(err) } - err = cli.Disable(name) + err = cli.UpdateTask(task.Link, client.UpdateTaskOptions{ + Status: client.Disabled, + }) if err != nil { t.Fatal(err) } diff --git a/etc/kapacitor/kapacitor.conf b/etc/kapacitor/kapacitor.conf index ff32457a0..b924d7336 100644 --- a/etc/kapacitor/kapacitor.conf +++ b/etc/kapacitor/kapacitor.conf @@ -32,10 +32,17 @@ data_dir = "/var/lib/kapacitor" [task] # Where to store the tasks database + # DEPRECATED: This option is not needed for new installations. + # It is only used to determine the location of the task.db file + # for migrating to the new `storage` service. dir = "/var/lib/kapacitor/tasks" # How often to snapshot running task state. snapshot-interval = "60s" +[storage] + # Where to store the Kapacitor boltdb database + boltdb = "/var/lib/kapacitor/kapacitor.db" + [deadman] # Configure a deadman's switch # Globally configure deadman's switches on all stream tasks. 
diff --git a/http_out.go b/http_out.go index 594a57473..793947992 100644 --- a/http_out.go +++ b/http_out.go @@ -17,7 +17,7 @@ import ( type HTTPOutNode struct { node c *pipeline.HTTPOutNode - result influxql.Result + result *influxql.Result groupSeriesIdx map[models.GroupID]int endpoint string routes []httpd.Route @@ -30,6 +30,7 @@ func newHTTPOutNode(et *ExecutingTask, n *pipeline.HTTPOutNode, l *log.Logger) ( node: node{Node: n, et: et, logger: l}, c: n, groupSeriesIdx: make(map[models.GroupID]int), + result: new(influxql.Result), } et.registerOutput(hn.c.Endpoint, hn) hn.node.runF = hn.runOut @@ -59,7 +60,7 @@ func (h *HTTPOutNode) runOut([]byte) error { } } - p := path.Join("/task", h.et.Task.Name, h.c.Endpoint) + p := path.Join("/tasks/", h.et.Task.ID, h.c.Endpoint) r := []httpd.Route{{ Name: h.Name(), diff --git a/node.go b/node.go index e6b4e5d8f..f7d9b5af9 100644 --- a/node.go +++ b/node.go @@ -92,7 +92,7 @@ func (n *node) abortParentEdges() { func (n *node) init() { tags := map[string]string{ - "task": n.et.Task.Name, + "task": n.et.Task.ID, "node": n.Name(), "type": n.et.Task.Type.String(), "kind": n.Desc(), @@ -161,7 +161,7 @@ func (n *node) addChild(c Node) (*Edge, error) { } n.children = append(n.children, c) - edge := newEdge(n.et.Task.Name, n.Name(), c.Name(), n.Provides(), defaultEdgeBufferSize, n.et.tm.LogService) + edge := newEdge(n.et.Task.ID, n.Name(), c.Name(), n.Provides(), defaultEdgeBufferSize, n.et.tm.LogService) if edge == nil { return nil, fmt.Errorf("unknown edge type %s", n.Provides()) } diff --git a/pipeline/http_out.go b/pipeline/http_out.go index ed57b268b..cc17f238d 100644 --- a/pipeline/http_out.go +++ b/pipeline/http_out.go @@ -4,8 +4,8 @@ package pipeline // // The cached data is available at the given endpoint. // The endpoint is the relative path from the API endpoint of the running task. -// For example if the task endpoint is at "/task/" and endpoint is -// "top10", then the data can be requested from "/task//top10". 
+// For example if the task endpoint is at "/kapacitor/v1/tasks/" and endpoint is +// "top10", then the data can be requested from "/kapacitor/v1/tasks//top10". // // Example: // stream diff --git a/services/httpd/config.go b/services/httpd/config.go index 7a503d963..5d01630c5 100644 --- a/services/httpd/config.go +++ b/services/httpd/config.go @@ -19,6 +19,10 @@ type Config struct { HttpsEnabled bool `toml:"https-enabled"` HttpsCertificate string `toml:"https-certificate"` ShutdownTimeout toml.Duration `toml:"shutdown-timeout"` + + // Enable gzipped encoding + // NOTE: this is ignored in toml since it is only consumed by the tests + GZIP bool `toml:"-"` } func NewConfig() Config { @@ -27,5 +31,6 @@ func NewConfig() Config { LogEnabled: true, HttpsCertificate: "/etc/ssl/kapacitor.pem", ShutdownTimeout: DefaultShutdownTimeout, + GZIP: true, } } diff --git a/services/httpd/handler.go b/services/httpd/handler.go index e83b5e111..9cec2757f 100644 --- a/services/httpd/handler.go +++ b/services/httpd/handler.go @@ -31,17 +31,21 @@ const ( statPointsWrittenFail = "points_written_fail" // Number of points that failed to be written ) +const BasePath = "/kapacitor/v1" + type Route struct { Name string Method string Pattern string HandlerFunc interface{} + noJSON bool } // Handler represents an HTTP handler for the Kapacitor API server. type Handler struct { methodMux map[string]*ServeMux requireAuthentication bool + allowGzip bool Version string MetaClient interface { @@ -61,86 +65,135 @@ type Handler struct { } // NewHandler returns a new instance of handler with routes. 
-func NewHandler(requireAuthentication, loggingEnabled, writeTrace bool, statMap *expvar.Map, l *log.Logger) *Handler { +func NewHandler(requireAuthentication, loggingEnabled, writeTrace, allowGzip bool, statMap *expvar.Map, l *log.Logger) *Handler { h := &Handler{ methodMux: make(map[string]*ServeMux), requireAuthentication: requireAuthentication, + allowGzip: allowGzip, Logger: l, loggingEnabled: loggingEnabled, WriteTrace: writeTrace, statMap: statMap, } - h.AddRoutes([]Route{ + allowedMethods := []string{ + "GET", + "POST", + "PATCH", + "DELETE", + "HEAD", + "OPTIONS", + } + + for _, method := range allowedMethods { + h.methodMux[method] = NewServeMux() + route := Route{ + // Catch all 404 + Name: "404", + Method: method, + Pattern: "/", + HandlerFunc: h.serve404, + } + h.addRawRoute(route) + } + + h.addRawRoutes([]Route{ { // Ping Name: "ping", Method: "GET", - Pattern: "/ping", + Pattern: BasePath + "/ping", HandlerFunc: h.servePing, }, { // Ping Name: "ping-head", Method: "HEAD", - Pattern: "/ping", + Pattern: BasePath + "/ping", HandlerFunc: h.servePing, }, + { + // Data-ingest route. + Name: "write", + Method: "POST", + Pattern: BasePath + "/write", + HandlerFunc: h.serveWrite, + }, { // Satisfy CORS checks. Name: "write", Method: "OPTIONS", - Pattern: "/write", + Pattern: BasePath + "/write", HandlerFunc: ServeOptions, }, { - // Data-ingest route. - Name: "write", + // Data-ingest route for /write endpoint without base path + Name: "write-raw", Method: "POST", Pattern: "/write", HandlerFunc: h.serveWrite, }, + { + // Satisfy CORS checks. 
+ Name: "write-raw", + Method: "OPTIONS", + Pattern: "/write", + HandlerFunc: ServeOptions, + }, { // Display current API routes Name: "routes", Method: "GET", - Pattern: "/:routes", + Pattern: BasePath + "/:routes", HandlerFunc: h.serveRoutes, }, { - // Display current log level + // Change current log level Name: "log-level", Method: "POST", - Pattern: "/loglevel", + Pattern: BasePath + "/loglevel", HandlerFunc: h.serveLogLevel, }, { - // Catch all 404 - Name: "404", + Name: "pprof", Method: "GET", - Pattern: "/", - HandlerFunc: h.serve404, + Pattern: BasePath + "/debug/pprof/", + HandlerFunc: pprof.Index, + noJSON: true, }, { - // Catch all 404 - Name: "404", - Method: "POST", - Pattern: "/", - HandlerFunc: h.serve404, + Name: "pprof/cmdline", + Method: "GET", + Pattern: BasePath + "/debug/pprof/cmdline", + HandlerFunc: pprof.Cmdline, + noJSON: true, }, { - // Catch all 404 - Name: "404", - Method: "DELETE", - Pattern: "/", - HandlerFunc: h.serve404, + Name: "pprof/profile", + Method: "GET", + Pattern: BasePath + "/debug/pprof/profile", + HandlerFunc: pprof.Profile, + noJSON: true, }, { - // Catch all 404 - Name: "404", - Method: "HEAD", - Pattern: "/", - HandlerFunc: h.serve404, + Name: "pprof/symbol", + Method: "GET", + Pattern: BasePath + "/debug/pprof/symbol", + HandlerFunc: pprof.Symbol, + noJSON: true, + }, + { + Name: "pprof/trace", + Method: "GET", + Pattern: BasePath + "/debug/pprof/trace", + HandlerFunc: pprof.Trace, + noJSON: true, + }, + { + Name: "debug/vars", + Method: "GET", + Pattern: BasePath + "/debug/vars", + HandlerFunc: serveExpvar, }, }) @@ -158,6 +211,25 @@ func (h *Handler) AddRoutes(routes []Route) error { } func (h *Handler) AddRoute(r Route) error { + if len(r.Pattern) > 0 && r.Pattern[0] != '/' { + return fmt.Errorf("route patterns must begin with a '/' %s", r.Pattern) + } + r.Pattern = BasePath + r.Pattern + return h.addRawRoute(r) +} + +func (h *Handler) addRawRoutes(routes []Route) error { + for _, r := range routes { + err := 
h.addRawRoute(r) + if err != nil { + return err + } + } + return nil +} + +// Add a route without prepending the BasePath +func (h *Handler) addRawRoute(r Route) error { var handler http.Handler // If it's a handler func that requires authorization, wrap it in authorization if hf, ok := r.HandlerFunc.(func(http.ResponseWriter, *http.Request, *meta.UserInfo)); ok { @@ -169,8 +241,12 @@ func (h *Handler) AddRoute(r Route) error { } // Set basic handlers for all requests - handler = jsonContent(handler) - handler = gzipFilter(handler) + if !r.noJSON { + handler = jsonContent(handler) + } + if h.allowGzip { + handler = gzipFilter(handler) + } handler = versionHeader(handler, h) handler = cors(handler) handler = requestID(handler) @@ -183,8 +259,7 @@ func (h *Handler) AddRoute(r Route) error { mux, ok := h.methodMux[r.Method] if !ok { - mux = NewServeMux() - h.methodMux[r.Method] = mux + return fmt.Errorf("unsupported method %q", r.Method) } return mux.Handle(r.Pattern, handler) } @@ -197,6 +272,12 @@ func (h *Handler) DelRoutes(routes []Route) { // Delete a route from the handler. No-op if route does not exist. func (h *Handler) DelRoute(r Route) { + r.Pattern = BasePath + r.Pattern + h.delRawRoute(r) +} + +// Delete a route from the handler. No-op if route does not exist. +func (h *Handler) delRawRoute(r Route) { mux, ok := h.methodMux[r.Method] if ok { mux.Deregister(r.Pattern) @@ -206,29 +287,14 @@ func (h *Handler) DelRoute(r Route) { // ServeHTTP responds to HTTP request to the handler. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.statMap.Add(statRequest, 1) - - // FIXME(benbjohnson): Add pprof enabled flag. 
- if strings.HasPrefix(r.URL.Path, "/debug/pprof") { - switch r.URL.Path { - case "/debug/pprof/cmdline": - pprof.Cmdline(w, r) - case "/debug/pprof/profile": - pprof.Profile(w, r) - case "/debug/pprof/symbol": - pprof.Symbol(w, r) - default: - pprof.Index(w, r) - } - } else if strings.HasPrefix(r.URL.Path, "/debug/vars") { - serveExpvar(w, r) + method := r.Method + if method == "" { + method = "GET" + } + if mux, ok := h.methodMux[method]; ok { + mux.ServeHTTP(w, r) } else { - method := r.Method - if method == "" { - method = "GET" - } - if mux, ok := h.methodMux[method]; ok { - mux.ServeHTTP(w, r) - } + h.serve404(w, r) } } @@ -510,7 +576,7 @@ func cors(inner http.Handler) http.Handler { `GET`, `OPTIONS`, `POST`, - `PUT`, + `PATCH`, }, ", ")) w.Header().Set(`Access-Control-Allow-Headers`, strings.Join([]string{ diff --git a/services/httpd/service.go b/services/httpd/service.go index e16d5d3f3..5626d1565 100644 --- a/services/httpd/service.go +++ b/services/httpd/service.go @@ -48,6 +48,7 @@ func NewService(c Config, l *log.Logger) *Service { c.AuthEnabled, c.LogEnabled, c.WriteTracing, + c.GZIP, statMap, l, ), @@ -241,9 +242,9 @@ func (s *Service) Addr() net.Addr { func (s *Service) URL() string { if s.ln != nil { if s.https { - return "https://" + s.Addr().String() + return "https://" + s.Addr().String() + BasePath } - return "http://" + s.Addr().String() + return "http://" + s.Addr().String() + BasePath } return "" } diff --git a/services/logging/service.go b/services/logging/service.go index d7385ed50..a20b20975 100644 --- a/services/logging/service.go +++ b/services/logging/service.go @@ -13,6 +13,7 @@ import ( type Interface interface { NewLogger(prefix string, flag int) *log.Logger NewStaticLevelLogger(prefix string, flag int, l wlog.Level) *log.Logger + NewStaticLevelWriter(l wlog.Level) io.Writer } type Service struct { @@ -71,6 +72,10 @@ func (s *Service) NewStaticLevelLogger(prefix string, flag int, l wlog.Level) *l return 
log.New(wlog.NewStaticLevelWriter(s.f, l), prefix, flag)
 }
 
+func (s *Service) NewStaticLevelWriter(l wlog.Level) io.Writer {
+	return wlog.NewStaticLevelWriter(s.f, l)
+}
+
 type nopCloser struct {
 	f io.Writer
 }
diff --git a/services/replay/dao.go b/services/replay/dao.go
new file mode 100644
index 000000000..94eece279
--- /dev/null
+++ b/services/replay/dao.go
@@ -0,0 +1,534 @@
+package replay
+
+import (
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"path"
+	"time"
+
+	"github.com/influxdata/kapacitor/services/storage"
+)
+
+var (
+	ErrRecordingExists   = errors.New("recording already exists")
+	ErrNoRecordingExists = errors.New("no recording exists")
+
+	ErrReplayExists   = errors.New("replay already exists")
+	ErrNoReplayExists = errors.New("no replay exists")
+)
+
+// Data access object for Recording data.
+type RecordingDAO interface {
+	// Retrieve a recording
+	Get(id string) (Recording, error)
+
+	// Create a recording.
+	// ErrRecordingExists is returned if a recording already exists with the same ID.
+	Create(recording Recording) error
+
+	// Replace an existing recording.
+	// ErrNoRecordingExists is returned if the recording does not exist.
+	Replace(recording Recording) error
+
+	// Delete a recording.
+	// It is not an error to delete a non-existent recording.
+	Delete(id string) error
+
+	// List recordings matching a pattern.
+	// The pattern is shell/glob matching see https://golang.org/pkg/path/#Match
+	// Offset and limit are pagination bounds. Offset is inclusive starting at index 0.
+	// More results may exist while the number of returned items is equal to limit.
+	List(pattern string, offset, limit int) ([]Recording, error)
+}
+
+//--------------------------------------------------------------------
+// The following structures are stored in a database via gob encoding.
+// Changes to the structures could break existing data.
+// +// Many of these structures are exact copies of structures found elsewhere, +// this is intentional so that all structures stored in the database are +// defined here and nowhere else. So as to not accidentally change +// the gob serialization format in incompatible ways. + +type Status int + +const ( + Failed Status = iota + Running + Finished +) + +type RecordingType int + +const ( + StreamRecording RecordingType = iota + BatchRecording +) + +type Recording struct { + ID string + // URL for stored Recording data. Currently only file:// is supported. + DataURL string + Type RecordingType + Size int64 + Date time.Time + Error string + Status Status + Progress float64 +} + +const ( + recordingDataPrefix = "/recordings/data/" + recordingIndexesPrefix = "/recordings/indexes/" + + // Name of ID index + recordingIdIndex = "id/" + // Name of Date index + recordingDateIndex = "date/" +) + +// Key/Value based implementation of the RecordingDAO. +type recordingKV struct { + store storage.Interface +} + +func newRecordingKV(store storage.Interface) *recordingKV { + return &recordingKV{ + store: store, + } +} + +func (d *recordingKV) encodeRecording(r Recording) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(r) + return buf.Bytes(), err +} + +func (d *recordingKV) decodeRecording(data []byte) (Recording, error) { + var recording Recording + dec := gob.NewDecoder(bytes.NewReader(data)) + err := dec.Decode(&recording) + return recording, err +} + +// Create a key for the recording data +func (d *recordingKV) recordingDataKey(id string) string { + return recordingDataPrefix + id +} + +// Create a key for a given index and value. 
+//
+// Indexes are maintained via a 'directory' like system:
+//
+// /recordings/data/ID -- contains encoded recording data
+// /recordings/index/id/ID -- contains the recording ID
+// /recordings/index/date/DATE/ID -- contains the recording ID
+//
+// As such to list all recordings in Date sorted order use the /recordings/index/date/ directory.
+func (d *recordingKV) recordingIndexKey(index, value string) string {
+	return recordingIndexesPrefix + index + value
+}
+
+func (d *recordingKV) recordingIDIndexKey(r Recording) string {
+	return d.recordingIndexKey(recordingIdIndex, r.ID)
+}
+func (d *recordingKV) recordingDateIndexKey(r Recording) string {
+	return d.recordingIndexKey(recordingDateIndex, r.Date.Format(time.RFC3339)+"/"+r.ID)
+}
+
+func (d *recordingKV) Get(id string) (Recording, error) {
+	key := d.recordingDataKey(id)
+	if exists, err := d.store.Exists(key); err != nil {
+		return Recording{}, err
+	} else if !exists {
+		return Recording{}, ErrNoRecordingExists
+	}
+	kv, err := d.store.Get(key)
+	if err != nil {
+		return Recording{}, err
+	}
+	return d.decodeRecording(kv.Value)
+}
+
+func (d *recordingKV) Create(r Recording) error {
+	key := d.recordingDataKey(r.ID)
+
+	exists, err := d.store.Exists(key)
+	if err != nil {
+		return err
+	}
+	if exists {
+		return ErrRecordingExists
+	}
+
+	data, err := d.encodeRecording(r)
+	if err != nil {
+		return err
+	}
+	// Put data
+	err = d.store.Put(key, data)
+	if err != nil {
+		return err
+	}
+	// Put ID index
+	indexKey := d.recordingIDIndexKey(r)
+	err = d.store.Put(indexKey, []byte(r.ID))
+	if err != nil {
+		return err
+	}
+	// Put Date index
+	indexKey = d.recordingDateIndexKey(r)
+	return d.store.Put(indexKey, []byte(r.ID))
+}
+
+func (d *recordingKV) Replace(r Recording) error {
+	key := d.recordingDataKey(r.ID)
+
+	exists, err := d.store.Exists(key)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		return ErrNoRecordingExists
+	}
+
+	prev, err := d.Get(r.ID)
+	if err != nil {
+		return err
+	}
+
+	data, err := d.encodeRecording(r)
+	if err != nil {
+		return err
+	}
+	// Put data
+	err = d.store.Put(key, data)
+	if err != nil {
+		return err
+	}
+	// Update Date index
+	prevIndexKey := d.recordingDateIndexKey(prev)
+	err = d.store.Delete(prevIndexKey)
+	if err != nil {
+		return err
+	}
+	currIndexKey := d.recordingDateIndexKey(r)
+	err = d.store.Put(currIndexKey, []byte(r.ID))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *recordingKV) Delete(id string) error {
+	key := d.recordingDataKey(id)
+	r, err := d.Get(id)
+	if err != nil {
+		if err == ErrNoRecordingExists {
+			return nil
+		}
+		return err
+	}
+
+	idIndexKey := d.recordingIDIndexKey(r)
+	dateIndexKey := d.recordingDateIndexKey(r)
+
+	dataErr := d.store.Delete(key)
+	idIndexErr := d.store.Delete(idIndexKey)
+	dateIndexErr := d.store.Delete(dateIndexKey)
+	if dataErr != nil {
+		return dataErr
+	}
+	if idIndexErr != nil {
+		return idIndexErr
+	}
+	return dateIndexErr
+}
+
+func (d *recordingKV) List(pattern string, offset, limit int) ([]Recording, error) {
+	// Recordings are indexed by their Date.
+	// This allows us to do offset/limits and filtering without having to read in all recording data.
+
+	// List all recording ids sorted by Date
+	ids, err := d.store.List(recordingIndexesPrefix + recordingDateIndex)
+	if err != nil {
+		return nil, err
+	}
+	// Reverse to sort by newest first
+	for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 {
+		ids[i], ids[j] = ids[j], ids[i]
+	}
+
+	var match func([]byte) bool
+	if pattern != "" {
+		match = func(value []byte) bool {
+			id := string(value)
+			matched, _ := path.Match(pattern, id)
+			return matched
+		}
+	} else {
+		match = func([]byte) bool { return true }
+	}
+	matches := storage.DoListFunc(ids, match, offset, limit)
+
+	recordings := make([]Recording, len(matches))
+	for i, id := range matches {
+		data, err := d.store.Get(d.recordingDataKey(string(id)))
+		if err != nil {
+			return nil, err
+		}
+		t, err := d.decodeRecording(data.Value)
+		recordings[i] = t
+	}
+	return recordings, nil
+}
+
+// Data access object for Replay data.
+type ReplayDAO interface {
+	// Retrieve a replay
+	Get(id string) (Replay, error)
+
+	// Create a replay.
+	// ErrReplayExists is returned if a replay already exists with the same ID.
+	Create(replay Replay) error
+
+	// Replace an existing replay.
+	// ErrNoReplayExists is returned if the replay does not exist.
+	Replace(replay Replay) error
+
+	// Delete a replay.
+	// It is not an error to delete a non-existent replay.
+	Delete(id string) error
+
+	// List replays matching a pattern.
+	// The pattern is shell/glob matching see https://golang.org/pkg/path/#Match
+	// Offset and limit are pagination bounds. Offset is inclusive starting at index 0.
+	// More results may exist while the number of returned items is equal to limit.
+ List(pattern string, offset, limit int) ([]Replay, error) +} + +type Clock int + +const ( + Fast Clock = iota + Real +) + +type Replay struct { + ID string + RecordingID string + TaskID string + RecordingTime bool + Clock Clock + Date time.Time + Error string + Status Status + Progress float64 +} + +const ( + replayDataPrefix = "/replays/data/" + replayIndexesPrefix = "/replays/indexes/" + + replayIdIndex = "id/" + replayDateIndex = "date/" +) + +// Key/Value based implementation of the ReplayDAO. +type replayKV struct { + store storage.Interface +} + +func newReplayKV(store storage.Interface) *replayKV { + return &replayKV{ + store: store, + } +} + +func (d *replayKV) encodeReplay(r Replay) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(r) + return buf.Bytes(), err +} + +func (d *replayKV) decodeReplay(data []byte) (Replay, error) { + var replay Replay + dec := gob.NewDecoder(bytes.NewReader(data)) + err := dec.Decode(&replay) + return replay, err +} + +// Create a key for the replay data +func (d *replayKV) replayDataKey(id string) string { + return replayDataPrefix + id +} + +// Create a key for a given index and value. +// +// Indexes are maintained via a 'directory' like system: +// +// /replays/data/ID -- contains encoded replay data +// /replays/index/id/ID -- contains the replay ID +// /replays/index/date/DATE/ID -- contains the replay ID +// +// As such to list all replays in Date sorted order use the /replays/index/date/ directory. 
+func (d *replayKV) replayIndexKey(index, value string) string { + return replayIndexesPrefix + index + value +} + +func (d *replayKV) replayIDIndexKey(r Replay) string { + return d.replayIndexKey(replayIdIndex, r.ID) +} +func (d *replayKV) replayDateIndexKey(r Replay) string { + return d.replayIndexKey(replayDateIndex, r.Date.Format(time.RFC3339)+"/"+r.ID) +} + +func (d *replayKV) Get(id string) (Replay, error) { + key := d.replayDataKey(id) + if exists, err := d.store.Exists(key); err != nil { + return Replay{}, err + } else if !exists { + return Replay{}, ErrNoReplayExists + } + kv, err := d.store.Get(key) + if err != nil { + return Replay{}, err + } + return d.decodeReplay(kv.Value) +} + +func (d *replayKV) Create(r Replay) error { + key := d.replayDataKey(r.ID) + + exists, err := d.store.Exists(key) + if err != nil { + return err + } + if exists { + return ErrReplayExists + } + + data, err := d.encodeReplay(r) + if err != nil { + return err + } + // Put data + err = d.store.Put(key, data) + if err != nil { + return err + } + // Put ID index + indexKey := d.replayIDIndexKey(r) + err = d.store.Put(indexKey, []byte(r.ID)) + if err != nil { + return err + } + // Put Date index + indexKey = d.replayDateIndexKey(r) + return d.store.Put(indexKey, []byte(r.ID)) +} + +func (d *replayKV) Replace(r Replay) error { + key := d.replayDataKey(r.ID) + + exists, err := d.store.Exists(key) + if err != nil { + return err + } + if !exists { + return ErrNoReplayExists + } + + prev, err := d.Get(r.ID) + if err != nil { + return err + } + + data, err := d.encodeReplay(r) + if err != nil { + return err + } + // Put data + err = d.store.Put(key, data) + if err != nil { + return err + } + // Update Date index + prevIndexKey := d.replayDateIndexKey(prev) + err = d.store.Delete(prevIndexKey) + if err != nil { + return err + } + currIndexKey := d.replayDateIndexKey(r) + err = d.store.Put(currIndexKey, []byte(r.ID)) + if err != nil { + return err + } + return nil +} + +func (d *replayKV) 
Delete(id string) error {
+	key := d.replayDataKey(id)
+	r, err := d.Get(id)
+	if err != nil {
+		if err == ErrNoReplayExists {
+			return nil
+		}
+		return err
+	}
+
+	idIndexKey := d.replayIDIndexKey(r)
+	dateIndexKey := d.replayDateIndexKey(r)
+
+	dataErr := d.store.Delete(key)
+	idIndexErr := d.store.Delete(idIndexKey)
+	dateIndexErr := d.store.Delete(dateIndexKey)
+	if dataErr != nil {
+		return dataErr
+	}
+	if idIndexErr != nil {
+		return idIndexErr
+	}
+	return dateIndexErr
+}
+
+func (d *replayKV) List(pattern string, offset, limit int) ([]Replay, error) {
+	// Replays are indexed by their Date.
+	// This allows us to do offset/limits and filtering without having to read in all replay data.
+
+	// List all replay ids sorted by Date
+	ids, err := d.store.List(replayIndexesPrefix + replayDateIndex)
+	if err != nil {
+		return nil, err
+	}
+	// Reverse to sort by newest first
+	for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 {
+		ids[i], ids[j] = ids[j], ids[i]
+	}
+
+	var match func([]byte) bool
+	if pattern != "" {
+		match = func(value []byte) bool {
+			id := string(value)
+			matched, _ := path.Match(pattern, id)
+			return matched
+		}
+	} else {
+		match = func([]byte) bool { return true }
+	}
+	matches := storage.DoListFunc(ids, match, offset, limit)
+
+	replays := make([]Replay, len(matches))
+	for i, id := range matches {
+		data, err := d.store.Get(d.replayDataKey(string(id)))
+		if err != nil {
+			return nil, err
+		}
+		t, err := d.decodeReplay(data.Value)
+		replays[i] = t
+	}
+	return replays, nil
+}
diff --git a/services/replay/service.go b/services/replay/service.go
index ebac22788..b75cc7be8 100644
--- a/services/replay/service.go
+++ b/services/replay/service.go
@@ -3,25 +3,29 @@ package replay
 import (
 	"archive/zip"
 	"compress/gzip"
-	"errors"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
 	"net/http"
+	"net/url"
 	"os"
 	"path"
+	"regexp"
 	"strconv"
 	"strings"
-	"sync"
 	"time"
 
 	client "github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/influxdb/influxql" "github.com/influxdata/kapacitor" + kclient "github.com/influxdata/kapacitor/client/v1" "github.com/influxdata/kapacitor/clock" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/storage" + "github.com/pkg/errors" "github.com/twinj/uuid" ) @@ -30,12 +34,22 @@ const batchEXT = ".brpl" const precision = "n" +var validID = regexp.MustCompile(`^[-\w]+$`) + // Handles recording, starting, and waiting on replays type Service struct { - saveDir string - routes []httpd.Route + saveDir string + + recordings RecordingDAO + replays ReplayDAO + + routes []httpd.Route + + StorageService interface { + Store(namespace string) storage.Interface + } TaskStore interface { - Load(name string) (*kapacitor.Task, error) + Load(id string) (*kapacitor.Task, error) } HTTPDService interface { AddRoutes([]httpd.Route) error @@ -52,67 +66,160 @@ type Service struct { Stream(name string) (kapacitor.StreamCollector, error) } - recordingsMu sync.RWMutex - runningRecordings map[string]<-chan error - logger *log.Logger } // Create a new replay master. func NewService(conf Config, l *log.Logger) *Service { return &Service{ - saveDir: conf.Dir, - logger: l, - runningRecordings: make(map[string]<-chan error), + saveDir: conf.Dir, + logger: l, } } +// The storage namespace for all recording data. 
+const recordingNamespace = "recording_store" +const replayNamespace = "replay_store" + func (r *Service) Open() error { + // Create DAO + r.recordings = newRecordingKV(r.StorageService.Store(recordingNamespace)) + r.replays = newReplayKV(r.StorageService.Store(replayNamespace)) + + err := os.MkdirAll(r.saveDir, 0755) + if err != nil { + return err + } + + err = r.migrate() + if err != nil { + return err + } + + // Setup routes r.routes = []httpd.Route{ { - Name: "recordings", + Name: "recording", Method: "GET", - Pattern: "/recordings", - HandlerFunc: r.handleList, + Pattern: "/recordings/", + HandlerFunc: r.handleRecording, }, { - Name: "recording-delete", + Name: "deleteRecording", Method: "DELETE", - Pattern: "/recording", - HandlerFunc: r.handleDelete, + Pattern: "/recordings/", + HandlerFunc: r.handleDeleteRecording, }, { - Name: "recording-delete", + Name: "/recordings/-cors", Method: "OPTIONS", - Pattern: "/recording", + Pattern: "/recordings/", HandlerFunc: httpd.ServeOptions, }, { - Name: "record", + Name: "listRecordings", + Method: "GET", + Pattern: "/recordings", + HandlerFunc: r.handleListRecordings, + }, + { + Name: "createRecording", Method: "POST", - Pattern: "/record", - HandlerFunc: r.handleRecord, + Pattern: "/recordings/stream", + HandlerFunc: r.handleRecordStream, }, { - Name: "record", - Method: "GET", - Pattern: "/record", - HandlerFunc: r.handleGetRecording, + Name: "createRecording", + Method: "POST", + Pattern: "/recordings/batch", + HandlerFunc: r.handleRecordBatch, }, { - Name: "replay", + Name: "createRecording", Method: "POST", - Pattern: "/replay", + Pattern: "/recordings/query", + HandlerFunc: r.handleRecordQuery, + }, + { + Name: "replay", + Method: "GET", + Pattern: "/replays/", HandlerFunc: r.handleReplay, }, + { + Name: "deleteReplay", + Method: "DELETE", + Pattern: "/replays/", + HandlerFunc: r.handleDeleteReplay, + }, + { + Name: "/replays/-cors", + Method: "OPTIONS", + Pattern: "/replays/", + HandlerFunc: httpd.ServeOptions, 
+ }, + { + Name: "listReplays", + Method: "GET", + Pattern: "/replays", + HandlerFunc: r.handleListReplays, + }, + { + Name: "createReplay", + Method: "POST", + Pattern: "/replays", + HandlerFunc: r.handleCreateReplay, + }, } - err := os.MkdirAll(r.saveDir, 0755) + return r.HTTPDService.AddRoutes(r.routes) +} + +func (r *Service) migrate() error { + // Find all recordings and store their metadata into the new storage service. + files, err := ioutil.ReadDir(r.saveDir) if err != nil { - return err + return errors.Wrap(err, "migrating recording metadata") } + for _, info := range files { + if info.IsDir() { + continue + } + name := info.Name() + i := strings.LastIndex(name, ".") + ext := name[i:] + id := name[:i] - return r.HTTPDService.AddRoutes(r.routes) + var typ RecordingType + switch ext { + case streamEXT: + typ = StreamRecording + case batchEXT: + typ = BatchRecording + default: + r.logger.Println("E! unknown file in replay dir", name) + continue + } + recording := Recording{ + ID: id, + Type: typ, + Size: info.Size(), + Date: info.ModTime().UTC(), + Status: Finished, + Progress: 1.0, + } + err = r.recordings.Create(recording) + if err != nil { + if err == ErrRecordingExists { + r.logger.Printf("D! skipping recording %s, metadata already migrated", id) + } else { + return errors.Wrap(err, "creating recording metadata") + } + } else { + r.logger.Printf("D! 
recording %s metadata migrated", id) + } + } + return nil } func (r *Service) Close() error { @@ -120,474 +227,631 @@ func (r *Service) Close() error { return nil } -func (s *Service) handleList(w http.ResponseWriter, req *http.Request) { - ridsStr := req.URL.Query().Get("rids") - var rids []string - if ridsStr != "" { - rids = strings.Split(ridsStr, ",") - } +func recordingLink(id string) kclient.Link { + return kclient.Link{Relation: kclient.Self, Href: path.Join(httpd.BasePath, "recordings", id)} +} - infos, err := s.GetRecordings(rids) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusNotFound) - return +func convertRecording(recording Recording) kclient.Recording { + var typ kclient.TaskType + switch recording.Type { + case StreamRecording: + typ = kclient.StreamTask + case BatchRecording: + typ = kclient.BatchTask } - - type response struct { - Recordings []RecordingInfo `json:"Recordings"` + var status kclient.Status + switch recording.Status { + case Failed: + status = kclient.Failed + case Running: + status = kclient.Running + case Finished: + status = kclient.Finished + } + return kclient.Recording{ + Link: recordingLink(recording.ID), + ID: recording.ID, + Type: typ, + Size: recording.Size, + Date: recording.Date, + Error: recording.Error, + Status: status, + Progress: recording.Progress, } - - w.Write(httpd.MarshalJSON(response{infos}, true)) } -func (s *Service) handleDelete(w http.ResponseWriter, r *http.Request) { - rid := r.URL.Query().Get("rid") - s.Delete(rid) - w.WriteHeader(http.StatusNoContent) +func replayLink(id string) kclient.Link { + return kclient.Link{Relation: kclient.Self, Href: path.Join(httpd.BasePath, "replays", id)} } -func (r *Service) handleReplay(w http.ResponseWriter, req *http.Request) { - name := req.URL.Query().Get("name") - id := req.URL.Query().Get("id") - clockTyp := req.URL.Query().Get("clock") - recTimeStr := req.URL.Query().Get("rec-time") - var recTime bool - if recTimeStr != "" { - var err error - 
recTime, err = strconv.ParseBool(recTimeStr) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) - return - } +func convertReplay(replay Replay) kclient.Replay { + var clk kclient.Clock + switch replay.Clock { + case Real: + clk = kclient.Real + case Fast: + clk = kclient.Fast } + var status kclient.Status + switch replay.Status { + case Failed: + status = kclient.Failed + case Running: + status = kclient.Running + case Finished: + status = kclient.Finished + } + return kclient.Replay{ + Link: replayLink(replay.ID), + ID: replay.ID, + Recording: replay.RecordingID, + Task: replay.TaskID, + RecordingTime: replay.RecordingTime, + Clock: clk, + Date: replay.Date, + Error: replay.Error, + Status: status, + Progress: replay.Progress, + } +} - t, err := r.TaskStore.Load(name) - if err != nil { - httpd.HttpError(w, "task load: "+err.Error(), true, http.StatusNotFound) - return +var allRecordingFields = []string{ + "link", + "id", + "type", + "size", + "date", + "error", + "status", + "progress", +} + +func (s *Service) handleListRecordings(w http.ResponseWriter, r *http.Request) { + pattern := r.URL.Query().Get("pattern") + fields := r.URL.Query()["fields"] + if len(fields) == 0 { + fields = allRecordingFields + } else { + // Always return ID field + fields = append(fields, "id", "link") } - var clk clock.Clock - switch clockTyp { - case "", "wall": - clk = clock.Wall() - case "fast": - clk = clock.Fast() + offsetStr := r.URL.Query().Get("offset") + offset, err := strconv.ParseInt(offsetStr, 10, 64) + if err != nil { + httpd.HttpError(w, fmt.Sprintf("invalid offset parameter %q must be an integer: %s", offsetStr, err), true, http.StatusBadRequest) } - // Create new isolated task master - tm := r.TaskMaster.New() - tm.Open() - defer tm.Close() - et, err := tm.StartTask(t) + limitStr := r.URL.Query().Get("limit") + limit, err := strconv.ParseInt(limitStr, 10, 64) if err != nil { - httpd.HttpError(w, "task start: "+err.Error(), true, 
http.StatusBadRequest) - return + httpd.HttpError(w, fmt.Sprintf("invalid limit parameter %q must be an integer: %s", limitStr, err), true, http.StatusBadRequest) + } + if limit == 0 { + limit = 100 } - replay := kapacitor.NewReplay(clk) - var replayC <-chan error - switch t.Type { - case kapacitor.StreamTask: - f, err := r.FindStreamRecording(id) - if err != nil { - httpd.HttpError(w, "replay find: "+err.Error(), true, http.StatusNotFound) - return - } - stream, err := tm.Stream(id) - if err != nil { - httpd.HttpError(w, "stream start: "+err.Error(), true, http.StatusInternalServerError) - return - } - replayC = replay.ReplayStream(f, stream, recTime, precision) - case kapacitor.BatchTask: - fs, err := r.FindBatchRecording(id) - if err != nil { - httpd.HttpError(w, "replay find: "+err.Error(), true, http.StatusNotFound) - return + recordings, err := s.recordings.List(pattern, int(offset), int(limit)) + + rs := make([]map[string]interface{}, len(recordings)) + for i, recording := range recordings { + rs[i] = make(map[string]interface{}, len(fields)) + for _, field := range fields { + var value interface{} + switch field { + case "id": + value = recording.ID + case "link": + value = recordingLink(recording.ID) + case "type": + switch recording.Type { + case StreamRecording: + value = kclient.StreamTask + case BatchRecording: + value = kclient.BatchTask + } + case "size": + value = recording.Size + case "date": + value = recording.Date + case "error": + value = recording.Error + case "status": + switch recording.Status { + case Failed: + value = kclient.Failed + case Running: + value = kclient.Running + case Finished: + value = kclient.Finished + } + case "progress": + value = recording.Progress + } + rs[i][field] = value } - batches := tm.BatchCollectors(name) - replayC = replay.ReplayBatch(fs, batches, recTime) } + type response struct { + Recordings []map[string]interface{} `json:"recordings"` + } + w.Write(httpd.MarshalJSON(response{Recordings: rs}, true)) +} - 
// Check for error on replay - err = <-replayC +func (r *Service) handleRecording(w http.ResponseWriter, req *http.Request) { + _, rid := path.Split(req.URL.Path) + + recording, err := r.recordings.Get(rid) if err != nil { - httpd.HttpError(w, "replay: "+err.Error(), true, http.StatusInternalServerError) + httpd.HttpError(w, "error finding recording: "+err.Error(), true, http.StatusInternalServerError) return } + if recording.Status == Running { + w.WriteHeader(http.StatusAccepted) + } else { + w.WriteHeader(http.StatusOK) + } - // Drain tm so the task can finish - tm.Drain() - - // Stop stats nodes - et.StopStats() - - // Check for error on task - err = et.Wait() + w.Write(httpd.MarshalJSON(convertRecording(recording), true)) +} +func (s *Service) handleDeleteRecording(w http.ResponseWriter, r *http.Request) { + _, rid := path.Split(r.URL.Path) + recording, err := s.recordings.Get(rid) + if err == ErrNoRecordingExists { + w.WriteHeader(http.StatusNoContent) + return + } + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return + } + err = s.recordings.Delete(rid) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return + } + ds, err := parseDataSourceURL(recording.DataURL) if err != nil { - httpd.HttpError(w, "task run: "+err.Error(), true, http.StatusInternalServerError) + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) return } - // Call close explicitly to check for error - err = tm.Close() + err = ds.Remove() if err != nil { - httpd.HttpError(w, "closing: "+err.Error(), true, http.StatusInternalServerError) + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) return } + w.WriteHeader(http.StatusNoContent) } -func (r *Service) handleRecord(w http.ResponseWriter, req *http.Request) { - type doFunc func() error - var doF doFunc - started := make(chan struct{}) - - rid := uuid.NewV4() - typ := req.URL.Query().Get("type") - switch typ { - case 
"stream": - task := req.URL.Query().Get("name") - if task == "" { - httpd.HttpError(w, "no task specified", true, http.StatusBadRequest) - return - } - t, err := r.TaskStore.Load(task) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusNotFound) - return - } - - durStr := req.URL.Query().Get("duration") - dur, err := influxql.ParseDuration(durStr) - if err != nil { - httpd.HttpError(w, "invalid duration string: "+err.Error(), true, http.StatusBadRequest) - return - } +func (r *Service) handleRecordStream(w http.ResponseWriter, req *http.Request) { + var opt kclient.RecordStreamOptions + dec := json.NewDecoder(req.Body) + err := dec.Decode(&opt) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + if opt.ID == "" { + opt.ID = uuid.NewV4().String() + } + if !validID.MatchString(opt.ID) { + httpd.HttpError(w, fmt.Sprintf("recording ID must match %v %q", validID, opt.ID), true, http.StatusBadRequest) + return + } + t, err := r.TaskStore.Load(opt.Task) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusNotFound) + return + } + dataUrl := url.URL{ + Scheme: "file", + Path: path.Join(r.saveDir, opt.ID+streamEXT), + } - doF = func() error { - err := r.doRecordStream(rid, dur, t.DBRPs, t.Measurements(), started) - if err != nil { - close(started) - } - return err - } + recording := Recording{ + ID: opt.ID, + DataURL: dataUrl.String(), + Type: StreamRecording, + Date: time.Now(), + Status: Running, + } + err = r.recordings.Create(recording) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return + } - case "batch": - var err error + // Spawn routine to perform actual recording. 
+ go func(recording Recording) { + ds, _ := parseDataSourceURL(dataUrl.String()) + err := r.doRecordStream(opt.ID, ds, opt.Stop, t.DBRPs, t.Measurements()) + r.updateRecordingResult(recording, ds, err) + }(recording) + + w.WriteHeader(http.StatusCreated) + w.Write(httpd.MarshalJSON(convertRecording(recording), true)) +} - // Determine start time. - var start time.Time - startStr := req.URL.Query().Get("start") - pastStr := req.URL.Query().Get("past") - if startStr != "" && pastStr != "" { - httpd.HttpError(w, "must not pass both 'start' and 'past' parameters", true, http.StatusBadRequest) - return - } +func (r *Service) handleRecordBatch(w http.ResponseWriter, req *http.Request) { + var opt kclient.RecordBatchOptions + dec := json.NewDecoder(req.Body) + err := dec.Decode(&opt) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + if opt.ID == "" { + opt.ID = uuid.NewV4().String() + } + if !validID.MatchString(opt.ID) { + httpd.HttpError(w, fmt.Sprintf("recording ID must match %v %q", validID, opt.ID), true, http.StatusBadRequest) + return + } - now := time.Now() + if opt.Start.IsZero() { + httpd.HttpError(w, "must provide start time", true, http.StatusBadRequest) + return + } + if opt.Stop.IsZero() { + opt.Stop = time.Now() + } - switch { - case startStr != "": - start, err = time.Parse(time.RFC3339, startStr) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) - return - } - case pastStr != "": - diff, err := influxql.ParseDuration(pastStr) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) - return - } - start = now.Add(-1 * diff) - } + t, err := r.TaskStore.Load(opt.Task) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusNotFound) + return + } + dataUrl := url.URL{ + Scheme: "file", + Path: path.Join(r.saveDir, opt.ID+batchEXT), + } - // Get stop time, if present - stop := now - stopStr := req.URL.Query().Get("stop") - if stopStr != "" { - 
stop, err = time.Parse(time.RFC3339, stopStr) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) - return - } - } + recording := Recording{ + ID: opt.ID, + DataURL: dataUrl.String(), + Type: BatchRecording, + Date: time.Now(), + Status: Running, + } + err = r.recordings.Create(recording) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return + } - // Get task - task := req.URL.Query().Get("name") - if task == "" { - httpd.HttpError(w, "no task specified", true, http.StatusBadRequest) - return - } + go func(recording Recording) { + ds, _ := parseDataSourceURL(dataUrl.String()) + err := r.doRecordBatch(opt.ID, ds, t, opt.Start, opt.Stop, opt.Cluster) + r.updateRecordingResult(recording, ds, err) + }(recording) - // Get InfluxDB cluster - cluster := req.URL.Query().Get("cluster") + w.WriteHeader(http.StatusCreated) + w.Write(httpd.MarshalJSON(convertRecording(recording), true)) +} - t, err := r.TaskStore.Load(task) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusNotFound) - return - } +func (r *Service) handleRecordQuery(w http.ResponseWriter, req *http.Request) { + var opt kclient.RecordQueryOptions + dec := json.NewDecoder(req.Body) + err := dec.Decode(&opt) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + if opt.ID == "" { + opt.ID = uuid.NewV4().String() + } + if !validID.MatchString(opt.ID) { + httpd.HttpError(w, fmt.Sprintf("recording ID must match %v %q", validID, opt.ID), true, http.StatusBadRequest) + return + } + if opt.Query == "" { + httpd.HttpError(w, "must provide query", true, http.StatusBadRequest) + return + } + var dataPath string + var typ RecordingType + switch opt.Type { + case kclient.StreamTask: + dataPath = path.Join(r.saveDir, opt.ID+streamEXT) + typ = StreamRecording + case kclient.BatchTask: + dataPath = path.Join(r.saveDir, opt.ID+batchEXT) + typ = BatchRecording + } - doF = func() error { - 
close(started) - return r.doRecordBatch(rid, t, start, stop, cluster) - } - case "query": - query := req.URL.Query().Get("query") - if query == "" { - httpd.HttpError(w, "must pass query parameter", true, http.StatusBadRequest) - return - } + dataUrl := url.URL{ + Scheme: "file", + Path: dataPath, + } - typeStr := req.URL.Query().Get("ttype") - var tt kapacitor.TaskType - switch typeStr { - case "stream": - tt = kapacitor.StreamTask - case "batch": - tt = kapacitor.BatchTask - default: - httpd.HttpError(w, fmt.Sprintf("invalid type %q", typeStr), true, http.StatusBadRequest) - return - } - // Get InfluxDB cluster - cluster := req.URL.Query().Get("cluster") - doF = func() error { - close(started) - return r.doRecordQuery(rid, query, tt, cluster) - } - default: - httpd.HttpError(w, "invalid recording type", true, http.StatusBadRequest) + recording := Recording{ + ID: opt.ID, + DataURL: dataUrl.String(), + Type: typ, + Date: time.Now(), + Status: Running, + } + err = r.recordings.Create(recording) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) return } - // Store recording in running recordings. - errC := make(chan error) - func() { - r.recordingsMu.Lock() - defer r.recordingsMu.Unlock() - r.runningRecordings[rid.String()] = errC - }() - - // Spawn routine to perform actual recording. - go func() { - err := doF() - if err != nil { - // Always log an error since the user may not have requested the error. - r.logger.Printf("E! 
recording %s failed: %v", rid.String(), err) - } - select { - case errC <- err: - case <-time.After(time.Minute): - // Cache the error for a max duration then drop it - } - // We have finished delete from running map - r.recordingsMu.Lock() - defer r.recordingsMu.Unlock() - delete(r.runningRecordings, rid.String()) - }() - // Wait till the goroutine for doing the recording has actually started - <-started + go func(recording Recording) { + ds, _ := parseDataSourceURL(dataUrl.String()) + err := r.doRecordQuery(opt.ID, ds, opt.Query, typ, opt.Cluster) + r.updateRecordingResult(recording, ds, err) + }(recording) - // Respond with the recording ID - type response struct { - RecordingID string `json:"RecordingID"` - } - w.Write(httpd.MarshalJSON(response{rid.String()}, true)) + w.WriteHeader(http.StatusCreated) + w.Write(httpd.MarshalJSON(convertRecording(recording), true)) } -func (r *Service) handleGetRecording(w http.ResponseWriter, req *http.Request) { - rid := req.URL.Query().Get("id") - - // First check if its still running - var errC <-chan error - var running bool - func() { - r.recordingsMu.RLock() - defer r.recordingsMu.RUnlock() - errC, running = r.runningRecordings[rid] - }() +func (r *Service) updateRecordingResult(recording Recording, ds DataSource, err error) { + recording.Status = Finished + if err != nil { + recording.Status = Failed + recording.Error = err.Error() + } + recording.Date = time.Now() + recording.Progress = 1.0 + recording.Size, err = ds.Size() + if err != nil { + r.logger.Println("E! failed to determine size of recording", recording.ID, err) + } - if running { - // It is still running wait for it to finish - err := <-errC - if err != nil { - info := RecordingInfo{ - ID: rid, - Error: err.Error(), - } - w.Write(httpd.MarshalJSON(info, true)) - return - } + err = r.recordings.Replace(recording) + if err != nil { + r.logger.Println("E! 
failed to save recording info", recording.ID, err) } +} - // It already finished, return its info - info, err := r.GetRecordings([]string{rid}) +func (r *Service) handleReplay(w http.ResponseWriter, req *http.Request) { + _, id := path.Split(req.URL.Path) + replay, err := r.replays.Get(id) if err != nil { - httpd.HttpError(w, "error finding recording: "+err.Error(), true, http.StatusInternalServerError) + httpd.HttpError(w, "could not find replay: "+err.Error(), true, http.StatusNotFound) return } - if len(info) != 1 { - httpd.HttpError(w, "recording not found", true, http.StatusNotFound) - return + if replay.Status == Running { + w.WriteHeader(http.StatusAccepted) + } else { + w.WriteHeader(http.StatusOK) } + w.Write(httpd.MarshalJSON(convertReplay(replay), true)) +} - w.Write(httpd.MarshalJSON(info[0], true)) +func (r *Service) handleDeleteReplay(w http.ResponseWriter, req *http.Request) { + _, id := path.Split(req.URL.Path) + //TODO: Cancel running replays + r.replays.Delete(id) + w.WriteHeader(http.StatusNoContent) } -type RecordingInfo struct { - ID string - Type kapacitor.TaskType - Size int64 - Created time.Time - Error string `json:",omitempty"` +var allReplayFields = []string{ + "link", + "id", + "recording", + "task", + "recording-time", + "clock", + "date", + "error", + "status", + "progress", } -func (r *Service) GetRecordings(rids []string) ([]RecordingInfo, error) { - files, err := ioutil.ReadDir(r.saveDir) - if err != nil { - return nil, err +func (s *Service) handleListReplays(w http.ResponseWriter, r *http.Request) { + pattern := r.URL.Query().Get("pattern") + fields := r.URL.Query()["fields"] + if len(fields) == 0 { + fields = allReplayFields + } else { + // Always return ID field + fields = append(fields, "id", "link") } - ids := make(map[string]bool) - for _, id := range rids { - ids[id] = true + offsetStr := r.URL.Query().Get("offset") + offset, err := strconv.ParseInt(offsetStr, 10, 64) + if err != nil { + httpd.HttpError(w, 
fmt.Sprintf("invalid offset parameter %q must be an integer: %s", offsetStr, err), true, http.StatusBadRequest) } - infos := make([]RecordingInfo, 0, len(files)) + limitStr := r.URL.Query().Get("limit") + limit, err := strconv.ParseInt(limitStr, 10, 64) + if err != nil { + httpd.HttpError(w, fmt.Sprintf("invalid limit parameter %q must be an integer: %s", limitStr, err), true, http.StatusBadRequest) + } + if limit == 0 { + limit = 100 + } - for _, info := range files { - if info.IsDir() { - continue - } - name := info.Name() - i := strings.LastIndex(name, ".") - ext := name[i:] - id := name[:i] - if len(ids) > 0 && !ids[id] { - continue - } - var typ kapacitor.TaskType - switch ext { - case streamEXT: - typ = kapacitor.StreamTask - case batchEXT: - typ = kapacitor.BatchTask - default: - continue - } - rinfo := RecordingInfo{ - ID: id, - Type: typ, - Size: info.Size(), - Created: info.ModTime().UTC(), + replays, err := s.replays.List(pattern, int(offset), int(limit)) + + rs := make([]map[string]interface{}, len(replays)) + for i, replay := range replays { + rs[i] = make(map[string]interface{}, len(fields)) + for _, field := range fields { + var value interface{} + switch field { + case "id": + value = replay.ID + case "link": + value = replayLink(replay.ID) + case "recording": + value = replay.RecordingID + case "task": + value = replay.TaskID + case "recording-time": + value = replay.RecordingTime + case "clock": + switch replay.Clock { + case Fast: + value = kclient.Fast + case Real: + value = kclient.Real + } + case "date": + value = replay.Date + case "error": + value = replay.Error + case "status": + switch replay.Status { + case Failed: + value = kclient.Failed + case Running: + value = kclient.Running + case Finished: + value = kclient.Finished + } + case "progress": + value = replay.Progress + } + rs[i][field] = value } - infos = append(infos, rinfo) } - return infos, nil + type response struct { + Replays []map[string]interface{} `json:"replays"` + } + 
w.Write(httpd.MarshalJSON(response{Replays: rs}, true)) } -func (r *Service) find(id string, typ kapacitor.TaskType) (*os.File, error) { - var ext string - var other string - switch typ { - case kapacitor.StreamTask: - ext = streamEXT - other = batchEXT - case kapacitor.BatchTask: - ext = batchEXT - other = streamEXT - default: - return nil, fmt.Errorf("unknown task type %q", typ) - } - p := path.Join(r.saveDir, id+ext) - f, err := os.Open(p) +func (r *Service) handleCreateReplay(w http.ResponseWriter, req *http.Request) { + var opt kclient.CreateReplayOptions + // Default clock to the Fast clock + opt.Clock = kclient.Fast + dec := json.NewDecoder(req.Body) + err := dec.Decode(&opt) if err != nil { - if _, err := os.Stat(path.Join(r.saveDir, id+other)); err == nil { - return nil, fmt.Errorf("found recording of wrong type, check task type matches recording.") - } - return nil, err + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + if opt.ID == "" { + opt.ID = uuid.NewV4().String() + } + if !validID.MatchString(opt.ID) { + httpd.HttpError(w, fmt.Sprintf("replay ID must match %v %q", validID, opt.ID), true, http.StatusBadRequest) + return } - return f, nil -} -func (r *Service) FindStreamRecording(id string) (io.ReadCloser, error) { - f, err := r.find(id, kapacitor.StreamTask) + t, err := r.TaskStore.Load(opt.Task) if err != nil { - return nil, err + httpd.HttpError(w, "task load: "+err.Error(), true, http.StatusNotFound) + return } - gz, err := gzip.NewReader(f) + recording, err := r.recordings.Get(opt.Recording) if err != nil { - return nil, err + httpd.HttpError(w, "recording not found: "+err.Error(), true, http.StatusNotFound) + return } - return rc{gz, f}, nil -} -func (r *Service) FindBatchRecording(id string) ([]io.ReadCloser, error) { - f, err := r.find(id, kapacitor.BatchTask) - if err != nil { - return nil, err + var clk clock.Clock + var clockType Clock + switch opt.Clock { + case kclient.Real: + clk = clock.Wall() + clockType = 
Real + case kclient.Fast: + clk = clock.Fast() + clockType = Fast + default: + httpd.HttpError(w, fmt.Sprintf("invalid clock type %v", opt.Clock), true, http.StatusBadRequest) + return } - stat, err := f.Stat() + + // Successfully started replay + replay := Replay{ + ID: opt.ID, + RecordingID: opt.Recording, + TaskID: opt.Task, + RecordingTime: opt.RecordingTime, + Clock: clockType, + Date: time.Now(), + Status: Running, + } + r.replays.Create(replay) + + go func(replay Replay) { + err := r.doReplay(t, recording, clk, opt.RecordingTime) + replay.Status = Finished + if err != nil { + replay.Status = Failed + replay.Error = err.Error() + } + replay.Progress = 1.0 + replay.Date = time.Now() + err = r.replays.Replace(replay) + if err != nil { + r.logger.Println("E! failed to save replay results:", err) + } + }(replay) + + w.WriteHeader(http.StatusCreated) + w.Write(httpd.MarshalJSON(convertReplay(replay), true)) +} + +func (r *Service) doReplay(task *kapacitor.Task, recording Recording, clk clock.Clock, recTime bool) error { + // Create new isolated task master + tm := r.TaskMaster.New() + tm.Open() + defer tm.Close() + et, err := tm.StartTask(task) if err != nil { - return nil, err + return errors.Wrap(err, "task start") } - archive, err := zip.NewReader(f, stat.Size()) + + dataSource, err := parseDataSourceURL(recording.DataURL) if err != nil { - return nil, err + return errors.Wrap(err, "load data source") } - rcs := make([]io.ReadCloser, len(archive.File)) - for i, file := range archive.File { - rc, err := file.Open() + + replay := kapacitor.NewReplay(clk) + var replayC <-chan error + switch task.Type { + case kapacitor.StreamTask: + f, err := dataSource.StreamReader() if err != nil { - return nil, err + return errors.Wrap(err, "data source open") } - rcs[i] = rc + stream, err := tm.Stream(recording.ID) + if err != nil { + return errors.Wrap(err, "stream start") + } + replayC = replay.ReplayStream(f, stream, recTime, precision) + case kapacitor.BatchTask: + fs, err 
:= dataSource.BatchReaders() + if err != nil { + return errors.Wrap(err, "data source open") + } + batches := tm.BatchCollectors(task.ID) + replayC = replay.ReplayBatch(fs, batches, recTime) } - return rcs, nil -} -func (r *Service) Delete(id string) { - ps := path.Join(r.saveDir, id+streamEXT) - pb := path.Join(r.saveDir, id+batchEXT) - os.Remove(ps) - os.Remove(pb) -} + // Check for error on replay + err = <-replayC + if err != nil { + return errors.Wrap(err, "replay") + } -type rc struct { - r io.ReadCloser - c io.Closer -} + // Drain tm so the task can finish + tm.Drain() -func (r rc) Read(p []byte) (int, error) { - return r.r.Read(p) -} + // Stop stats nodes + et.StopStats() -func (r rc) Close() error { - err := r.r.Close() - if err != nil { - return err - } - err = r.c.Close() + // Check for error on task + err = et.Wait() if err != nil { - return err + return errors.Wrap(err, "task run") } - return nil -} -// create new stream writer -func (r *Service) newStreamWriter(rid uuid.UUID) (io.WriteCloser, error) { - rpath := path.Join(r.saveDir, rid.String()+streamEXT) - f, err := os.Create(rpath) + // Call close explicitly to check for error + err = tm.Close() if err != nil { - return nil, fmt.Errorf("failed to save recording: %s", err) + return errors.Wrap(err, "task master close") } - gz := gzip.NewWriter(f) - sw := streamWriter{f: f, gz: gz} - return sw, nil + return nil } // wrap gzipped writer and underlying file @@ -608,12 +872,12 @@ func (s streamWriter) Close() error { } // Record the stream for a duration -func (r *Service) doRecordStream(rid uuid.UUID, dur time.Duration, dbrps []kapacitor.DBRP, measurements []string, started chan struct{}) error { - e, err := r.TaskMaster.NewFork(rid.String(), dbrps, measurements) +func (r *Service) doRecordStream(id string, dataSource DataSource, stop time.Time, dbrps []kapacitor.DBRP, measurements []string) error { + e, err := r.TaskMaster.NewFork(id, dbrps, measurements) if err != nil { return err } - sw, err := 
r.newStreamWriter(rid) + sw, err := dataSource.StreamWriter() if err != nil { return err } @@ -621,17 +885,12 @@ func (r *Service) doRecordStream(rid uuid.UUID, dur time.Duration, dbrps []kapac done := make(chan struct{}) go func() { - close(started) - start := time.Time{} closed := false for p, ok := e.NextPoint(); ok; p, ok = e.NextPoint() { if closed { continue } - if start.IsZero() { - start = p.Time - } - if p.Time.Sub(start) > dur { + if p.Time.After(stop) { closed = true close(done) //continue to read any data already on the edge, but just drop it. @@ -642,21 +901,10 @@ func (r *Service) doRecordStream(rid uuid.UUID, dur time.Duration, dbrps []kapac }() <-done e.Abort() - r.TaskMaster.DelFork(rid.String()) + r.TaskMaster.DelFork(id) return nil } -// open an archive for writing batch recordings -func (r *Service) newBatchArchive(rid uuid.UUID) (*batchArchive, error) { - rpath := path.Join(r.saveDir, rid.String()+batchEXT) - f, err := os.Create(rpath) - if err != nil { - return nil, err - } - archive := zip.NewWriter(f) - return &batchArchive{f: f, archive: archive}, nil -} - // wrap the underlying file and archive type batchArchive struct { f io.Closer @@ -664,7 +912,7 @@ type batchArchive struct { } // create new file in archive from batch index -func (b batchArchive) Create(idx int) (io.Writer, error) { +func (b batchArchive) Archive(idx int) (io.Writer, error) { return b.archive.Create(strconv.FormatInt(int64(idx), 10)) } @@ -679,7 +927,7 @@ func (b batchArchive) Close() error { } // Record a series of batch queries defined by a batch task -func (r *Service) doRecordBatch(rid uuid.UUID, t *kapacitor.Task, start, stop time.Time, cluster string) error { +func (r *Service) doRecordBatch(id string, dataSource DataSource, t *kapacitor.Task, start, stop time.Time, cluster string) error { et, err := kapacitor.NewExecutingTask(r.TaskMaster.New(), t) if err != nil { return err @@ -704,13 +952,13 @@ func (r *Service) doRecordBatch(rid uuid.UUID, t *kapacitor.Task, 
start, stop ti return err } - archive, err := r.newBatchArchive(rid) + archiver, err := dataSource.BatchArchiver() if err != nil { return err } for batchIdx, queries := range batches { - w, err := archive.Create(batchIdx) + w, err := archiver.Archive(batchIdx) if err != nil { return err } @@ -736,10 +984,10 @@ func (r *Service) doRecordBatch(rid uuid.UUID, t *kapacitor.Task, start, stop ti } } } - return archive.Close() + return archiver.Close() } -func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType, cluster string) error { +func (r *Service) doRecordQuery(id string, dataSource DataSource, q string, typ RecordingType, cluster string) error { // Parse query to determine dbrp var db, rp string s, err := influxql.ParseStatement(q) @@ -781,24 +1029,24 @@ func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType, // Open appropriate writer var w io.Writer var c io.Closer - switch tt { - case kapacitor.StreamTask: - sw, err := r.newStreamWriter(rid) + switch typ { + case StreamRecording: + sw, err := dataSource.StreamWriter() if err != nil { return err } w = sw c = sw - case kapacitor.BatchTask: - archive, err := r.newBatchArchive(rid) + case BatchRecording: + archiver, err := dataSource.BatchArchiver() if err != nil { return err } - w, err = archive.Create(0) + w, err = archiver.Archive(0) if err != nil { return err } - c = archive + c = archiver } // Write results to writer for _, res := range resp.Results { @@ -807,8 +1055,8 @@ func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType, c.Close() return err } - switch tt { - case kapacitor.StreamTask: + switch typ { + case StreamRecording: // Write points in order across batches // Find earliest time of first points @@ -856,7 +1104,7 @@ func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType, } current = next } - case kapacitor.BatchTask: + case BatchRecording: for _, batch := range batches { kapacitor.WriteBatchForRecording(w, 
batch) } @@ -864,3 +1112,119 @@ func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType, } return c.Close() } + +type BatchArchiver interface { + io.Closer + Archive(idx int) (io.Writer, error) +} + +type DataSource interface { + Size() (int64, error) + Remove() error + StreamWriter() (io.WriteCloser, error) + StreamReader() (io.ReadCloser, error) + BatchArchiver() (BatchArchiver, error) + BatchReaders() ([]io.ReadCloser, error) +} + +type fileSource string + +func parseDataSourceURL(rawurl string) (DataSource, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + switch u.Scheme { + case "file": + return fileSource(u.Path), nil + default: + return nil, fmt.Errorf("unsupported data source scheme %s", u.Scheme) + } +} + +func (s fileSource) Size() (int64, error) { + info, err := os.Stat(string(s)) + if err != nil { + return -1, err + } + return info.Size(), nil +} + +func (s fileSource) Remove() error { + return os.Remove(string(s)) +} + +func (s fileSource) StreamWriter() (io.WriteCloser, error) { + f, err := os.Create(string(s)) + if err != nil { + return nil, fmt.Errorf("failed to create recording file: %s", err) + } + gz := gzip.NewWriter(f) + sw := streamWriter{f: f, gz: gz} + return sw, nil +} + +func (s fileSource) StreamReader() (io.ReadCloser, error) { + f, err := os.Open(string(s)) + if err != nil { + return nil, err + } + gz, err := gzip.NewReader(f) + if err != nil { + return nil, err + } + return rc{gz, f}, nil +} + +func (s fileSource) BatchArchiver() (BatchArchiver, error) { + f, err := os.Create(string(s)) + if err != nil { + return nil, err + } + archive := zip.NewWriter(f) + return &batchArchive{f: f, archive: archive}, nil +} +func (s fileSource) BatchReaders() ([]io.ReadCloser, error) { + f, err := os.Open(string(s)) + if err != nil { + return nil, err + } + stat, err := f.Stat() + if err != nil { + return nil, err + } + archive, err := zip.NewReader(f, stat.Size()) + if err != nil { + return nil, 
err + } + rcs := make([]io.ReadCloser, len(archive.File)) + for i, file := range archive.File { + rc, err := file.Open() + if err != nil { + return nil, err + } + rcs[i] = rc + } + return rcs, nil +} + +type rc struct { + r io.ReadCloser + c io.Closer +} + +func (r rc) Read(p []byte) (int, error) { + return r.r.Read(p) +} + +func (r rc) Close() error { + err := r.r.Close() + if err != nil { + return err + } + err = r.c.Close() + if err != nil { + return err + } + return nil +} diff --git a/services/storage/bolt.go b/services/storage/bolt.go new file mode 100644 index 000000000..57bc2c565 --- /dev/null +++ b/services/storage/bolt.go @@ -0,0 +1,108 @@ +package storage + +import ( + "bytes" + + "github.com/boltdb/bolt" +) + +// Bolt implementation of Store +type Bolt struct { + db *bolt.DB + bucket []byte +} + +func NewBolt(db *bolt.DB, bucket string) *Bolt { + return &Bolt{ + db: db, + bucket: []byte(bucket), + } +} + +func (b *Bolt) Put(key string, value []byte) error { + return b.db.Update(func(tx *bolt.Tx) error { + bucket, err := tx.CreateBucketIfNotExists(b.bucket) + if err != nil { + return err + } + err = bucket.Put([]byte(key), value) + if err != nil { + return err + } + return nil + }) +} +func (b *Bolt) Get(key string) (*KeyValue, error) { + var value []byte + err := b.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return ErrNoKeyExists + } + + val := bucket.Get([]byte(key)) + if val == nil { + return ErrNoKeyExists + } + value = make([]byte, len(val)) + copy(value, val) + return nil + }) + if err != nil { + return nil, err + } + + return &KeyValue{ + Key: key, + Value: value, + }, nil +} + +func (b *Bolt) Delete(key string) error { + return b.db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return nil + } + return bucket.Delete([]byte(key)) + }) +} + +func (b *Bolt) Exists(key string) (bool, error) { + var exists bool + err := b.db.View(func(tx *bolt.Tx) error { + bucket 
:= tx.Bucket(b.bucket) + if bucket == nil { + return nil + } + + val := bucket.Get([]byte(key)) + exists = val != nil + return nil + }) + return exists, err +} + +func (b *Bolt) List(prefix string) (kvs []*KeyValue, err error) { + err = b.db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return nil + } + + cursor := bucket.Cursor() + prefix := []byte(prefix) + + for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() { + value := make([]byte, len(v)) + copy(value, v) + + kvs = append(kvs, &KeyValue{ + Key: string(key), + Value: value, + }) + } + return nil + }) + return kvs, err +} diff --git a/services/storage/config.go b/services/storage/config.go new file mode 100644 index 000000000..67443b436 --- /dev/null +++ b/services/storage/config.go @@ -0,0 +1,21 @@ +package storage + +import "fmt" + +type Config struct { + // Path to a boltdb database file. + BoltDBPath string `toml:"boltdb"` +} + +func NewConfig() Config { + return Config{ + BoltDBPath: "./kapacitor.db", + } +} + +func (c Config) Validate() error { + if c.BoltDBPath == "" { + return fmt.Errorf("must specify storage 'boltdb' path") + } + return nil +} diff --git a/services/storage/doc.go b/services/storage/doc.go new file mode 100644 index 000000000..d2b4f1124 --- /dev/null +++ b/services/storage/doc.go @@ -0,0 +1,13 @@ +/* +The storage package provides a key/value based interface for storing Kapacitor metadata. +All services wishing to store data should use this interface. + +The usage patterns for this storage layer are typical create/replace/delete/get/list operations. +Typically objects are serialized and stored as the value. +As a result, updates to a single field of an object can incur the cost to retrieve the entire object and store it again. +In most cases this is acceptable since modifications are rare and object size is small. + +A BoltDB backed implementation is also provided. 
+ +*/ +package storage diff --git a/services/storage/service.go b/services/storage/service.go new file mode 100644 index 000000000..6cbab1bf1 --- /dev/null +++ b/services/storage/service.go @@ -0,0 +1,67 @@ +package storage + +import ( + "log" + "os" + "path" + "sync" + + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +type Service struct { + dbpath string + + boltdb *bolt.DB + stores map[string]Interface + mu sync.Mutex + + logger *log.Logger +} + +func NewService(conf Config, l *log.Logger) *Service { + return &Service{ + dbpath: conf.BoltDBPath, + logger: l, + stores: make(map[string]Interface), + } +} + +func (s *Service) Open() error { + s.mu.Lock() + defer s.mu.Unlock() + err := os.MkdirAll(path.Dir(s.dbpath), 0755) + if err != nil { + return errors.Wrapf(err, "mkdir dirs %q", s.dbpath) + } + db, err := bolt.Open(s.dbpath, 0600, nil) + if err != nil { + return errors.Wrapf(err, "open boltdb @ %q", s.dbpath) + } + s.boltdb = db + return nil +} + +func (s *Service) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + if s.boltdb != nil { + return s.boltdb.Close() + } + return nil +} + +// Return a namespaced store. +// Calling Store with the same namespace returns the same Store. +func (s *Service) Store(name string) Interface { + s.mu.Lock() + defer s.mu.Unlock() + if store, ok := s.stores[name]; ok { + return store + } else { + store = NewBolt(s.boltdb, name) + s.stores[name] = store + return store + } +} diff --git a/services/storage/storage.go b/services/storage/storage.go new file mode 100644 index 000000000..059e88a95 --- /dev/null +++ b/services/storage/storage.go @@ -0,0 +1,64 @@ +package storage + +import "errors" + +// Common errors that can be returned +var ( + ErrNoKeyExists = errors.New("no key exists") +) + +// Common interface for interacting with a simple Key/Value storage +type Interface interface { + // Store a value. + Put(key string, value []byte) error + // Retrieve a value. 
+ Get(key string) (*KeyValue, error) + // Delete a key. + // Deleting a non-existent key is not an error. + Delete(key string) error + // Check if a key exists> + Exists(key string) (bool, error) + // List all values with given prefix. + List(prefix string) ([]*KeyValue, error) +} + +type KeyValue struct { + Key string + Value []byte +} + +// Return a list of values from a list of KeyValues using an offset/limit bound and a match function. +func DoListFunc(list []*KeyValue, match func(value []byte) bool, offset, limit int) [][]byte { + l := len(list) + upper := offset + limit + if upper > l { + upper = l + } + size := upper - offset + if size <= 0 { + // No more results + return nil + } + matches := make([][]byte, 0, size) + i := 0 + for _, kv := range list { + if !match(kv.Value) { + continue + } + // Count matched + i++ + + // Skip till offset + if i <= offset { + continue + } + + matches = append(matches, kv.Value) + + // Stop once limit reached + if len(matches) == size { + break + } + } + return matches +} diff --git a/services/task_store/config.go b/services/task_store/config.go index e93263fdf..7ca560a8c 100644 --- a/services/task_store/config.go +++ b/services/task_store/config.go @@ -1,13 +1,13 @@ package task_store import ( - "fmt" "time" "github.com/influxdata/influxdb/toml" ) type Config struct { + // Deprecated, only needed to find old db and migrate Dir string `toml:"dir"` SnapshotInterval toml.Duration `toml:"snapshot-interval"` } @@ -20,8 +20,5 @@ func NewConfig() Config { } func (c Config) Validate() error { - if c.Dir == "" { - return fmt.Errorf("must specify task_store dir") - } return nil } diff --git a/services/task_store/dao.go b/services/task_store/dao.go new file mode 100644 index 000000000..9b2e9afc6 --- /dev/null +++ b/services/task_store/dao.go @@ -0,0 +1,319 @@ +package task_store + +import ( + "bytes" + "encoding/gob" + "errors" + "path" + + "github.com/influxdata/kapacitor/services/storage" +) + +var ( + ErrTaskExists = 
errors.New("task already exists") + ErrNoTaskExists = errors.New("no task exists") + ErrNoSnapshotExists = errors.New("no snapshot exists") +) + +// Data access object for Task Snapshot data. +type TaskDAO interface { + // Retrieve a task + Get(id string) (Task, error) + + // Create a task. + // ErrTaskExists is returned if a task already exists with the same ID. + Create(t Task) error + + // Replace an existing task. + // ErrNoTaskExists is returned if the task does not exist. + Replace(t Task) error + + // Delete a task. + // It is not an error to delete an non-existent task. + Delete(id string) error + + // List tasks matching a pattern. + // The pattern is shell/glob matching see https://golang.org/pkg/path/#Match + // Offset and limit are pagination bounds. Offset is inclusive starting at index 0. + // More results may exist while the number of returned items is equal to limit. + List(pattern string, offset, limit int) ([]Task, error) +} + +// Data access object for Snapshot data. +type SnapshotDAO interface { + // Load a saved snapshot. + // ErrNoSnapshotExists will be returned if HasSnapshot returns false. + Get(id string) (*Snapshot, error) + // Save a snapshot. + Put(id string, snapshot *Snapshot) error + // Whether a snapshot exists in the store. + Exists(id string) (bool, error) +} + +//-------------------------------------------------------------------- +// The following structures are stored in a database via gob encoding. +// Changes to the structures could break existing data. +// +// Many of these structures are exact copies of structures found elsewhere, +// this is intentional so that all structures stored in the database are +// defined here and nowhere else. So as to not accidentally change +// the gob serialization format in incompatible ways. 
+ +type Status int + +const ( + Disabled Status = iota + Enabled +) + +type TaskType int + +const ( + StreamTask TaskType = iota + BatchTask +) + +type Task struct { + // Unique identifier for the task + ID string + // The task type (stream|batch). + Type TaskType + // The DBs and RPs the task is allowed to access. + DBRPs []DBRP + // The TICKscript for the task. + TICKscript string + // Last error the task had either while defining or executing. + Error string + // Status of the task + Status Status +} + +type DBRP struct { + Database string + RetentionPolicy string +} + +type Snapshot struct { + NodeSnapshots map[string][]byte +} + +const ( + taskDataPrefix = "/tasks/data/" + taskIndexesPrefix = "/tasks/indexes/" + + // Name of ID index + idIndex = "id/" +) + +// Key/Value store based implementation of the TaskDAO +type taskKV struct { + store storage.Interface +} + +func newTaskKV(store storage.Interface) *taskKV { + return &taskKV{ + store: store, + } +} + +func (d *taskKV) encodeTask(t Task) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(t) + return buf.Bytes(), err +} + +func (d *taskKV) decodeTask(data []byte) (Task, error) { + var task Task + dec := gob.NewDecoder(bytes.NewReader(data)) + err := dec.Decode(&task) + return task, err +} + +// Create a key for the task data +func (d *taskKV) taskDataKey(id string) string { + return taskDataPrefix + id +} + +// Create a key for a given index and value. +// +// Indexes are maintained via a 'directory' like system: +// +// /tasks/data/ID -- contains encoded task data +// /tasks/index/id/ID -- contains the task ID +// +// As such to list all tasks in ID sorted order use the /tasks/index/id/ directory. 
+func (d *taskKV) taskIndexKey(index, value string) string { + return taskIndexesPrefix + index + value +} + +func (d *taskKV) Get(id string) (Task, error) { + key := d.taskDataKey(id) + if exists, err := d.store.Exists(key); err != nil { + return Task{}, err + } else if !exists { + return Task{}, ErrNoTaskExists + } + kv, err := d.store.Get(key) + if err != nil { + return Task{}, err + } + return d.decodeTask(kv.Value) +} + +func (d *taskKV) Create(t Task) error { + key := d.taskDataKey(t.ID) + + exists, err := d.store.Exists(key) + if err != nil { + return err + } + if exists { + return ErrTaskExists + } + + data, err := d.encodeTask(t) + if err != nil { + return err + } + // Put data + err = d.store.Put(key, data) + if err != nil { + return err + } + // Put ID index + indexKey := d.taskIndexKey(idIndex, t.ID) + return d.store.Put(indexKey, []byte(t.ID)) +} + +func (d *taskKV) Replace(t Task) error { + key := d.taskDataKey(t.ID) + + exists, err := d.store.Exists(key) + if err != nil { + return err + } + if !exists { + return ErrNoTaskExists + } + + data, err := d.encodeTask(t) + if err != nil { + return err + } + // Put data + err = d.store.Put(key, data) + if err != nil { + return err + } + return nil +} + +func (d *taskKV) Delete(id string) error { + key := d.taskDataKey(id) + indexKey := d.taskIndexKey(idIndex, id) + + dataErr := d.store.Delete(key) + indexErr := d.store.Delete(indexKey) + if dataErr != nil { + return dataErr + } + return indexErr +} + +func (d *taskKV) List(pattern string, offset, limit int) ([]Task, error) { + // Tasks are indexed via their ID only. + // While tasks are sorted in the data section by their ID anyway + // this allows us to do offset/limits and filtering without having to read in all task data. 
+ + // List all task ids sorted by ID + ids, err := d.store.List(taskIndexesPrefix + idIndex) + if err != nil { + return nil, err + } + + var match func([]byte) bool + if pattern != "" { + match = func(value []byte) bool { + id := string(value) + matched, _ := path.Match(pattern, id) + return matched + } + } else { + match = func([]byte) bool { return true } + } + matches := storage.DoListFunc(ids, match, offset, limit) + + tasks := make([]Task, len(matches)) + for i, id := range matches { + data, err := d.store.Get(d.taskDataKey(string(id))) + if err != nil { + return nil, err + } + t, err := d.decodeTask(data.Value) + tasks[i] = t + } + return tasks, nil +} + +const ( + snapshotDataPrefix = "/snapshots/data/" +) + +// Key/Value implementation of SnapshotDAO +type snapshotKV struct { + store storage.Interface +} + +func newSnapshotKV(store storage.Interface) *snapshotKV { + return &snapshotKV{ + store: store, + } +} +func (d *snapshotKV) encodeSnapshot(snapshot *Snapshot) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(snapshot) + return buf.Bytes(), err +} + +func (d *snapshotKV) decodeSnapshot(data []byte) (*Snapshot, error) { + snapshot := new(Snapshot) + dec := gob.NewDecoder(bytes.NewReader(data)) + err := dec.Decode(snapshot) + return snapshot, err +} + +func (d *snapshotKV) snapshotDataKey(id string) string { + return snapshotDataPrefix + id +} + +func (d *snapshotKV) Put(id string, snapshot *Snapshot) error { + key := d.snapshotDataKey(id) + data, err := d.encodeSnapshot(snapshot) + if err != nil { + return err + } + return d.store.Put(key, data) +} + +func (d *snapshotKV) Exists(id string) (bool, error) { + key := d.snapshotDataKey(id) + return d.store.Exists(key) +} + +func (d *snapshotKV) Get(id string) (*Snapshot, error) { + exists, err := d.Exists(id) + if err != nil { + return nil, err + } + if !exists { + return nil, ErrNoSnapshotExists + } + key := d.snapshotDataKey(id) + data, err := d.store.Get(key) + if 
err != nil { + return nil, err + } + return d.decodeSnapshot(data.Value) +} diff --git a/services/task_store/service.go b/services/task_store/service.go index bfda4d11d..a94047030 100644 --- a/services/task_store/service.go +++ b/services/task_store/service.go @@ -4,39 +4,35 @@ import ( "bytes" "encoding/gob" "encoding/json" - "errors" "fmt" - "io/ioutil" "log" "net/http" - "os" "path" "path/filepath" + "regexp" "strconv" - "strings" "time" "github.com/boltdb/bolt" - "github.com/influxdata/influxdb/influxql" "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/client/v1" "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/storage" "github.com/influxdata/kapacitor/tick" -) - -const taskDB = "task.db" - -var ( - tasksBucket = []byte("tasks") - enabledBucket = []byte("enabled") - snapshotBucket = []byte("snapshots") + "github.com/pkg/errors" + "github.com/twinj/uuid" ) type Service struct { - dbpath string - db *bolt.DB + oldDBDir string + tasks TaskDAO + snapshots SnapshotDAO routes []httpd.Route snapshotInterval time.Duration - HTTPDService interface { + StorageService interface { + Store(namespace string) storage.Interface + } + HTTPDService interface { AddRoutes([]httpd.Route) error DelRoutes([]httpd.Route) } @@ -66,71 +62,68 @@ type taskStore struct { func NewService(conf Config, l *log.Logger) *Service { return &Service{ - dbpath: path.Join(conf.Dir, taskDB), snapshotInterval: time.Duration(conf.SnapshotInterval), logger: l, + oldDBDir: conf.Dir, } } +// The storage namespace for all task data. 
+const taskNamespace = "task_store" + func (ts *Service) Open() error { - err := os.MkdirAll(path.Dir(ts.dbpath), 0755) - if err != nil { - return err - } + // Create DAO + store := ts.StorageService.Store(taskNamespace) + ts.tasks = newTaskKV(store) + ts.snapshots = newSnapshotKV(store) - // Open db - db, err := bolt.Open(ts.dbpath, 0600, nil) + // Perform migration to new storage service. + err := ts.migrate() if err != nil { return err } - ts.db = db // Define API routes ts.routes = []httpd.Route{ { - Name: "task-show", + Name: "task", Method: "GET", - Pattern: "/task", + Pattern: "/tasks/", HandlerFunc: ts.handleTask, }, { - Name: "task-list", - Method: "GET", - Pattern: "/tasks", - HandlerFunc: ts.handleTasks, - }, - { - Name: "task-save", - Method: "POST", - Pattern: "/task", - HandlerFunc: ts.handleSave, - }, - { - Name: "task-delete", + Name: "deleteTask", Method: "DELETE", - Pattern: "/task", - HandlerFunc: ts.handleDelete, + Pattern: "/tasks/", + HandlerFunc: ts.handleDeleteTask, }, { // Satisfy CORS checks. 
- Name: "task-delete", + Name: "/tasks/-cors", Method: "OPTIONS", - Pattern: "/task", + Pattern: "/tasks/", HandlerFunc: httpd.ServeOptions, }, { - Name: "task-enable", - Method: "POST", - Pattern: "/enable", - HandlerFunc: ts.handleEnable, + Name: "updateTask", + Method: "PATCH", + Pattern: "/tasks/", + HandlerFunc: ts.handleUpdateTask, }, { - Name: "task-disable", + Name: "listTasks", + Method: "GET", + Pattern: "/tasks", + HandlerFunc: ts.handleListTasks, + }, + { + Name: "createTask", Method: "POST", - Pattern: "/disable", - HandlerFunc: ts.handleDisable, + Pattern: "/tasks", + HandlerFunc: ts.handleCreateTask, }, } + err = ts.HTTPDService.AddRoutes(ts.routes) if err != nil { return err @@ -140,135 +133,232 @@ func (ts *Service) Open() error { numEnabledTasks := int64(0) // Count all tasks - err = ts.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(tasksBucket)) - if b == nil { - return nil + offset := 0 + limit := 100 + kapacitor.NumEnabledTasksVar.Set(0) + for { + tasks, err := ts.tasks.List("*", offset, limit) + if err != nil { + return err } - return b.ForEach(func(k, v []byte) error { + for _, task := range tasks { numTasks++ - return nil - }) - }) + if task.Status == Enabled { + ts.logger.Println("D! starting enabled task on startup", task.ID) + err = ts.startTask(task) + if err != nil { + ts.logger.Printf("E! error starting enabled task %s, err: %s\n", task.ID, err) + } else { + ts.logger.Println("D! started task during startup", task.ID) + numEnabledTasks++ + } + } + } + if len(tasks) != limit { + break + } + offset += limit + } + + // Set expvars + kapacitor.NumTasksVar.Set(numTasks) + + return nil +} + +// Migrate data from previous task.db to new storage service. +// This process will return any errors and stop the TaskStore from opening +// thus stopping the entire Kapacitor startup. +// This way Kapacitor will not be able to startup without successfully migrating +// to the new scheme. 
+// +// This process is idempotent and can be attempted multiple times until success is achieved. +func (ts *Service) migrate() error { + if ts.oldDBDir == "" { + return nil + } + + tasksBucket := []byte("tasks") + enabledBucket := []byte("enabled") + snapshotBucket := []byte("snapshots") + + // Connect to old boltdb + db, err := bolt.Open(filepath.Join(ts.oldDBDir, "task.db"), 0600, &bolt.Options{ReadOnly: true}) if err != nil { - return err + ts.logger.Println("W! could not open old boltd for task_store. Not performing migration. Remove the `task_store.dir` configuration to disable migration.") + return nil } - // Get enabled tasks - enabledTasks := make([]string, 0) - err = ts.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(enabledBucket)) - if b == nil { + // Old task format + type rawTask struct { + // The name of the task. + Name string + // The TICKscript for the task. + TICKscript string + // Last error the task had either while defining or executing. + Error string + // The task type (stream|batch). + Type kapacitor.TaskType + // The DBs and RPs the task is allowed to access. + DBRPs []kapacitor.DBRP + SnapshotInterval time.Duration + } + + // Migrate all tasks + err = db.View(func(tx *bolt.Tx) error { + tasks := tx.Bucket([]byte(tasksBucket)) + if tasks == nil { return nil } - return b.ForEach(func(k, v []byte) error { - enabledTasks = append(enabledTasks, string(k)) + enables := tx.Bucket([]byte(enabledBucket)) + return tasks.ForEach(func(k, v []byte) error { + r := bytes.NewReader(v) + dec := gob.NewDecoder(r) + task := &rawTask{} + err = dec.Decode(task) + if err != nil { + ts.logger.Println("E! 
corrupt data in old task_store boltdb tasks:", err) + return nil + } + + var typ TaskType + switch task.Type { + case kapacitor.StreamTask: + typ = StreamTask + case kapacitor.BatchTask: + typ = BatchTask + } + + dbrps := make([]DBRP, len(task.DBRPs)) + for i, dbrp := range task.DBRPs { + dbrps[i] = DBRP{ + Database: dbrp.Database, + RetentionPolicy: dbrp.RetentionPolicy, + } + } + + status := Disabled + if enables != nil { + data := enables.Get(k) + if data != nil { + status = Enabled + } + } + + newTask := Task{ + ID: task.Name, + Type: typ, + DBRPs: dbrps, + TICKscript: task.TICKscript, + Error: task.Error, + Status: status, + } + // Try and create the task in the new store. + err = ts.tasks.Create(newTask) + if err != nil { + if err != ErrTaskExists { + // Failed to migrate task stop process + return err + } else { + ts.logger.Printf("D! task %s has already been migrated skipping", task.Name) + } + } else { + ts.logger.Printf("D! task %s was migrated to new storage service", task.Name) + } return nil }) }) if err != nil { - return err + return errors.Wrap(err, "migrating tasks") } - // Start each enabled task - for _, name := range enabledTasks { - ts.logger.Println("D! starting enabled task on startup", name) - t, err := ts.Load(name) - if err != nil { - ts.logger.Printf("E! error loading enabled task %s, err: %s\n", name, err) + // Migrate all snapshots + err = db.View(func(tx *bolt.Tx) error { + snapshots := tx.Bucket([]byte(snapshotBucket)) + if snapshots == nil { return nil } - err = ts.StartTask(t) - if err != nil { - ts.logger.Printf("E! error starting enabled task %s, err: %s\n", name, err) - } else { - ts.logger.Println("D! started task during startup", name) - numEnabledTasks++ - } - } + return snapshots.ForEach(func(k, v []byte) error { + r := bytes.NewReader(v) + dec := gob.NewDecoder(r) + snapshot := &kapacitor.TaskSnapshot{} + err = dec.Decode(snapshot) + if err != nil { + ts.logger.Println("E! 
corrupt data in old task_store boltdb snapshots:", err) + return nil + } - // Set expvars - kapacitor.NumTasksVar.Set(numTasks) - kapacitor.NumEnabledTasksVar.Set(numEnabledTasks) + newSnapshot := &Snapshot{ + NodeSnapshots: snapshot.NodeSnapshots, + } + id := string(k) + if exists, err := ts.snapshots.Exists(id); err == nil { + if !exists { + err := ts.snapshots.Put(string(k), newSnapshot) + if err != nil { + // Failed to migrate snapshot stop process. + return err + } + ts.logger.Printf("D! snapshot %s was migrated to new storage service", id) + } else { + ts.logger.Printf("D! snapshot %s skipped, already migrated to new storage service", id) + } + } else if err != nil { + return err + } + return nil + }) + }) + if err != nil { + return errors.Wrap(err, "migrating snapshots") + } return nil } func (ts *Service) Close() error { ts.HTTPDService.DelRoutes(ts.routes) - if ts.db != nil { - return ts.db.Close() - } return nil } -func (ts *Service) SaveSnapshot(name string, snapshot *kapacitor.TaskSnapshot) error { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(snapshot) +func (ts *Service) Load(id string) (*kapacitor.Task, error) { + t, err := ts.tasks.Get(id) if err != nil { - return fmt.Errorf("failed to encode task snapshot %s %v", name, err) + return nil, err } + return ts.newKapacitorTask(t) +} - err = ts.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(snapshotBucket) - if err != nil { - return err - } - - return b.Put([]byte(name), buf.Bytes()) - }) - if err != nil { - return err +func (ts *Service) SaveSnapshot(id string, snapshot *kapacitor.TaskSnapshot) error { + s := &Snapshot{ + NodeSnapshots: snapshot.NodeSnapshots, } - return nil + return ts.snapshots.Put(id, s) } -func (ts *Service) HasSnapshot(name string) bool { - err := ts.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(snapshotBucket) - if b == nil { - return fmt.Errorf("no snapshot found for task %s", name) - } - data := b.Get([]byte(name)) - if 
data == nil { - return fmt.Errorf("no snapshot found for task %s", name) - } - return nil - }) - return err == nil +func (ts *Service) HasSnapshot(id string) bool { + exists, err := ts.snapshots.Exists(id) + if err != nil { + ts.logger.Println("E! error checking for snapshot", err) + return false + } + return exists } -func (ts *Service) LoadSnapshot(name string) (*kapacitor.TaskSnapshot, error) { - var data []byte - err := ts.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(snapshotBucket) - if b == nil { - return fmt.Errorf("no snapshot found for task %s", name) - } - - data = b.Get([]byte(name)) - if data == nil { - return fmt.Errorf("no snapshot found for task %s", name) - } - return nil - }) +func (ts *Service) LoadSnapshot(id string) (*kapacitor.TaskSnapshot, error) { + snapshot, err := ts.snapshots.Get(id) if err != nil { return nil, err } - buf := bytes.NewBuffer(data) - dec := gob.NewDecoder(buf) - snapshot := &kapacitor.TaskSnapshot{} - err = dec.Decode(snapshot) - if err != nil { - return nil, err + s := &kapacitor.TaskSnapshot{ + NodeSnapshots: snapshot.NodeSnapshots, } - return snapshot, nil + return s, nil } type TaskInfo struct { Name string Type kapacitor.TaskType - DBRPs []kapacitor.DBRP + DBRPs []DBRP TICKscript string Dot string Enabled bool @@ -278,42 +368,21 @@ type TaskInfo struct { } func (ts *Service) handleTask(w http.ResponseWriter, r *http.Request) { - name := r.URL.Query().Get("name") - if name == "" { - httpd.HttpError(w, "must pass task name", true, http.StatusBadRequest) + _, id := path.Split(r.URL.Path) + if id == "" { + httpd.HttpError(w, "must specify task id on path", true, http.StatusBadRequest) return } - labels := false - labelsStr := r.URL.Query().Get("labels") - if labelsStr != "" { - var err error - labels, err = strconv.ParseBool(labelsStr) - if err != nil { - httpd.HttpError(w, "invalid labels value:", true, http.StatusBadRequest) - return - } - - } - - skipFormat := false - skipFormatStr := 
r.URL.Query().Get("skip-format") - if skipFormatStr != "" { - var err error - skipFormat, err = strconv.ParseBool(skipFormatStr) - if err != nil { - httpd.HttpError(w, "invalid skip-format value:", true, http.StatusBadRequest) - return - } - - } - raw, err := ts.LoadRaw(name) + raw, err := ts.tasks.Get(id) if err != nil { httpd.HttpError(w, err.Error(), true, http.StatusNotFound) return } - if !skipFormat { + scriptFormat := r.URL.Query().Get("script-format") + switch scriptFormat { + case "", "formatted": // Format TICKscript formatted, err := tick.Format(raw.TICKscript) if err == nil { @@ -321,17 +390,38 @@ func (ts *Service) handleTask(w http.ResponseWriter, r *http.Request) { // Otherwise a change in syntax may prevent task retrieval. raw.TICKscript = formatted } + case "raw": + default: + httpd.HttpError(w, fmt.Sprintf("invalid script-format parameter %q", scriptFormat), true, http.StatusBadRequest) + return } - executing := ts.TaskMaster.IsExecuting(name) + dotView := r.URL.Query().Get("dot-view") + switch dotView { + case "": + dotView = "attributes" + case "attributes": + case "labels": + default: + httpd.HttpError(w, fmt.Sprintf("invalid dot-view parameter %q", dotView), true, http.StatusBadRequest) + return + } + + executing := ts.TaskMaster.IsExecuting(id) errMsg := raw.Error dot := "" - stats := kapacitor.ExecutionStats{} - task, err := ts.Load(name) + stats := client.ExecutionStats{} + task, err := ts.newKapacitorTask(raw) if err == nil { if executing { - dot = ts.TaskMaster.ExecutingDot(name, labels) - stats, _ = ts.TaskMaster.ExecutionStats(name) + dot = ts.TaskMaster.ExecutingDot(id, dotView == "labels") + s, err := ts.TaskMaster.ExecutionStats(id) + if err != nil { + ts.logger.Printf("E! 
failed to retrieve stats for task %s: %v", id, err) + } else { + stats.TaskStats = s.TaskStats + stats.NodeStats = s.NodeStats + } } else { dot = string(task.Dot()) } @@ -339,13 +429,43 @@ func (ts *Service) handleTask(w http.ResponseWriter, r *http.Request) { errMsg = err.Error() } - info := TaskInfo{ - Name: name, - Type: raw.Type, - DBRPs: raw.DBRPs, + var status client.TaskStatus + switch raw.Status { + case Disabled: + status = client.Disabled + case Enabled: + status = client.Enabled + default: + httpd.HttpError(w, fmt.Sprintf("invalid task status recorded in db %v", raw.Status), true, http.StatusInternalServerError) + return + } + + var typ client.TaskType + switch raw.Type { + case StreamTask: + typ = client.StreamTask + case BatchTask: + typ = client.BatchTask + default: + httpd.HttpError(w, fmt.Sprintf("invalid task type recorded in db %v", raw.Type), true, http.StatusInternalServerError) + return + } + + dbrps := make([]client.DBRP, len(raw.DBRPs)) + for i, dbrp := range raw.DBRPs { + dbrps[i] = client.DBRP{ + Database: dbrp.Database, + RetentionPolicy: dbrp.RetentionPolicy, + } + } + + info := client.Task{ + ID: id, + Type: typ, + DBRPs: dbrps, TICKscript: raw.TICKscript, Dot: dot, - Enabled: ts.IsEnabled(name), + Status: status, Executing: executing, Error: errMsg, ExecutionStats: stats, @@ -354,352 +474,414 @@ func (ts *Service) handleTask(w http.ResponseWriter, r *http.Request) { w.Write(httpd.MarshalJSON(info, true)) } -func (ts *Service) handleTasks(w http.ResponseWriter, r *http.Request) { - tasksStr := r.URL.Query().Get("tasks") - var tasks []string - if tasksStr != "" { - tasks = strings.Split(tasksStr, ",") +var allFields = []string{ + "link", + "id", + "type", + "dbrps", + "script", + "dot", + "status", + "executing", + "error", + "stats", +} + +func (ts *Service) taskLink(id string) client.Link { + return client.Link{Relation: client.Self, Href: path.Join(httpd.BasePath, "tasks", id)} +} + +func (ts *Service) handleListTasks(w 
http.ResponseWriter, r *http.Request) { + + pattern := r.URL.Query().Get("pattern") + fields := r.URL.Query()["fields"] + if len(fields) == 0 { + fields = allFields + } else { + // Always return ID field + fields = append(fields, "id", "link") } - infos, err := ts.GetTaskSummaryInfo(tasks) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusNotFound) + scriptFormat := r.URL.Query().Get("script-format") + switch scriptFormat { + case "formatted": + case "raw": + default: + httpd.HttpError(w, fmt.Sprintf("invalid script-format parameter %q", scriptFormat), true, http.StatusBadRequest) return } - type response struct { - Tasks []TaskSummaryInfo `json:"Tasks"` + dotView := r.URL.Query().Get("dot-view") + switch dotView { + case "attributes": + case "labels": + default: + httpd.HttpError(w, fmt.Sprintf("invalid dot-view parameter %q", dotView), true, http.StatusBadRequest) + return } - w.Write(httpd.MarshalJSON(response{infos}, true)) -} - -type rawTask struct { - // The name of the task. - Name string - // The TICKscript for the task. - TICKscript string - // Last error the task had either while defining or executing. - Error string - // The task type (stream|batch). - Type kapacitor.TaskType - // The DBs and RPs the task is allowed to access. 
- DBRPs []kapacitor.DBRP - SnapshotInterval time.Duration -} + offsetStr := r.URL.Query().Get("offset") + offset, err := strconv.ParseInt(offsetStr, 10, 64) + if err != nil { + httpd.HttpError(w, fmt.Sprintf("invalid offset parameter %q must be an integer: %s", offsetStr, err), true, http.StatusBadRequest) + } -func (ts *Service) handleSave(w http.ResponseWriter, r *http.Request) { - name := r.URL.Query().Get("name") - newTask := &rawTask{ - Name: name, - SnapshotInterval: ts.snapshotInterval, + limitStr := r.URL.Query().Get("limit") + limit, err := strconv.ParseInt(limitStr, 10, 64) + if err != nil { + httpd.HttpError(w, fmt.Sprintf("invalid limit parameter %q must be an integer: %s", limitStr, err), true, http.StatusBadRequest) + } + if limit == 0 { + limit = 100 } - // Check for existing task - raw, err := ts.LoadRaw(name) - exists := err == nil - if exists { - newTask = raw - } - - // Get task type - ttStr := r.URL.Query().Get("type") - switch ttStr { - case "stream": - newTask.Type = kapacitor.StreamTask - case "batch": - newTask.Type = kapacitor.BatchTask - default: - if !exists { - if ttStr == "" { - httpd.HttpError(w, fmt.Sprintf("no task with name %q exists cannot infer type.", name), true, http.StatusBadRequest) - } else { - httpd.HttpError(w, fmt.Sprintf("unknown type %q", ttStr), true, http.StatusBadRequest) + rawTasks, err := ts.tasks.List(pattern, int(offset), int(limit)) + tasks := make([]map[string]interface{}, len(rawTasks)) + + for i, task := range rawTasks { + tasks[i] = make(map[string]interface{}, len(fields)) + executing := ts.TaskMaster.IsExecuting(task.ID) + for _, field := range fields { + var value interface{} + switch field { + case "id": + value = task.ID + case "link": + value = ts.taskLink(task.ID) + case "type": + switch task.Type { + case StreamTask: + value = client.StreamTask + case BatchTask: + value = client.BatchTask + } + case "dbrps": + dbrps := make([]client.DBRP, len(task.DBRPs)) + for i, dbrp := range task.DBRPs { + 
dbrps[i] = client.DBRP{ + Database: dbrp.Database, + RetentionPolicy: dbrp.RetentionPolicy, + } + } + value = dbrps + case "script": + value = task.TICKscript + if scriptFormat == "formatted" { + formatted, err := tick.Format(task.TICKscript) + if err == nil { + // Only format if it succeeded. + // Otherwise a change in syntax may prevent task retrieval. + value = formatted + } + } + case "executing": + value = executing + case "dot": + if executing { + value = ts.TaskMaster.ExecutingDot(task.ID, dotView == "labels") + } else { + kt, err := ts.newKapacitorTask(task) + if err != nil { + break + } + value = string(kt.Dot()) + } + case "stats": + if executing { + s, err := ts.TaskMaster.ExecutionStats(task.ID) + if err != nil { + ts.logger.Printf("E! failed to retrieve stats for task %s: %v", task.ID, err) + } else { + value = client.ExecutionStats{ + TaskStats: s.TaskStats, + NodeStats: s.NodeStats, + } + } + } + case "error": + value = task.Error + case "status": + switch task.Status { + case Disabled: + value = client.Disabled + case Enabled: + value = client.Enabled + } } - return + tasks[i][field] = value } } - // Get tick script - tick, err := ioutil.ReadAll(r.Body) + type response struct { + Tasks []map[string]interface{} `json:"tasks"` + } + + w.Write(httpd.MarshalJSON(response{tasks}, true)) +} + +var validTaskID = regexp.MustCompile(`^[-\w]+$`) + +func (ts *Service) handleCreateTask(w http.ResponseWriter, r *http.Request) { + task := client.CreateTaskOptions{} + dec := json.NewDecoder(r.Body) + err := dec.Decode(&task) if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + httpd.HttpError(w, "invalid JSON", true, http.StatusBadRequest) return } - if len(tick) > 0 { - newTask.TICKscript = string(tick) - } else if !exists { - httpd.HttpError(w, fmt.Sprintf("must provide TICKscript via POST data."), true, http.StatusBadRequest) - return + if task.ID == "" { + task.ID = uuid.NewV4().String() } - - // Get dbrps - dbrpsStr := 
r.URL.Query().Get("dbrps") - if dbrpsStr != "" { - dbrps := make([]kapacitor.DBRP, 0) - err = json.Unmarshal([]byte(dbrpsStr), &dbrps) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) - return - } - newTask.DBRPs = dbrps - } else if !exists { - httpd.HttpError(w, fmt.Sprintf("must provide at least one database and retention policy."), true, http.StatusBadRequest) + if !validTaskID.MatchString(task.ID) { + httpd.HttpError(w, fmt.Sprintf("task ID must match %v %q", validTaskID, task.ID), true, http.StatusBadRequest) return } - // Get snapshot interval - snapshotIntervalStr := r.URL.Query().Get("snapshot") - if snapshotIntervalStr != "" { - snapshotInterval, err := influxql.ParseDuration(snapshotIntervalStr) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) - return - } - newTask.SnapshotInterval = snapshotInterval + newTask := Task{ + ID: task.ID, } - err = ts.Save(newTask) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + // Check for existing task + _, err = ts.tasks.Get(task.ID) + if err == nil { + httpd.HttpError(w, "task already exists", true, http.StatusBadRequest) return } - w.WriteHeader(http.StatusNoContent) -} -func (ts *Service) handleDelete(w http.ResponseWriter, r *http.Request) { - name := r.URL.Query().Get("name") + // Set task type + switch task.Type { + case client.StreamTask: + newTask.Type = StreamTask + case client.BatchTask: + newTask.Type = BatchTask + default: + httpd.HttpError(w, fmt.Sprintf("unknown type %q", task.Type), true, http.StatusBadRequest) + return + } - err := ts.Delete(name) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + // Set tick script + newTask.TICKscript = task.TICKscript + if newTask.TICKscript == "" { + httpd.HttpError(w, fmt.Sprintf("must provide TICKscript"), true, http.StatusBadRequest) return } - w.WriteHeader(http.StatusNoContent) -} -func (ts *Service) handleEnable(w 
http.ResponseWriter, r *http.Request) { - name := r.URL.Query().Get("name") - err := ts.Enable(name) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + // Set dbrps + newTask.DBRPs = make([]DBRP, len(task.DBRPs)) + for i, dbrp := range task.DBRPs { + newTask.DBRPs[i] = DBRP{ + Database: dbrp.Database, + RetentionPolicy: dbrp.RetentionPolicy, + } + } + if len(newTask.DBRPs) == 0 { + httpd.HttpError(w, fmt.Sprintf("must provide at least one database and retention policy."), true, http.StatusBadRequest) return } - w.WriteHeader(http.StatusNoContent) -} -func (ts *Service) handleDisable(w http.ResponseWriter, r *http.Request) { - name := r.URL.Query().Get("name") - err := ts.Disable(name) - if err != nil { - httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + // Set status + switch task.Status { + case client.Enabled: + newTask.Status = Enabled + case client.Disabled: + newTask.Status = Disabled + default: + httpd.HttpError(w, fmt.Sprintf("invalid status field %q", task.Status), true, http.StatusBadRequest) return } - w.WriteHeader(http.StatusNoContent) -} -func (ts *Service) Save(task *rawTask) error { // Validate task - _, err := ts.TaskMaster.NewTask(task.Name, - task.TICKscript, - task.Type, - task.DBRPs, - task.SnapshotInterval, - ) + ktask, err := ts.newKapacitorTask(newTask) if err != nil { - return fmt.Errorf("invalid task: %s", err) - } - - // Write 0 snapshot interval if it is the default. - // This way if the default changes the task will change too. 
- if task.SnapshotInterval == ts.snapshotInterval { - task.SnapshotInterval = 0 + httpd.HttpError(w, "invalid TICKscript: "+err.Error(), true, http.StatusBadRequest) + return } - var buf bytes.Buffer - - enc := gob.NewEncoder(&buf) - err = enc.Encode(task) + err = ts.tasks.Create(newTask) if err != nil { - return err + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return } - - err = ts.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(tasksBucket) + if newTask.Status == Enabled { + err = ts.startTask(newTask) if err != nil { - return err + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return } - exists := b.Get([]byte(task.Name)) != nil - err = b.Put([]byte(task.Name), buf.Bytes()) + } + w.WriteHeader(http.StatusOK) + + executing := ts.TaskMaster.IsExecuting(newTask.ID) + dot := "" + stats := client.ExecutionStats{} + if executing { + dot = ts.TaskMaster.ExecutingDot(newTask.ID, false) + s, err := ts.TaskMaster.ExecutionStats(newTask.ID) if err != nil { - return err - } - if !exists { - kapacitor.NumTasksVar.Add(1) + ts.logger.Printf("E! 
failed to retrieve stats for task %s: %v", newTask.ID, err) + } else { + stats.TaskStats = s.TaskStats + stats.NodeStats = s.NodeStats } - return nil - }) - return err -} - -func (ts *Service) deleteTask(name string) error { - ts.TaskMaster.StopTask(name) + } else { + dot = string(ktask.Dot()) + } - return ts.db.Update(func(tx *bolt.Tx) error { - tb := tx.Bucket(tasksBucket) - if tb != nil { - exists := tb.Get([]byte(name)) != nil - if exists { - tb.Delete([]byte(name)) - kapacitor.NumTasksVar.Add(-1) - } - } - eb := tx.Bucket(enabledBucket) - if eb != nil { - eb.Delete([]byte(name)) - } - return nil - }) + t := client.Task{ + Link: ts.taskLink(newTask.ID), + ID: newTask.ID, + Type: task.Type, + DBRPs: task.DBRPs, + TICKscript: task.TICKscript, + Status: task.Status, + Dot: dot, + Executing: executing, + ExecutionStats: stats, + } + w.Write(httpd.MarshalJSON(t, true)) } -func (ts *Service) Delete(pattern string) error { - rawTasks, err := ts.FindTasks(func(taskName string) (bool, error) { - matched, err := filepath.Match(pattern, taskName) - if err != nil { - return false, err - } - - return matched, nil - }) +func (ts *Service) handleUpdateTask(w http.ResponseWriter, r *http.Request) { + _, id := path.Split(r.URL.Path) + task := client.UpdateTaskOptions{} + dec := json.NewDecoder(r.Body) + err := dec.Decode(&task) + if err != nil { + httpd.HttpError(w, "invalid JSON", true, http.StatusBadRequest) + return + } + // Check for existing task + existing, err := ts.tasks.Get(id) if err != nil { - return nil + httpd.HttpError(w, "task does not exist, cannot update", true, http.StatusNotFound) + return } - for _, rawTask := range rawTasks { - err = ts.deleteTask(rawTask.Name) - if err != nil { - return err - } + // Set task type + switch task.Type { + case client.StreamTask: + existing.Type = StreamTask + case client.BatchTask: + existing.Type = BatchTask } - return nil -} + // Set tick script + if task.TICKscript != "" { + existing.TICKscript = task.TICKscript + } -func 
(ts *Service) LoadRaw(name string) (*rawTask, error) { - var data []byte - err := ts.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(tasksBucket) - if b == nil { - return errors.New("no tasks bucket") + // Set dbrps + if len(task.DBRPs) > 0 { + existing.DBRPs = make([]DBRP, len(task.DBRPs)) + for i, dbrp := range task.DBRPs { + existing.DBRPs[i] = DBRP{ + Database: dbrp.Database, + RetentionPolicy: dbrp.RetentionPolicy, + } } - data = b.Get([]byte(name)) - return nil - }) - if err != nil { - return nil, err } - if len(data) == 0 { - return nil, fmt.Errorf("unknown task %s", name) - } - buf := bytes.NewBuffer(data) - dec := gob.NewDecoder(buf) - task := &rawTask{} - err = dec.Decode(task) - if task.SnapshotInterval == 0 { - task.SnapshotInterval = ts.snapshotInterval + + // Set status + previousStatus := existing.Status + switch task.Status { + case client.Enabled: + existing.Status = Enabled + case client.Disabled: + existing.Status = Disabled } + statusChanged := previousStatus != existing.Status + + // Validate task + _, err = ts.newKapacitorTask(existing) if err != nil { - return nil, err + httpd.HttpError(w, "invalid TICKscript: "+err.Error(), true, http.StatusBadRequest) + return } - return task, nil -} -func (ts *Service) Load(name string) (*kapacitor.Task, error) { - task, err := ts.LoadRaw(name) + err = ts.tasks.Replace(existing) if err != nil { - return nil, err + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return } - return ts.CreateTaskFromRaw(task) -} + if statusChanged { + // Enable/Disable task + switch existing.Status { + case Enabled: + err = ts.startTask(existing) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return + } + case Disabled: + ts.stopTask(existing.ID) + } + } -func (ts *Service) CreateTaskFromRaw(task *rawTask) (*kapacitor.Task, error) { - return ts.TaskMaster.NewTask(task.Name, - task.TICKscript, - task.Type, - task.DBRPs, - task.SnapshotInterval, - ) + 
w.WriteHeader(http.StatusNoContent) } -func (ts *Service) enableRawTask(rawTask *rawTask) error { - t, err := ts.CreateTaskFromRaw(rawTask) +func (ts *Service) handleDeleteTask(w http.ResponseWriter, r *http.Request) { + _, id := path.Split(r.URL.Path) + + err := ts.deleteTask(id) if err != nil { - return err + httpd.HttpError(w, err.Error(), true, http.StatusInternalServerError) + return } + w.WriteHeader(http.StatusNoContent) +} - // Save the enabled state - var enabled bool - - err = ts.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(enabledBucket) - if err != nil { - return err - } - enabled = b.Get([]byte(t.Name)) != nil - err = b.Put([]byte(t.Name), []byte{}) - if err != nil { - return err - } - if !enabled { - kapacitor.NumEnabledTasksVar.Add(1) - } - return nil - }) +func (ts *Service) deleteTask(id string) error { + task, err := ts.tasks.Get(id) if err != nil { + if err == ErrNoTaskExists { + return nil + } return err } - - if !enabled { - return ts.StartTask(t) + if task.Status == Enabled { + ts.stopTask(id) } - return nil + return ts.tasks.Delete(id) } -func (ts *Service) Enable(pattern string) error { - // Find the matching tasks - rawTasks, err := ts.FindTasks(func(taskName string) (bool, error) { - matched, err := filepath.Match(pattern, taskName) - if err != nil { - return false, err +func (ts *Service) newKapacitorTask(task Task) (*kapacitor.Task, error) { + dbrps := make([]kapacitor.DBRP, len(task.DBRPs)) + for i, dbrp := range task.DBRPs { + dbrps[i] = kapacitor.DBRP{ + Database: dbrp.Database, + RetentionPolicy: dbrp.RetentionPolicy, } - - return matched, nil - }) - - if err != nil { - return nil } - - for _, rawTask := range rawTasks { - err = ts.enableRawTask(rawTask) - if err != nil { - return nil - } + var tt kapacitor.TaskType + switch task.Type { + case StreamTask: + tt = kapacitor.StreamTask + case BatchTask: + tt = kapacitor.BatchTask } - - return nil + return ts.TaskMaster.NewTask(task.ID, + task.TICKscript, + 
tt, + dbrps, + ts.snapshotInterval, + ) } -func (ts *Service) StartTask(t *kapacitor.Task) error { +func (ts *Service) startTask(task Task) error { + t, err := ts.newKapacitorTask(task) + if err != nil { + return err + } // Starting task, remove last error - ts.SaveLastError(t.Name, "") + ts.saveLastError(t.ID, "") // Start the task et, err := ts.TaskMaster.StartTask(t) if err != nil { - ts.SaveLastError(t.Name, err.Error()) + ts.saveLastError(t.ID, err.Error()) return err } @@ -707,202 +889,43 @@ func (ts *Service) StartTask(t *kapacitor.Task) error { if t.Type == kapacitor.BatchTask { err := et.StartBatching() if err != nil { - ts.SaveLastError(t.Name, err.Error()) - ts.TaskMaster.StopTask(t.Name) + ts.saveLastError(t.ID, err.Error()) + ts.stopTask(t.ID) return err } } + kapacitor.NumEnabledTasksVar.Add(1) + go func() { // Wait for task to finish err := et.Wait() // Stop task - ts.TaskMaster.StopTask(et.Task.Name) + ts.stopTask(t.ID) if err != nil { - ts.logger.Printf("E! task %s finished with error: %s", et.Task.Name, err) + ts.logger.Printf("E! task %s finished with error: %s", et.Task.ID, err) // Save last error from task. - err = ts.SaveLastError(t.Name, err.Error()) + err = ts.saveLastError(t.ID, err.Error()) if err != nil { - ts.logger.Println("E! failed to save last error for task", et.Task.Name) + ts.logger.Println("E! failed to save last error for task", et.Task.ID) } } }() return nil } -// Save last error from task. 
-func (ts *Service) SaveLastError(name string, errStr string) error { - - raw, err := ts.LoadRaw(name) - if err != nil { - return err - } - raw.Error = errStr - err = ts.Save(raw) - if err != nil { - return err - } - return nil +func (ts *Service) stopTask(id string) { + kapacitor.NumEnabledTasksVar.Add(-1) + ts.TaskMaster.StopTask(id) } -func (ts *Service) Disable(pattern string) error { - // Find the matching tasks - rawTasks, err := ts.FindTasks(func(taskName string) (bool, error) { - matched, err := filepath.Match(pattern, taskName) - if err != nil { - return false, err - } - - return matched, nil - }) - - if err != nil { - return nil - } - - // Delete the enabled state - err = ts.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(enabledBucket) - if err != nil { - return err - } - for _, rawTask := range rawTasks { - enabled := b.Get([]byte(rawTask.Name)) != nil - if enabled { - err = b.Delete([]byte(rawTask.Name)) - if err != nil { - return err - } - kapacitor.NumEnabledTasksVar.Add(-1) - } - } - return nil - }) - +// Save last error from task. 
+func (ts *Service) saveLastError(id string, errStr string) error { + task, err := ts.tasks.Get(id) if err != nil { return err } - - for _, rawTask := range rawTasks { - err = ts.TaskMaster.StopTask(rawTask.Name) - if err != nil { - return err - } - } - - return nil -} - -type TaskSummaryInfo struct { - Name string - Type kapacitor.TaskType - DBRPs []kapacitor.DBRP - Enabled bool - Executing bool - ExecutionStats kapacitor.ExecutionStats -} - -func (ts *Service) IsEnabled(name string) (e bool) { - ts.db.View(func(tx *bolt.Tx) error { - eb := tx.Bucket([]byte(enabledBucket)) - e = eb != nil && eb.Get([]byte(name)) != nil - return nil - }) - return -} - -// Returns all taskInfo of task name that matches the predicate -func (ts *Service) FindTasks(predicate func(string) (bool, error)) ([]*rawTask, error) { - rawTasks := make([]*rawTask, 0) - - err := ts.db.View(func(tx *bolt.Tx) error { - tb := tx.Bucket([]byte(tasksBucket)) - if tb == nil { - return nil - } - - return tb.ForEach(func(k, v []byte) error { - taskName := string(k) - isMatched, err := predicate(taskName) - if err != nil { - return err - } - if !isMatched { - return nil - } - - // Grab task info - t, err := ts.LoadRaw(taskName) - if err != nil { - return fmt.Errorf("found invalid task in db. name: %s, err: %s", string(k), err) - } - - rawTasks = append(rawTasks, t) - return nil - }) - - }) - if err != nil { - return nil, err - } - - return rawTasks, nil -} - -func (ts *Service) GetTaskSummaryInfo(tasks []string) ([]TaskSummaryInfo, error) { - taskInfos := make([]TaskSummaryInfo, 0) - - err := ts.db.View(func(tx *bolt.Tx) error { - tb := tx.Bucket([]byte(tasksBucket)) - if tb == nil { - return nil - } - eb := tx.Bucket([]byte(enabledBucket)) - // Grab task info - f := func(k, v []byte) error { - t, err := ts.LoadRaw(string(k)) - if err != nil { - return fmt.Errorf("found invalid task in db. 
name: %s, err: %s", string(k), err) - } - - enabled := eb != nil && eb.Get(k) != nil - - info := TaskSummaryInfo{ - Name: t.Name, - Type: t.Type, - DBRPs: t.DBRPs, - Enabled: enabled, - Executing: ts.TaskMaster.IsExecuting(t.Name), - } - - if info.Executing { - executionStats, err := ts.TaskMaster.ExecutionStats(t.Name) - if err != nil { - return fmt.Errorf("failed to fetch execution stats. name: %s, err: %s", t.Name, err) - } - info.ExecutionStats = executionStats - } - - taskInfos = append(taskInfos, info) - return nil - } - - if len(tasks) == 0 { - return tb.ForEach(f) - } else { - for _, tn := range tasks { - err := f([]byte(tn), []byte{}) - if err != nil { - return err - } - } - } - return nil - }) - if err != nil { - return nil, err - } - - return taskInfos, nil + task.Error = errStr + return ts.tasks.Replace(task) } diff --git a/task.go b/task.go index 0940a69e3..bf409ec99 100644 --- a/task.go +++ b/task.go @@ -64,9 +64,9 @@ func (d DBRP) String() string { return fmt.Sprintf("%q.%q", d.Database, d.RetentionPolicy) } -// The complete definition of a task, its name, pipeline and type. +// The complete definition of a task, its id, pipeline and type. type Task struct { - Name string + ID string Pipeline *pipeline.Pipeline Type TaskType DBRPs []DBRP @@ -74,7 +74,7 @@ type Task struct { } func (t *Task) Dot() []byte { - return t.Pipeline.Dot(t.Name) + return t.Pipeline.Dot(t.ID) } // returns all the measurements from a FromNode @@ -115,7 +115,7 @@ type ExecutingTask struct { // Create a new task from a defined kapacitor. 
func NewExecutingTask(tm *TaskMaster, t *Task) (*ExecutingTask, error) { - l := tm.LogService.NewLogger(fmt.Sprintf("[task:%s] ", t.Name), log.LstdFlags) + l := tm.LogService.NewLogger(fmt.Sprintf("[task:%s] ", t.ID), log.LstdFlags) et := &ExecutingTask{ tm: tm, Task: t, @@ -158,7 +158,7 @@ func (et *ExecutingTask) link() error { // Walk Pipeline and create equivalent executing nodes err := et.Task.Pipeline.Walk(func(n pipeline.Node) error { l := et.tm.LogService.NewLogger( - fmt.Sprintf("[%s:%s] ", et.Task.Name, n.Name()), + fmt.Sprintf("[%s:%s] ", et.Task.ID, n.Name()), log.LstdFlags, ) en, err := et.createNode(n, l) @@ -373,7 +373,7 @@ func (et *ExecutingTask) EDot(labels bool) []byte { var buf bytes.Buffer buf.Write([]byte("digraph ")) - buf.Write([]byte(et.Task.Name)) + buf.Write([]byte(et.Task.ID)) buf.Write([]byte(" {\n")) // Write graph attributes unit := "points" @@ -529,7 +529,7 @@ func (et *ExecutingTask) runSnapshotter() { case <-ticker.C: snapshot, err := et.Snapshot() if err != nil { - et.logger.Println("E! failed to snapshot task", et.Task.Name, err) + et.logger.Println("E! failed to snapshot task", et.Task.ID, err) break } size := 0 @@ -538,9 +538,9 @@ func (et *ExecutingTask) runSnapshotter() { } // Only save the snapshot if it has content if size > 0 { - err = et.tm.TaskStore.SaveSnapshot(et.Task.Name, snapshot) + err = et.tm.TaskStore.SaveSnapshot(et.Task.ID, snapshot) if err != nil { - et.logger.Println("E! failed to save task snapshot", et.Task.Name, err) + et.logger.Println("E! 
failed to save task snapshot", et.Task.ID, err) } } case <-et.stopping: diff --git a/task_master.go b/task_master.go index a74254255..f7e771fa8 100644 --- a/task_master.go +++ b/task_master.go @@ -38,9 +38,9 @@ type TaskMaster struct { URL() string } TaskStore interface { - SaveSnapshot(name string, snapshot *TaskSnapshot) error - HasSnapshot(name string) bool - LoadSnapshot(name string) (*TaskSnapshot, error) + SaveSnapshot(id string, snapshot *TaskSnapshot) error + HasSnapshot(id string) bool + LoadSnapshot(id string) (*TaskSnapshot, error) } DeadmanService pipeline.DeadmanService @@ -105,13 +105,13 @@ type TaskMaster struct { writePointsIn StreamCollector // Forks of incoming streams - // We are mapping from (db, rp, measurement) to map of task names to their edges + // We are mapping from (db, rp, measurement) to map of task ids to their edges // The outer map (from dbrp&measurement) is for fast access on forkPoint // While the inner map is for handling fork deletions better (see taskToForkKeys) forks map[forkKey]map[string]*Edge // Task to fork keys is map to help in deletes, in deletes - // we have only the task name, and they are called after the task is deleted from TaskMaster.tasks + // we have only the task id, and they are called after the task is deleted from TaskMaster.tasks taskToForkKeys map[string][]forkKey // Set of incoming batches @@ -190,7 +190,7 @@ func (tm *TaskMaster) StopTasks() { tm.mu.Lock() defer tm.mu.Unlock() for _, et := range tm.tasks { - tm.stopTask(et.Task.Name) + tm.stopTask(et.Task.ID) } } @@ -203,7 +203,7 @@ func (tm *TaskMaster) Close() error { } tm.closed = true for _, et := range tm.tasks { - tm.stopTask(et.Task.Name) + tm.stopTask(et.Task.ID) } tm.logger.Println("I! 
closed") return nil @@ -215,21 +215,21 @@ func (tm *TaskMaster) Drain() { defer tm.mu.Unlock() // TODO(yosia): handle this thing ;) - for name, _ := range tm.taskToForkKeys { - tm.delFork(name) + for id, _ := range tm.taskToForkKeys { + tm.delFork(id) } } // Create a new task in the context of a TaskMaster func (tm *TaskMaster) NewTask( - name, + id, script string, tt TaskType, dbrps []DBRP, snapshotInterval time.Duration, ) (*Task, error) { t := &Task{ - Name: name, + ID: id, Type: tt, DBRPs: dbrps, SnapshotInterval: snapshotInterval, @@ -297,7 +297,7 @@ func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error) { if tm.closed { return nil, errors.New("task master is closed cannot start a task") } - tm.logger.Println("D! Starting task:", t.Name) + tm.logger.Println("D! Starting task:", t.ID) et, err := NewExecutingTask(tm, t) if err != nil { return nil, err @@ -306,7 +306,7 @@ func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error) { var ins []*Edge switch et.Task.Type { case StreamTask: - e, err := tm.newFork(et.Task.Name, et.Task.DBRPs, et.Task.Measurements()) + e, err := tm.newFork(et.Task.ID, et.Task.DBRPs, et.Task.Measurements()) if err != nil { return nil, err } @@ -318,15 +318,15 @@ func (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error) { } ins = make([]*Edge, count) for i := 0; i < count; i++ { - in := newEdge(t.Name, "batch", fmt.Sprintf("batch%d", i), pipeline.BatchEdge, defaultEdgeBufferSize, tm.LogService) + in := newEdge(t.ID, "batch", fmt.Sprintf("batch%d", i), pipeline.BatchEdge, defaultEdgeBufferSize, tm.LogService) ins[i] = in - tm.batches[t.Name] = append(tm.batches[t.Name], in) + tm.batches[t.ID] = append(tm.batches[t.ID], in) } } var snapshot *TaskSnapshot - if tm.TaskStore.HasSnapshot(t.Name) { - snapshot, err = tm.TaskStore.LoadSnapshot(t.Name) + if tm.TaskStore.HasSnapshot(t.ID) { + snapshot, err = tm.TaskStore.LoadSnapshot(t.ID) if err != nil { return nil, err } @@ -337,52 +337,52 @@ func (tm *TaskMaster) 
StartTask(t *Task) (*ExecutingTask, error) { return nil, err } - tm.tasks[et.Task.Name] = et - tm.logger.Println("I! Started task:", t.Name) + tm.tasks[et.Task.ID] = et + tm.logger.Println("I! Started task:", t.ID) tm.logger.Println("D!", string(t.Dot())) return et, nil } -func (tm *TaskMaster) BatchCollectors(name string) []BatchCollector { - return tm.batches[name] +func (tm *TaskMaster) BatchCollectors(id string) []BatchCollector { + return tm.batches[id] } -func (tm *TaskMaster) StopTask(name string) error { +func (tm *TaskMaster) StopTask(id string) error { tm.mu.Lock() defer tm.mu.Unlock() - return tm.stopTask(name) + return tm.stopTask(id) } // internal stopTask function. The caller must have acquired // the lock in order to call this function -func (tm *TaskMaster) stopTask(name string) (err error) { - if et, ok := tm.tasks[name]; ok { - delete(tm.tasks, name) +func (tm *TaskMaster) stopTask(id string) (err error) { + if et, ok := tm.tasks[id]; ok { + delete(tm.tasks, id) if et.Task.Type == StreamTask { - tm.delFork(name) + tm.delFork(id) } err = et.stop() if err != nil { - tm.logger.Println("E! Stopped task:", name, err) + tm.logger.Println("E! Stopped task:", id, err) } else { - tm.logger.Println("I! Stopped task:", name) + tm.logger.Println("I! 
Stopped task:", id) } } return } -func (tm *TaskMaster) IsExecuting(name string) bool { +func (tm *TaskMaster) IsExecuting(id string) bool { tm.mu.RLock() defer tm.mu.RUnlock() - _, executing := tm.tasks[name] + _, executing := tm.tasks[id] return executing } -func (tm *TaskMaster) ExecutionStats(name string) (ExecutionStats, error) { +func (tm *TaskMaster) ExecutionStats(id string) (ExecutionStats, error) { tm.mu.RLock() defer tm.mu.RUnlock() - task, executing := tm.tasks[name] + task, executing := tm.tasks[id] if !executing { return ExecutionStats{}, nil } @@ -390,10 +390,10 @@ func (tm *TaskMaster) ExecutionStats(name string) (ExecutionStats, error) { return task.ExecutionStats() } -func (tm *TaskMaster) ExecutingDot(name string, labels bool) string { +func (tm *TaskMaster) ExecutingDot(id string, labels bool) string { tm.mu.RLock() defer tm.mu.RUnlock() - et, executing := tm.tasks[name] + et, executing := tm.tasks[id] if executing { return string(et.EDot(labels)) } @@ -526,24 +526,24 @@ func (tm *TaskMaster) newFork(taskName string, dbrps []DBRP, measurements []stri return e, nil } -func (tm *TaskMaster) DelFork(name string) { +func (tm *TaskMaster) DelFork(id string) { tm.mu.Lock() defer tm.mu.Unlock() - tm.delFork(name) + tm.delFork(id) } // internal delFork function, must have lock to call -func (tm *TaskMaster) delFork(name string) { +func (tm *TaskMaster) delFork(id string) { // mark if we already closed the edge because the edge is replicated // by it's fork keys (db,rp,measurement) isEdgeClosed := false // Find the fork keys - for _, key := range tm.taskToForkKeys[name] { + for _, key := range tm.taskToForkKeys[id] { // check if the edge exists - edge, ok := tm.forks[key][name] + edge, ok := tm.forks[key][id] if ok { // Only close the edge if we are already didn't closed it @@ -553,23 +553,23 @@ func (tm *TaskMaster) delFork(name string) { } // remove the task in fork map - delete(tm.forks[key], name) + delete(tm.forks[key], id) } } - // remove mapping 
from task name to it's keys - delete(tm.taskToForkKeys, name) + // remove mapping from task id to it's keys + delete(tm.taskToForkKeys, id) } -func (tm *TaskMaster) SnapshotTask(name string) (*TaskSnapshot, error) { +func (tm *TaskMaster) SnapshotTask(id string) (*TaskSnapshot, error) { tm.mu.RLock() - et, ok := tm.tasks[name] + et, ok := tm.tasks[id] tm.mu.RUnlock() if ok { return et.Snapshot() } - return nil, fmt.Errorf("task %s is not running or does not exist", name) + return nil, fmt.Errorf("task %s is not running or does not exist", id) } type noOpTimingService struct{} diff --git a/udf.go b/udf.go index 9c0425118..9db3928f5 100644 --- a/udf.go +++ b/udf.go @@ -58,7 +58,9 @@ func (u *UDFNode) stopUDF() { defer u.mu.Unlock() if !u.stopped { u.stopped = true - u.udf.Abort(errNodeAborted) + if u.udf != nil { + u.udf.Abort(errNodeAborted) + } } } diff --git a/vendor.yml b/vendor.yml index 1367630ed..be0bef511 100644 --- a/vendor.yml +++ b/vendor.yml @@ -1,22 +1,22 @@ vendors: - path: github.com/BurntSushi/toml - rev: bbd5bb678321a0d6e58f1099321dfa73391c1b6f + rev: f0aeabca5a127c4078abb8c8d64298b147264b55 - path: github.com/boltdb/bolt - rev: 144418e1475d8bf7abbdc48583500f1a20c62ea7 + rev: d97499360d1ecebc492ea66c7447ea948f417620 - path: github.com/cenkalti/backoff - rev: 32cd0c5b3aef12c76ed64aaf678f6c79736be7dc + rev: c29158af31815ccc31ca29c86c121bc39e00d3d8 - path: github.com/davecgh/go-spew rev: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - path: github.com/dustin/go-humanize rev: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0 - path: github.com/gogo/protobuf - rev: 4f262e4b0f3a6cea646e15798109335551e21756 + rev: c3995ae437bb78d1189f4f147dfe5f87ad3596e4 - path: github.com/golang/protobuf - rev: 2ebff28ac76fb19e2d25e5ddd4885708dfdd5611 + rev: 7cc19b78d562895b13596ddce7aafb59dd789318 - path: github.com/gorhill/cronexpr rev: f0984319b44273e83de132089ae42b1810f4933b - path: github.com/influxdata/influxdb - rev: 
7def8bc0c98c04080ed649d3eb31fb00baf68482 + rev: 0cc99a3e27d5b7198c4fec51edbc694bd31d854f - path: github.com/influxdata/wlog rev: 7c63b0a71ef8300adc255344d275e10e5c3a71ec - path: github.com/influxdb/usage-client @@ -25,16 +25,18 @@ vendors: rev: b5dddb1667dcc1e6355b9305e2c1608a2db6983c - path: github.com/mattn/go-runewidth rev: d6bea18f789704b5f83375793155289da36a3c7f +- path: github.com/pkg/errors + rev: 42fa80f2ac6ed17a977ce826074bd3009593fa9d - path: github.com/pmezard/go-difflib rev: 792786c7400a136282c1664665ae0a8db921c6c2 - path: github.com/russross/blackfriday - rev: b43df972fb5fdf3af8d2e90f38a69d374fe26dd0 + rev: 43529be3978d5a7b8e144f459e3f96491acc569c - path: github.com/serenize/snaker rev: 8824b61eca66d308fcb2d515287d3d7a28dba8d6 - path: github.com/shurcooL/go rev: 07c46ca56e4820cfaf750f74e25bc671dccd2ba4 - path: github.com/shurcooL/markdownfmt - rev: 45e6ea2c4705675a93a32b5f548dbb7997826875 + rev: 818f50da77ed6ad125554fbb38f56e6ca4498963 - path: github.com/shurcooL/sanitized_anchor_name rev: 10ef21a441db47d8b13ebcc5fd2310f636973c77 - path: github.com/stretchr/testify @@ -42,9 +44,9 @@ vendors: - path: github.com/twinj/uuid rev: 89173bcdda19db0eb88aef1e1cb1cb2505561d31 - path: golang.org/x/crypto - rev: 2f6fccd33b9b1fc23ebb73ad4890698820f7174d + rev: 285fb2ed20d1dc450fc743a1b3ff7c36bef372b9 - path: golang.org/x/sys - rev: f64b50fbea64174967a8882830d621a18ee1548e + rev: b776ec39b3e54652e09028aaaaac9757f4f8211a - path: gopkg.in/alexcesaro/quotedprintable.v3 rev: 2caba252f4dc53eaf6b553000885530023f54623 - path: gopkg.in/gomail.v2 diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index c26b00c01..98c8aa667 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -103,6 +103,13 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { // This decoder will not handle cyclic types. 
If a cyclic type is passed, // `Decode` will not terminate. func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer type %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } p, err := parse(data) if err != nil { return MetaData{}, err @@ -111,7 +118,7 @@ func Decode(data string, v interface{}) (MetaData, error) { p.mapping, p.types, p.ordered, make(map[string]bool, len(p.ordered)), nil, } - return md, md.unify(p.mapping, rvalue(v)) + return md, md.unify(p.mapping, indirect(rv)) } // DecodeFile is just like Decode, except it will automatically read the diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 4e4c97aed..f538261ab 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -315,10 +315,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) { t := f.Type switch t.Kind() { case reflect.Struct: - addFields(t, frv, f.Index) - continue + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. 
+ if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct { + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { if !frv.IsNil() { addFields(t.Elem(), frv.Elem(), f.Index) } @@ -347,17 +353,18 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) { continue } - tag := sft.Tag.Get("toml") - if tag == "-" { + opts := getOptions(sft.Tag) + if opts.skip { continue } - keyName, opts := getOptions(tag) - if keyName == "" { - keyName = sft.Name + keyName := sft.Name + if opts.name != "" { + keyName = opts.name } - if _, ok := opts["omitempty"]; ok && isEmpty(sf) { + if opts.omitempty && isEmpty(sf) { continue - } else if _, ok := opts["omitzero"]; ok && isZero(sf) { + } + if opts.omitzero && isZero(sf) { continue } @@ -451,17 +458,30 @@ func tomlArrayType(rv reflect.Value) tomlType { return firstType } -func getOptions(keyName string) (string, map[string]struct{}) { - opts := make(map[string]struct{}) - ss := strings.Split(keyName, ",") - name := ss[0] - if len(ss) > 1 { - for _, opt := range ss { - opts[opt] = struct{}{} +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true } } - - return name, opts + return opts } func isZero(rv reflect.Value) bool { diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 9b20b3a81..a016dc230 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -3,6 +3,7 @@ package toml import ( "fmt" "strings" + "unicode" "unicode/utf8" ) @@ -166,6 +167,19 @@ func (lx 
*lexer) peek() rune { return r } +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + // errorf stops all lexing by emitting an error and returning `nil`. // Note that any value that is a character is escaped if it's a special // character (new lines, tabs, etc.). @@ -261,6 +275,7 @@ func lexArrayTableEnd(lx *lexer) stateFn { } func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) switch r := lx.peek(); { case r == tableEnd || r == eof: return lx.errorf("Unexpected end of table name. (Table names cannot " + @@ -277,24 +292,22 @@ func lexTableNameStart(lx *lexer) stateFn { } } -// lexTableName lexes the name of a table. It assumes that at least one +// lexBareTableName lexes the name of a table. It assumes that at least one // valid character for the table has already been read. func lexBareTableName(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): + r := lx.next() + if isBareKeyChar(r) { return lexBareTableName - case r == tableSep || r == tableEnd: - lx.backup() - lx.emitTrim(itemText) - return lexTableNameEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd } // lexTableNameEnd reads the end of a piece of a table name, optionally // consuming whitespace. 
func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) switch r := lx.next(); { case isWhitespace(r): return lexTableNameEnd @@ -338,11 +351,12 @@ func lexBareKey(lx *lexer) stateFn { case isBareKeyChar(r): return lexBareKey case isWhitespace(r): - lx.emitTrim(itemText) + lx.backup() + lx.emit(itemText) return lexKeyEnd case r == keySep: lx.backup() - lx.emitTrim(itemText) + lx.emit(itemText) return lexKeyEnd default: return lx.errorf("Bare keys cannot contain %q.", r) @@ -371,16 +385,19 @@ func lexValue(lx *lexer) stateFn { // In array syntax, the array states are responsible for ignoring new // lines. r := lx.next() - if isWhitespace(r) { + switch { + case isWhitespace(r): return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart } - - switch { - case r == arrayStart: + switch r { + case arrayStart: lx.ignore() lx.emit(itemArray) return lexArrayValue - case r == stringStart: + case stringStart: if lx.accept(stringStart) { if lx.accept(stringStart) { lx.ignore() // Ignore """ @@ -390,7 +407,7 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the '"' return lexString - case r == rawStringStart: + case rawStringStart: if lx.accept(rawStringStart) { if lx.accept(rawStringStart) { lx.ignore() // Ignore """ @@ -400,18 +417,19 @@ func lexValue(lx *lexer) stateFn { } lx.ignore() // ignore the "'" return lexRawString - case r == 't': - return lexTrue - case r == 'f': - return lexFalse - case r == '-': + case '+', '-': return lexNumberStart - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - case r == '.': // special error case, be kind to users + case '.': // special error case, be kind to users return lx.errorf("Floats must start with a digit, not '.'.") } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. 
not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } return lx.errorf("Expected value but found %q instead.", r) } @@ -618,33 +636,36 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { return lx.pop() } -// lexNumberOrDateStart consumes either a (positive) integer, float or -// datetime. It assumes that NO negative sign has been consumed. +// lexNumberOrDateStart consumes either an integer, a float, or datetime. func lexNumberOrDateStart(lx *lexer) stateFn { r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("Floats must start with a digit, not '.'.") } - return lexNumberOrDate + return lx.errorf("Expected a digit but got %q.", r) } -// lexNumberOrDate consumes either a (positive) integer, float or datetime. +// lexNumberOrDate consumes either an integer, float or datetime. func lexNumberOrDate(lx *lexer) stateFn { r := lx.next() - switch { - case r == '-': - if lx.pos-lx.start != 5 { - return lx.errorf("All ISO8601 dates must be in full Zulu form.") - } - return lexDateAfterYear - case isDigit(r): + if isDigit(r) { return lexNumberOrDate - case r == '.': - return lexFloatStart + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat } lx.backup() @@ -652,39 +673,28 @@ func lexNumberOrDate(lx *lexer) stateFn { return lx.pop() } -// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. -// It assumes that "YYYY-" has already been consumed. -func lexDateAfterYear(lx *lexer) stateFn { - formats := []rune{ - // digits are '0'. - // everything else is direct equality. 
- '0', '0', '-', '0', '0', - 'T', - '0', '0', ':', '0', '0', ':', '0', '0', - 'Z', +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime } - for _, f := range formats { - r := lx.next() - if f == '0' { - if !isDigit(r) { - return lx.errorf("Expected digit in ISO8601 datetime, "+ - "but found %q instead.", r) - } - } else if f != r { - return lx.errorf("Expected %q in ISO8601 datetime, "+ - "but found %q instead.", f, r) - } + switch r { + case '-', 'T', ':', '.', 'Z': + return lexDatetime } + + lx.backup() lx.emit(itemDatetime) return lx.pop() } -// lexNumberStart consumes either an integer or a float. It assumes that -// a negative sign has already been read, but that *no* digits have been -// consumed. lexNumberStart will move to the appropriate integer or float -// states. +// lexNumberStart consumes either an integer or a float. It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. func lexNumberStart(lx *lexer) stateFn { - // we MUST see a digit. Even floats have to start with a digit. + // We MUST see a digit. Even floats have to start with a digit. r := lx.next() if !isDigit(r) { if r == '.' { @@ -699,11 +709,14 @@ func lexNumberStart(lx *lexer) stateFn { // lexNumber consumes an integer or a float after seeing the first digit. func lexNumber(lx *lexer) stateFn { r := lx.next() - switch { - case isDigit(r): + if isDigit(r) { + return lexNumber + } + switch r { + case '_': return lexNumber - case r == '.': - return lexFloatStart + case '.', 'e', 'E': + return lexFloat } lx.backup() @@ -711,60 +724,42 @@ func lexNumber(lx *lexer) stateFn { return lx.pop() } -// lexFloatStart starts the consumption of digits of a float after a '.'. -// Namely, at least one digit is required. 
-func lexFloatStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - return lx.errorf("Floats must have a digit after the '.', but got "+ - "%q instead.", r) - } - return lexFloat -} - -// lexFloat consumes the digits of a float after a '.'. -// Assumes that one digit has been consumed after a '.' already. +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. func lexFloat(lx *lexer) stateFn { r := lx.next() if isDigit(r) { return lexFloat } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } lx.backup() lx.emit(itemFloat) return lx.pop() } -// lexConst consumes the s[1:] in s. It assumes that s[0] has already been -// consumed. -func lexConst(lx *lexer, s string) stateFn { - for i := range s[1:] { - if r := lx.next(); r != rune(s[i+1]) { - return lx.errorf("Expected %q, but found %q instead.", s[:i+1], - s[:i]+string(r)) +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if r == eof || isWhitespace(r) || isNL(r) { + lx.backup() + break } + rs = append(rs, r) } - return nil -} - -// lexTrue consumes the "rue" in "true". It assumes that 't' has already -// been consumed. -func lexTrue(lx *lexer) stateFn { - if fn := lexConst(lx, "true"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexFalse consumes the "alse" in "false". It assumes that 'f' has already -// been consumed. -func lexFalse(lx *lexer) stateFn { - if fn := lexConst(lx, "false"); fn != nil { - return fn + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() } - lx.emit(itemBool) - return lx.pop() + return lx.errorf("Expected value but found %q instead.", s) } // lexCommentStart begins the lexing of a comment. 
It will emit diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 6a82e84f6..a5625555c 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,7 +2,6 @@ package toml import ( "fmt" - "log" "strconv" "strings" "time" @@ -81,7 +80,7 @@ func (p *parser) next() item { } func (p *parser) bug(format string, v ...interface{}) { - log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...)) + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) } func (p *parser) expect(typ itemType) item { @@ -179,10 +178,18 @@ func (p *parser) value(it item) (interface{}, tomlType) { } p.bug("Expected boolean value, but got '%s'.", it.val) case itemInteger: - num, err := strconv.ParseInt(it.val, 10, 64) + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) if err != nil { - // See comment below for floats describing why we make a - // distinction between a bug and a user error. + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. 
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { @@ -194,29 +201,57 @@ func (p *parser) value(it item) (interface{}, tomlType) { } return num, p.typeOfPrimitive(it) case itemFloat: - num, err := strconv.ParseFloat(it.val, 64) + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) if err != nil { - // Distinguish float values. Normally, it'd be a bug if the lexer - // provides an invalid float, but it's possible that the float is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - // - // This is also true for integers. 
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { p.panicf("Float '%s' is out of the range of 64-bit "+ "IEEE-754 floating-point numbers.", it.val) } else { - p.bug("Expected float value, but got '%s'.", it.val) + p.panicf("Invalid float value: %q", it.val) } } return num, p.typeOfPrimitive(it) case itemDatetime: - t, err := time.Parse("2006-01-02T15:04:05Z", it.val) - if err != nil { - p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val) + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) } return t, p.typeOfPrimitive(it) case itemArray: @@ -239,6 +274,35 @@ func (p *parser) value(it item) (interface{}, tomlType) { panic("unreachable") } +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + // establishContext sets the current context of the parser, // where the context is either a hash or an array of hashes. Which one is // set depends on the value of the `array` parameter. 
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 6da608af4..608997c22 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -95,8 +95,8 @@ func typeFields(t reflect.Type) []field { if sf.PkgPath != "" && !sf.Anonymous { // unexported continue } - name, _ := getOptions(sf.Tag.Get("toml")) - if name == "-" { + opts := getOptions(sf.Tag) + if opts.skip { continue } index := make([]int, len(f.index)+1) @@ -110,8 +110,9 @@ func typeFields(t reflect.Type) []field { } // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name if name == "" { name = sf.Name } diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go index 850366561..1223493ca 100644 --- a/vendor/github.com/boltdb/bolt/db.go +++ b/vendor/github.com/boltdb/bolt/db.go @@ -36,6 +36,9 @@ const ( DefaultAllocSize = 16 * 1024 * 1024 ) +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. 
@@ -107,6 +110,8 @@ type DB struct { freelist *freelist stats Stats + pagePool sync.Pool + batchMu sync.Mutex batch *batch @@ -200,12 +205,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if _, err := db.file.ReadAt(buf[:], 0); err == nil { m := db.pageInBuffer(buf[:], 0).meta() if err := m.validate(); err != nil { - return nil, err + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) } - db.pageSize = int(m.pageSize) } } + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + // Memory map the data file. if err := db.mmap(options.InitialMmapSize); err != nil { _ = db.close() @@ -262,12 +282,13 @@ func (db *DB) mmap(minsz int) error { db.meta0 = db.page(0).meta() db.meta1 = db.page(1).meta() - // Validate the meta pages. - if err := db.meta0.validate(); err != nil { - return err - } - if err := db.meta1.validate(); err != nil { - return err + // Validate the meta pages. We only return an error if both meta pages fail + // validation, since meta0 failing validation means that it wasn't saved + // properly -- but we can recover using meta1. And vice-versa. + err0 := db.meta0.validate() + err1 := db.meta1.validate() + if err0 != nil && err1 != nil { + return err0 } return nil @@ -339,6 +360,7 @@ func (db *DB) init() error { m.root = bucket{root: 3} m.pgid = 4 m.txid = txid(i) + m.checksum = m.sum64() } // Write an empty freelist at page 3. @@ -778,16 +800,37 @@ func (db *DB) pageInBuffer(b []byte, id pgid) *page { // meta retrieves the current meta page reference. 
func (db *DB) meta() *meta { - if db.meta0.txid > db.meta1.txid { - return db.meta0 + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 } - return db.meta1 + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") } // allocate returns a contiguous block of memory starting at a given page. func (db *DB) allocate(count int) (*page, error) { // Allocate a temporary buffer for the page. - buf := make([]byte, count*db.pageSize) + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } p := (*page)(unsafe.Pointer(&buf[0])) p.overflow = uint32(count - 1) @@ -937,12 +980,12 @@ type meta struct { // validate checks the marker bytes and version of the meta page to ensure it matches this binary. func (m *meta) validate() error { - if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } else if m.magic != magic { + if m.magic != magic { return ErrInvalid } else if m.version != version { return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum } return nil } diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go index 6883786d5..a3620a3eb 100644 --- a/vendor/github.com/boltdb/bolt/errors.go +++ b/vendor/github.com/boltdb/bolt/errors.go @@ -12,7 +12,8 @@ var ( // already open. 
ErrDatabaseOpen = errors.New("database already open") - // ErrInvalid is returned when a data file is not a Bolt-formatted database. + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. ErrInvalid = errors.New("invalid database") // ErrVersionMismatch is returned when the data file was created with a diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go index b8510fdb8..1cfb4cde8 100644 --- a/vendor/github.com/boltdb/bolt/tx.go +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -473,6 +473,8 @@ func (tx *Tx) write() error { for _, p := range tx.pages { pages = append(pages, p) } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) sort.Sort(pages) // Write pages to disk in order. @@ -517,8 +519,22 @@ func (tx *Tx) write() error { } } - // Clear out page cache. - tx.pages = make(map[pgid]*page) + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } return nil } diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md index 020b8fbf3..7988c3084 100644 --- a/vendor/github.com/cenkalti/backoff/README.md +++ b/vendor/github.com/cenkalti/backoff/README.md @@ -97,20 +97,20 @@ return nil ```bash # install -$ go get github.com/cenkalti/backoff +$ go get github.com/cenk/backoff # test -$ cd $GOPATH/src/github.com/cenkalti/backoff +$ cd $GOPATH/src/github.com/cenk/backoff $ go get -t ./... 
$ go test -v -cover ``` -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png +[godoc]: https://godoc.org/github.com/cenk/backoff +[godoc image]: https://godoc.org/github.com/cenk/backoff?status.png +[travis]: https://travis-ci.org/cenk/backoff +[travis image]: https://travis-ci.org/cenk/backoff.png [google-http-java-client]: https://github.com/google/google-http-java-client [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ +[advanced example]: https://godoc.org/github.com/cenk/backoff#example_ diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md index fe5b0b0a6..293da37b2 100644 --- a/vendor/github.com/gogo/protobuf/Readme.md +++ b/vendor/github.com/gogo/protobuf/Readme.md @@ -1,7 +1,5 @@ # Protocol Buffers for Go with Gadgets -Drone.io Basic Tests: [![Build Status](https://drone.io/github.com/gogo/protobuf/status.png)](https://drone.io/github.com/gogo/protobuf/latest) - Travis CI Matrix Builds: [![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) ### Getting Started (Give me the speed I don't care about the rest) diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md index 87f818f2d..13e866b80 100644 --- a/vendor/github.com/influxdata/influxdb/CHANGELOG.md +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -2,6 +2,7 @@ ### Features +- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable. 
- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu - [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size. - [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs. @@ -12,26 +13,59 @@ - [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries. - [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic. - [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points. +- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool +- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path. +- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause. +- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold. +- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc. +- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM +- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE +- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations. +- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max(). +- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) 
call. ### Bugfixes - [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags. - [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations. - [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction -- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. - [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client. - [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv - [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database. - [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again. -- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. - [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info. - [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine. -- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution. - [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine. - [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't. +- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. 
+- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup +- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars +- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown. +- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs. +- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists +- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection +- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error. +- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series +- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals. +- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments +- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug +- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause. +- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval. 
+- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index + +## v0.12.2 [2016-04-20] + +### Bugfixes + +- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. - [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister. - [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries. -- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. +- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. @thbourlove +- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. +- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution. ## v0.12.1 [2016-04-08] @@ -45,6 +79,7 @@ - [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu - [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time. + ## v0.12.0 [2016-04-05] ### Release Notes Upgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/). @@ -139,6 +174,7 @@ There were some important breaking changes in this release. 
Here's a list of the - [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause - [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server - [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran! +- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries ### Bugfixes diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps index bb803a946..1c9f62210 100644 --- a/vendor/github.com/influxdata/influxdb/Godeps +++ b/vendor/github.com/influxdata/influxdb/Godeps @@ -12,10 +12,10 @@ github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 github.com/hashicorp/raft 8fd9a2fdfd154f4b393aa24cff91e3c317efe839 github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee github.com/influxdata/usage-client 475977e68d79883d9c8d67131c84e4241523f452 -github.com/jwilder/encoding 07d88d4f35eec497617bee0c7bfe651a796dae13 +github.com/jwilder/encoding b421ab402545ef5a119f4f827784c6551d9bfc37 github.com/kimor79/gollectd 61d0deeb4ffcc167b2a1baa8efd72365692811bc github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 -github.com/peterh/liner ad1edfd30321d8f006ccf05f1e0524adeb943060 +github.com/peterh/liner 82a939e738b0ee23e84ec7a12d8e216f4d95c53f github.com/rakyll/statik 274df120e9065bdd08eb1120e0375e3dc1ae8465 golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532 golang.org/x/tools 8b178a93c1f5b5c8f4e36cd6bd64e0d5bf0ee180 diff --git a/vendor/github.com/influxdata/influxdb/README.md b/vendor/github.com/influxdata/influxdb/README.md index 636e3ff4c..3523e4298 100644 --- a/vendor/github.com/influxdata/influxdb/README.md +++ b/vendor/github.com/influxdata/influxdb/README.md @@ -1,4 +1,4 @@ -# 
InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) ## An Open-Source Time Series Database @@ -8,10 +8,9 @@ events, and performing analytics. ## Features -* Built-in [HTTP API](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data/) so you don't have to write any server side code to get up and running. +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running. * Data can be tagged, allowing very flexible querying. * SQL-like query language. -* Clustering is supported out of the box, so that you can scale horizontally to handle your data. **Clustering is currently in an alpha state.** * Simple to install and manage, and fast to get data in and out. * It aims to answer queries in real-time. That means every data point is indexed as it comes in and is immediately available in queries that @@ -59,9 +58,9 @@ curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ ## Documentation -* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/v0.10/). -* Follow the [getting started guide](https://docs.influxdata.com/influxdb/v0.10/introduction/getting_started/) to learn the basics in just a few minutes. -* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data/). +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/). 
+* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes. +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/). ## Contributing diff --git a/vendor/github.com/influxdata/influxdb/build.py b/vendor/github.com/influxdata/influxdb/build.py index d132bc29d..5e5888f23 100755 --- a/vendor/github.com/influxdata/influxdb/build.py +++ b/vendor/github.com/influxdata/influxdb/build.py @@ -10,8 +10,7 @@ import hashlib import re import logging - -LOG_LEVEL = logging.INFO +import argparse ################ #### InfluxDB Variables @@ -35,7 +34,7 @@ DEFAULT_CONFIG = "etc/config.sample.toml" # Default AWS S3 bucket for uploads -DEFAULT_BUCKET = "influxdb" +DEFAULT_BUCKET = "dl.influxdata.com/influxdb/artifacts" CONFIGURATION_FILES = [ CONFIG_DIR + '/influxdb.conf', @@ -86,37 +85,56 @@ } supported_builds = { - 'darwin': [ "amd64", "i386" ], - 'windows': [ "amd64", "i386" ], - 'linux': [ "amd64", "i386", "armhf", "arm64", "armel" ] + 'darwin': [ "amd64" ], + 'windows': [ "amd64" ], + 'linux': [ "amd64", "i386", "armhf", "arm64", "armel", "static_i386", "static_amd64" ] } supported_packages = { - "darwin": [ "tar", "zip" ], - "linux": [ "deb", "rpm", "tar", "zip"], - "windows": [ "tar", "zip" ], + "darwin": [ "tar" ], + "linux": [ "deb", "rpm", "tar" ], + "windows": [ "zip" ], } ################ #### InfluxDB Functions ################ +def print_banner(): + logging.info(""" + ___ __ _ ___ ___ + |_ _|_ _ / _| |_ ___ _| \\| _ ) + | || ' \\| _| | || \\ \\ / |) | _ \\ + |___|_||_|_| |_|\\_,_/_\\_\\___/|___/ + Build Script +""") + def create_package_fs(build_root): + """Create a filesystem structure to mimic the package filesystem. 
+ """ logging.debug("Creating package filesystem at location: {}".format(build_root)) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], DATA_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + dirs = [ INSTALL_ROOT_DIR[1:], + LOG_DIR[1:], + DATA_DIR[1:], + SCRIPT_DIR[1:], + CONFIG_DIR[1:], + LOGROTATE_DIR[1:] ] for d in dirs: - create_dir(os.path.join(build_root, d)) + os.makedirs(os.path.join(build_root, d)) os.chmod(os.path.join(build_root, d), 0o755) def package_scripts(build_root, config_only=False): + """Copy the necessary scripts and configuration files to the package + filesystem. + """ if config_only: - logging.info("Copying configuration to build directory.") + logging.debug("Copying configuration to build directory.") shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "influxdb.conf")) os.chmod(os.path.join(build_root, "influxdb.conf"), 0o644) else: - logging.info("Copying scripts and sample configuration to build directory.") + logging.debug("Copying scripts and sample configuration to build directory.") shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644) shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) @@ -127,6 +145,8 @@ def package_scripts(build_root, config_only=False): os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0o644) def run_generate(): + """Run 'go generate' to rebuild any static assets. 
+ """ logging.info("Running 'go generate'...") if not check_path_for("statik"): run("go install github.com/rakyll/statik") @@ -140,7 +160,12 @@ def run_generate(): os.environ["PATH"] = orig_path return True -def go_get(branch, update=False, no_stash=False): +def go_get(branch, update=False, no_uncommitted=False): + """Retrieve build dependencies or restore pinned dependencies. + """ + if local_changes() and no_uncommitted: + logging.error("There are uncommitted changes in the current directory.") + return False if not check_path_for("gdm"): logging.info("Downloading `gdm`...") get_command = "go get github.com/sparrc/gdm" @@ -150,11 +175,51 @@ def go_get(branch, update=False, no_stash=False): run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH"))) return True +def run_tests(race, parallel, timeout, no_vet): + """Run the Go test suite on binary output. + """ + logging.info("Starting tests...") + if race: + logging.info("Race is enabled.") + if parallel is not None: + logging.info("Using parallel: {}".format(parallel)) + if timeout is not None: + logging.info("Using timeout: {}".format(timeout)) + out = run("go fmt ./...") + if len(out) > 0: + logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") + logging.error("{}".format(out)) + return False + if not no_vet: + logging.info("Installing 'go vet' tool...") + run("go install golang.org/x/tools/cmd/vet") + out = run(go_vet_command) + if len(out) > 0: + logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.") + logging.error("{}".format(out)) + return False + else: + logging.info("Skipping 'go vet' call...") + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." 
+ logging.info("Running tests...") + output = run(test_command) + logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore'))) + return True + ################ #### All InfluxDB-specific content above this line ################ def run(command, allow_failure=False, shell=False): + """Run shell command (convenience wrapper around subprocess). + """ out = None logging.debug("{}".format(command)) try: @@ -182,22 +247,46 @@ def run(command, allow_failure=False, shell=False): return out def create_temp_dir(prefix = None): + """ Create temporary directory with optional prefix. + """ if prefix is None: return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) else: return tempfile.mkdtemp(prefix=prefix) +def increment_minor_version(version): + """Return the version with the minor version incremented and patch + version set to zero. + """ + ver_list = version.split('.') + if len(ver_list) != 3: + logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + return version + ver_list[1] = str(int(ver_list[1]) + 1) + ver_list[2] = str(0) + inc_version = '.'.join(ver_list) + logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) + return inc_version + def get_current_version_tag(): - version = run("git describe --always --tags --abbrev=0").strip() + """Retrieve the raw git version tag. + """ + version = run("git describe --always --tags --abbrev=0") return version def get_current_version(): + """Parse version information from git tag output. + """ version_tag = get_current_version_tag() # Remove leading 'v' and possible '-rc\d+' - version = re.sub(r'-rc\d+', '', str(version_tag[1:])) + if version_tag[0] == 'v': + version_tag = version_tag[1:] + version = re.sub(r'-rc\d+', '', str(version_tag)) return version def get_current_rc(): + """Parse release candidate from git tag output. 
+ """ rc = None version_tag = get_current_version_tag() matches = re.match(r'.*-rc(\d+)', str(version_tag)) @@ -206,6 +295,8 @@ def get_current_rc(): return rc def get_current_commit(short=False): + """Retrieve the current git commit. + """ command = None if short: command = "git log --pretty=format:'%h' -n 1" @@ -215,23 +306,44 @@ def get_current_commit(short=False): return out.strip('\'\n\r ') def get_current_branch(): + """Retrieve the current git branch. + """ command = "git rev-parse --abbrev-ref HEAD" out = run(command) return out.strip() +def local_changes(): + """Return True if there are local un-committed changes. + """ + output = run("git diff-files --ignore-submodules --").strip() + if len(output) > 0: + return True + return False + def get_system_arch(): + """Retrieve current system architecture. + """ arch = os.uname()[4] if arch == "x86_64": arch = "amd64" + elif arch == "386": + arch = "i386" + elif 'arm' in arch: + # Prevent uname from reporting full ARM arch (eg 'armv7l') + arch = "arm" return arch def get_system_platform(): + """Retrieve current system platform. + """ if sys.platform.startswith("linux"): return "linux" else: return sys.platform def get_go_version(): + """Retrieve version information for Go. + """ out = run("go version") matches = re.search('go version go(\S+)', out) if matches is not None: @@ -239,6 +351,8 @@ def get_go_version(): return None def check_path_for(b): + """Check the the user's path for the provided binary. + """ def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) @@ -249,6 +363,8 @@ def is_exe(fpath): return full_path def check_environ(build_dir = None): + """Check environment for common Go variables. + """ logging.info("Checking environment...") for v in [ "GOPATH", "GOBIN", "GOROOT" ]: logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) @@ -259,6 +375,8 @@ def check_environ(build_dir = None): return True def check_prereqs(): + """Check user path for required dependencies. 
+ """ logging.info("Checking for dependencies...") for req in prereqs: if not check_path_for(req): @@ -267,15 +385,22 @@ def check_prereqs(): return True def upload_packages(packages, bucket_name=None, overwrite=False): + """Upload provided package output to AWS S3. + """ logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages)) try: import boto from boto.s3.key import Key + from boto.s3.connection import OrdinaryCallingFormat + logging.getLogger("boto").setLevel(logging.WARNING) except ImportError: - logging.warn("Cannot upload packages without 'boto' Python library! Skipping.") + logging.warn("Cannot upload packages without 'boto' Python library!") return False - logging.info("Connecting to S3.") - c = boto.connect_s3() + logging.info("Connecting to AWS S3...") + # Up the number of attempts to 10 from default of 1 + boto.config.add_section("Boto") + boto.config.set("Boto", "metadata_service_num_attempts", "10") + c = boto.connect_s3(calling_format=OrdinaryCallingFormat()) if bucket_name is None: bucket_name = DEFAULT_BUCKET bucket = c.get_bucket(bucket_name.split('/')[0]) @@ -288,6 +413,7 @@ def upload_packages(packages, bucket_name=None, overwrite=False): os.path.basename(p)) else: name = os.path.basename(p) + logging.debug("Using key: {}".format(name)) if bucket.get_key(name) is None or overwrite: logging.info("Uploading file {}".format(name)) k = Key(bucket) @@ -298,48 +424,36 @@ def upload_packages(packages, bucket_name=None, overwrite=False): n = k.set_contents_from_filename(p, replace=False) k.make_public() else: - logging.warn("Not uploading file {}, as it already exists in the target bucket.") + logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) return True -def run_tests(race, parallel, timeout, no_vet): - logging.info("Starting tests...") - if race: - logging.info("Race is enabled.") - if parallel is not None: - logging.info("Using parallel: {}".format(parallel)) - if timeout is not 
None: - logging.info("Using timeout: {}".format(timeout)) - out = run("go fmt ./...") - if len(out) > 0: - logging.error("Code not formatted. Please use 'go fmt ./...' to fix formatting errors.") - logging.error("{}".format(out)) - return False - if not no_vet: - logging.info("Installing 'go vet' tool...") - run("go install golang.org/x/tools/cmd/vet") - out = run(go_vet_command) - if len(out) > 0: - logging.error("Go vet failed. Please run 'go vet ./...' and fix any errors.") - logging.error("{}".format(out)) - return False - else: - logging.info("Skipping 'go vet' call...") - test_command = "go test -v" - if race: - test_command += " -race" - if parallel is not None: - test_command += " -parallel {}".format(parallel) - if timeout is not None: - test_command += " -timeout {}".format(timeout) - test_command += " ./..." - logging.info("Running tests...") - output = run(test_command) - logging.debug("Test output:\n{}".format(output.encode('ascii', 'ignore'))) - return True +def go_list(vendor=False, relative=False): + """ + Return a list of packages + If vendor is False vendor package are not included + If relative is True the package prefix defined by PACKAGE_URL is stripped + """ + p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + packages = out.split('\n') + if packages[-1] == '': + packages = packages[:-1] + if not vendor: + non_vendor = [] + for p in packages: + if '/vendor/' not in p: + non_vendor.append(p) + packages = non_vendor + if relative: + relative_pkgs = [] + for p in packages: + r = p.replace(PACKAGE_URL, '.') + if r != '.': + relative_pkgs.append(r) + packages = relative_pkgs + return packages def build(version=None, - branch=None, - commit=None, platform=None, arch=None, nightly=False, @@ -349,9 +463,12 @@ def build(version=None, outdir=".", tags=[], static=False): + """Build each target for the specified architecture and platform. 
+ """ logging.info("Starting build for {}/{}...".format(platform, arch)) - logging.info("Using commit: {}".format(get_current_commit())) - logging.info("Using branch: {}".format(get_current_branch())) + logging.info("Using Go version: {}".format(get_go_version())) + logging.info("Using git branch: {}".format(get_current_branch())) + logging.info("Using git commit: {}".format(get_current_commit())) if static: logging.info("Using statically-compiled output.") if race: @@ -362,8 +479,8 @@ def build(version=None, logging.info("Sending build output to: {}".format(outdir)) if not os.path.exists(outdir): os.makedirs(outdir) - elif clean and outdir != '/': - logging.info("Cleaning build directory.") + elif clean and outdir != '/' and outdir != ".": + logging.info("Cleaning build directory '{}' before building.".format(outdir)) shutil.rmtree(outdir) os.makedirs(outdir) @@ -371,42 +488,46 @@ def build(version=None, # If a release candidate, update the version information accordingly version = "{}rc{}".format(version, rc) logging.info("Using version '{}' for build.".format(version)) - + tmp_build_dir = create_temp_dir() - for b, c in targets.items(): - logging.info("Building target: {}".format(b)) + for target, path in targets.items(): + logging.info("Building target: {}".format(target)) build_command = "" - if static: + + # Handle static binary output + if static is True or "static_" in arch: + if "static_" in arch: + static = True + arch = arch.replace("static_", "") build_command += "CGO_ENABLED=0 " - if "arm" in arch: - build_command += "GOOS={} GOARCH={} ".format(platform, "arm") - else: - if arch == 'i386': - arch = '386' - elif arch == 'x86_64': - arch = 'amd64' - build_command += "GOOS={} GOARCH={} ".format(platform, arch) + + # Handle variations in architecture output + if arch == "i386" or arch == "i686": + arch = "386" + elif "arm" in arch: + arch = "arm" + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + if "arm" in arch: if arch == "armel": 
build_command += "GOARM=5 " elif arch == "armhf" or arch == "arm": build_command += "GOARM=6 " elif arch == "arm64": + # TODO(rossmcdonald) - Verify this is the correct setting for arm64 build_command += "GOARM=7 " else: logging.error("Invalid ARM architecture specified: {}".format(arch)) logging.error("Please specify either 'armel', 'armhf', or 'arm64'.") return False if platform == 'windows': - build_command += "go build -o {} ".format(os.path.join(outdir, b + '.exe')) - else: - build_command += "go build -o {} ".format(os.path.join(outdir, b)) + target = target + '.exe' + build_command += "go build -o {} ".format(os.path.join(outdir, target)) if race: build_command += "-race " if len(tags) > 0: build_command += "-tags {} ".format(','.join(tags)) - go_version = get_go_version() - if "1.4" in go_version: + if "1.4" in get_go_version(): if static: build_command += "-ldflags=\"-s -X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, get_current_branch(), @@ -417,7 +538,7 @@ def build(version=None, get_current_commit()) else: - # With Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' if static: build_command += "-ldflags=\"-s -X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, get_current_branch(), @@ -428,27 +549,16 @@ def build(version=None, get_current_commit()) if static: build_command += "-a -installsuffix cgo " - build_command += c + build_command += path + start_time = datetime.utcnow() run(build_command, shell=True) + end_time = datetime.utcnow() + logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) return True -def create_dir(path): - os.makedirs(path) - -def rename_file(fr, to): - try: - os.rename(fr, to) - except OSError as e: - # Return the original filename - return fr - else: - # Return the new filename - return to - -def copy_file(fr, to): - shutil.copy(fr, to) - 
def generate_md5_from_file(path): + """Generate MD5 signature based on the contents of the file at path. + """ m = hashlib.md5() with open(path, 'rb') as f: for chunk in iter(lambda: f.read(4096), b""): @@ -456,6 +566,8 @@ def generate_md5_from_file(path): return m.hexdigest() def generate_sig_from_file(path): + """Generate a detached GPG signature from the file at path. + """ logging.debug("Generating GPG signature for file: {}".format(path)) gpg_path = check_path_for('gpg') if gpg_path is None: @@ -467,16 +579,19 @@ def generate_sig_from_file(path): run('gpg --armor --detach-sign --yes {}'.format(path)) return True -def build_packages(build_output, version, nightly=False, rc=None, iteration=1, static=False): +def package(build_output, version, nightly=False, rc=None, iteration=1, static=False, release=False): + """Package the output of the build process. + """ outfiles = [] tmp_build_dir = create_temp_dir() logging.debug("Packaging for build output: {}".format(build_output)) - logging.debug("Storing temporary build data at location: {}".format(tmp_build_dir)) + logging.info("Using temporary directory: {}".format(tmp_build_dir)) try: for platform in build_output: # Create top-level folder displaying which platform (linux, etc) - create_dir(os.path.join(tmp_build_dir, platform)) + os.makedirs(os.path.join(tmp_build_dir, platform)) for arch in build_output[platform]: + logging.info("Creating packages for {}/{}".format(platform, arch)) # Create second-level directory displaying the architecture (amd64, etc) current_location = build_output[platform][arch] @@ -485,10 +600,10 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s platform, arch, '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) - create_dir(build_root) + os.makedirs(build_root) # Copy packaging scripts to build directory - if platform == 'windows' or static: + if platform == "windows" or static or "static_" in arch: # For windows and static builds, just copy # binaries to 
root of package (no other scripts or # directories) @@ -501,7 +616,7 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s # Copy newly-built binaries to packaging directory if platform == 'windows': binary = binary + '.exe' - if platform == 'windows' or static: + if platform == 'windows' or static or "static_" in arch: # Where the binary should go in the package filesystem to = os.path.join(build_root, binary) # Where the binary currently is located @@ -511,7 +626,7 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s fr = os.path.join(current_location, binary) # Where the binary should go in the package filesystem to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) - copy_file(fr, to) + shutil.copy(fr, to) for package_type in supported_packages[platform]: # Package the directory structure for each package type for the platform @@ -521,10 +636,21 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s # since they may be modified below. 
package_version = version package_iteration = iteration + if "static_" in arch: + # Remove the "static_" from the displayed arch on the package + package_arch = arch.replace("static_", "") + else: + package_arch = arch + if not release and not nightly: + # For non-release builds, just use the commit hash as the version + package_version = "{}~{}.{}".format(version, + get_current_branch(), + get_current_commit(short=True)) + package_iteration = "0" package_build_root = build_root current_location = build_output[platform][arch] - if rc is not None: + if rc is not None and release: # Set iteration to 0 since it's a release candidate package_iteration = "0.rc{}".format(rc) @@ -533,28 +659,40 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s # the build root (to include the package name) package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) if nightly: - if static: + if static or "static_" in arch: name = '{}-static-nightly_{}_{}'.format(name, - platform, - arch) + platform, + package_arch) else: name = '{}-nightly_{}_{}'.format(name, - platform, - arch) + platform, + package_arch) else: - if static: - name = '{}-{}-{}-static_{}_{}'.format(name, - package_version, - package_iteration, - platform, - arch) + if rc is not None or 'rc' in package_iteration or 'beta' not in package_iteration: + # If RC or beta, include iteration in package output + if static or "static_" in arch: + name = '{}-{}-static_{}_{}'.format(name, + package_version, + platform, + package_arch) + else: + name = '{}-{}_{}_{}'.format(name, + package_version, + platform, + package_arch) else: - name = '{}-{}-{}_{}_{}'.format(name, - package_version, - package_iteration, - platform, - arch) - + if static or "static_" in arch: + name = '{}-{}-{}-static_{}_{}'.format(name, + package_version, + package_iteration, + platform, + package_arch) + else: + name = '{}-{}-{}_{}_{}'.format(name, + package_version, + package_iteration, + platform, + 
package_arch) current_location = os.path.join(os.getcwd(), current_location) if package_type == 'tar': tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(build_root, name) @@ -562,22 +700,19 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s run("mv {}.tar.gz {}".format(os.path.join(build_root, name), current_location), shell=True) outfile = os.path.join(current_location, name + ".tar.gz") outfiles.append(outfile) - print("MD5({}) = {}".format(outfile, generate_md5_from_file(outfile))) elif package_type == 'zip': zip_command = "cd {} && zip -r {}.zip ./*".format(build_root, name) run(zip_command, shell=True) run("mv {}.zip {}".format(os.path.join(build_root, name), current_location), shell=True) outfile = os.path.join(current_location, name + ".zip") outfiles.append(outfile) - logging.info("MD5({}) = {}".format(outfile.split(os.pathsep)[-1:], - generate_md5_from_file(outfile))) - elif package_type not in ['zip', 'tar'] and static: + elif package_type not in ['zip', 'tar'] and static or "static_" in arch: logging.info("Skipping package type '{}' for static builds.".format(package_type)) else: fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( fpm_common_args, name, - arch, + package_arch, package_type, package_version, package_iteration, @@ -593,272 +728,132 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s if outfile is None: logging.warn("Could not determine output from packaging output!") else: - # Strip nightly version (the unix epoch) from filename - if nightly and package_type in [ 'deb', 'rpm' ]: - outfile = rename_file(outfile, - outfile.replace("{}-{}".format(version, iteration), "nightly")) + if nightly: + # Strip nightly version from package name + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly") + os.rename(outfile, new_outfile) + outfile = new_outfile + else: + if rc is None and 'rc' not in 
package_iteration and 'beta' not in package_iteration: + # Strip iteration from package name (if there is no RC or beta) + if package_type == 'rpm': + # rpm's convert any dashes to underscores + package_version = package_version.replace("-", "_") + new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version) + os.rename(outfile, new_outfile) + outfile = new_outfile outfiles.append(os.path.join(os.getcwd(), outfile)) - # Display MD5 hash for generated package - logging.info("MD5({}) = {}".format(outfile.split(os.pathsep)[-1:], - generate_md5_from_file(outfile))) logging.debug("Produced package files: {}".format(outfiles)) return outfiles finally: # Cleanup shutil.rmtree(tmp_build_dir) -def print_usage(): - print("Usage: ./build.py [options]") - print("") - print("Options:") - print("\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build.") - print("\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386|i386, arm, or all") - print("\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all") - print("\t --version= \n\t\t- Version information to apply to build metadata. 
If not specified, will be pulled from repo tag.") - print("\t --commit= \n\t\t- Use specific commit for build (currently a NOOP).") - print("\t --branch= \n\t\t- Build from a specific branch (currently a NOOP).") - print("\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information).") - print("\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise).") - print("\t --race \n\t\t- Whether the produced build should have race detection enabled.") - print("\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s).") - print("\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information).") - print("\t --update \n\t\t- Whether dependencies should be updated prior to building.") - print("\t --test \n\t\t- Run Go tests. Will not produce a build.") - print("\t --parallel \n\t\t- Run Go tests in parallel up to the count specified.") - print("\t --generate \n\t\t- Run `go generate`.") - print("\t --timeout \n\t\t- Timeout for Go tests. 
Defaults to 480s.") - print("\t --clean \n\t\t- Clean the build output directory prior to creating build.") - print("\t --no-get \n\t\t- Do not run `go get` before building.") - print("\t --static \n\t\t- Generate statically-linked binaries.") - print("\t --bucket=\n\t\t- Full path of the bucket to upload packages to (must also specify --upload).") - print("\t --sign \n\t\t- Sign output packages using GPG.") - print("\t --debug \n\t\t- Use debug output.") - print("") - -def main(): +def main(args): global PACKAGE_NAME - # Command-line arguments - outdir = "build" - commit = None - target_platform = None - target_arch = None - nightly = False - race = False - branch = None - version = get_current_version() - rc = get_current_rc() - package = False - update = False - clean = False - upload = False - test = False - parallel = None - timeout = None - iteration = 1 - no_vet = False - run_get = True - upload_bucket = None - generate = False - no_stash = False - static = False - build_tags = [] - sign_packages = False - upload_overwrite = False - - for arg in sys.argv[1:]: - if '--outdir' in arg: - # Output directory. If none is specified, then builds will be placed in the same directory. - outdir = arg.split("=")[1] - if '--commit' in arg: - # Commit to build from. If none is specified, then it will build from the most recent commit. - commit = arg.split("=")[1] - if '--branch' in arg: - # Branch to build from. If none is specified, then it will build from the current branch. - branch = arg.split("=")[1] - elif '--arch' in arg: - # Target architecture. If none is specified, then it will build for the current arch. - target_arch = arg.split("=")[1] - elif '--platform' in arg: - # Target platform. If none is specified, then it will build for the current platform. 
- target_platform = arg.split("=")[1] - elif '--version' in arg: - # Version to assign to this build (0.9.5, etc) - version = arg.split("=")[1] - elif '--rc' in arg: - # Signifies that this is a release candidate build. - rc = arg.split("=")[1] - elif '--race' in arg: - # Signifies that race detection should be enabled. - race = True - elif '--package' in arg: - # Signifies that packages should be built. - package = True - # If packaging do not allow stashing of local changes - no_stash = True - elif '--nightly' in arg: - # Signifies that this is a nightly build. - nightly = True - elif '--update' in arg: - # Signifies that dependencies should be updated. - update = True - elif '--upload' in arg: - # Signifies that the resulting packages should be uploaded to S3 - upload = True - elif '--overwrite' in arg: - # Signifies that the resulting packages should be uploaded to S3 - upload_overwrite = True - elif '--test' in arg: - # Run tests and exit - test = True - elif '--parallel' in arg: - # Set parallel for tests. - parallel = int(arg.split("=")[1]) - elif '--timeout' in arg: - # Set timeout for tests. 
- timeout = arg.split("=")[1] - elif '--clean' in arg: - # Signifies that the outdir should be deleted before building - clean = True - elif '--iteration' in arg: - iteration = arg.split("=")[1] - elif '--no-vet' in arg: - no_vet = True - elif '--no-get' in arg: - run_get = False - elif '--bucket' in arg: - # The bucket to upload the packages to, relies on boto - upload_bucket = arg.split("=")[1] - elif '--no-stash' in arg: - # Do not stash uncommited changes - # Fail if uncommited changes exist - no_stash = True - elif '--generate' in arg: - generate = True - elif '--build-tags' in arg: - for t in arg.split("=")[1].split(","): - build_tags.append(t) - elif '--name' in arg: - # Change the output package name - PACKAGE_NAME = arg.split("=")[1] - elif '--static' in arg: - static = True - elif '--sign' in arg: - sign_packages = True - elif '--debug' in arg: - # Setting log level is handled elsewhere - pass - elif '--help' in arg: - print_usage() - return 0 - else: - print("!! Unknown argument: {}".format(arg)) - print_usage() - return 1 - - if nightly and rc: + if args.nightly and args.rc: logging.error("Cannot be both a nightly and a release candidate.") return 1 + if args.release and args.nightly: + logging.error("Cannot be both a nightly and a release.") + return 1 - if nightly: - # In order to cleanly delineate nightly version, we are adding the epoch timestamp - # to the version so that version numbers are always greater than the previous nightly. 
- version = "{}~n{}".format(version, - datetime.utcnow().strftime("%Y%m%d%H%M")) - iteration = 0 - elif rc: - iteration = 0 + if args.nightly: + args.version = increment_minor_version(args.version) + args.version = "{}~n{}".format(args.version, + datetime.utcnow().strftime("%Y%m%d%H%M")) + args.iteration = 0 + elif args.rc: + args.iteration = 0 # Pre-build checks check_environ() if not check_prereqs(): return 1 - - if not commit: - commit = get_current_commit(short=True) - if not branch: - branch = get_current_branch() - if not target_arch: - system_arch = get_system_arch() - if 'arm' in system_arch: - # Prevent uname from reporting ARM arch (eg 'armv7l') - target_arch = "arm" - else: - target_arch = system_arch - if target_arch == '386': - target_arch = 'i386' - elif target_arch == 'x86_64': - target_arch = 'amd64' - if target_platform: - if target_platform not in supported_builds and target_platform != 'all': - logging.error("Invalid build platform: {}".format(target_platform)) - return 1 + if args.build_tags is None: + args.build_tags = [] else: - target_platform = get_system_platform() + args.build_tags = args.build_tags.split(',') + + orig_commit = get_current_commit(short=True) + orig_branch = get_current_branch() + + if args.platform not in supported_builds and args.platform != 'all': + logging.error("Invalid build platform: {}".format(target_platform)) + return 1 build_output = {} - if run_get: - if not go_get(branch, update=update, no_stash=no_stash): + if args.branch != orig_branch and args.commit != orig_commit: + logging.error("Can only specify one branch or commit to build from.") + return 1 + elif args.branch != orig_branch: + logging.info("Moving to git branch: {}".format(args.branch)) + run("git checkout {}".format(args.branch)) + elif args.commit != orig_commit: + logging.info("Moving to git commit: {}".format(args.commit)) + run("git checkout {}".format(args.commit)) + + if not args.no_get: + if not go_get(args.branch, update=args.update, 
no_uncommitted=args.no_uncommitted): return 1 - if generate: + if args.generate: if not run_generate(): return 1 - if test: - if not run_tests(race, parallel, timeout, no_vet): + if args.test: + if not run_tests(args.race, args.parallel, args.timeout, args.no_vet): return 1 - return 0 platforms = [] single_build = True - if target_platform == 'all': + if args.platform == 'all': platforms = supported_builds.keys() single_build = False else: - platforms = [target_platform] + platforms = [args.platform] for platform in platforms: build_output.update( { platform : {} } ) archs = [] - if target_arch == "all": + if args.arch == "all": single_build = False archs = supported_builds.get(platform) else: - archs = [target_arch] + archs = [args.arch] for arch in archs: - od = outdir + od = args.outdir if not single_build: - od = os.path.join(outdir, platform, arch) - if not build(version=version, - branch=branch, - commit=commit, + od = os.path.join(args.outdir, platform, arch) + if not build(version=args.version, platform=platform, arch=arch, - nightly=nightly, - rc=rc, - race=race, - clean=clean, + nightly=args.nightly, + rc=args.rc, + race=args.race, + clean=args.clean, outdir=od, - tags=build_tags, - static=static): + tags=args.build_tags, + static=args.static): return 1 build_output.get(platform).update( { arch : od } ) # Build packages - if package: + if args.package: if not check_path_for("fpm"): logging.error("FPM ruby gem required for packaging. 
Stopping.") return 1 - packages = build_packages(build_output, - version, - nightly=nightly, - rc=rc, - iteration=iteration, - static=static) - if sign_packages: + packages = package(build_output, + args.version, + nightly=args.nightly, + rc=args.rc, + iteration=args.iteration, + static=args.static, + release=args.release) + if args.sign: logging.debug("Generating GPG signatures for packages: {}".format(packages)) sigs = [] # retain signatures so they can be uploaded with packages for p in packages: @@ -868,18 +863,149 @@ def main(): logging.error("Creation of signature for package [{}] failed!".format(p)) return 1 packages += sigs - if upload: + if args.upload: logging.debug("Files staged for upload: {}".format(packages)) - if nightly or upload_overwrite: - upload_packages(packages, bucket_name=upload_bucket, overwrite=True) - else: - upload_packages(packages, bucket_name=upload_bucket, overwrite=False) + if args.nightly: + args.upload_overwrite = True + if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite): + return 1 + logging.info("Packages created:") + for p in packages: + logging.info("{} (MD5={})".format(p.split('/')[-1:][0], + generate_md5_from_file(p))) + if orig_branch != get_current_branch(): + logging.info("Moving back to original git branch: {}".format(args.branch)) + run("git checkout {}".format(orig_branch)) + return 0 if __name__ == '__main__': + LOG_LEVEL = logging.INFO if '--debug' in sys.argv[1:]: LOG_LEVEL = logging.DEBUG log_format = '[%(levelname)s] %(funcName)s: %(message)s' logging.basicConfig(level=LOG_LEVEL, format=log_format) - sys.exit(main()) + + parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') + parser.add_argument('--verbose','-v','--debug', + action='store_true', + help='Use debug output') + parser.add_argument('--outdir', '-o', + metavar='', + default='./build/', + type=os.path.abspath, + help='Output directory') + parser.add_argument('--name', '-n', + 
metavar='', + type=str, + help='Name to use for package name (when package is specified)') + parser.add_argument('--arch', + metavar='', + type=str, + default=get_system_arch(), + help='Target architecture for build output') + parser.add_argument('--platform', + metavar='', + type=str, + default=get_system_platform(), + help='Target platform for build output') + parser.add_argument('--branch', + metavar='', + type=str, + default=get_current_branch(), + help='Build from a specific branch') + parser.add_argument('--commit', + metavar='', + type=str, + default=get_current_commit(short=True), + help='Build from a specific commit') + parser.add_argument('--version', + metavar='', + type=str, + default=get_current_version(), + help='Version information to apply to build output (ex: 0.12.0)') + parser.add_argument('--rc', + metavar='', + type=int, + help='Release Candidate (RC) version to apply to build output') + parser.add_argument('--iteration', + metavar='', + type=str, + default="1", + help='Package iteration to apply to build output (defaults to 1)') + parser.add_argument('--stats', + action='store_true', + help='Emit build metrics (requires InfluxDB Python client)') + parser.add_argument('--stats-server', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided hostname and port') + parser.add_argument('--stats-db', + metavar='', + type=str, + help='Send build stats to InfluxDB using provided database name') + parser.add_argument('--nightly', + action='store_true', + help='Mark build output as nightly build (will incremement the minor version)') + parser.add_argument('--update', + action='store_true', + help='Update build dependencies prior to building') + parser.add_argument('--package', + action='store_true', + help='Package binary output') + parser.add_argument('--release', + action='store_true', + help='Mark build output as release') + parser.add_argument('--clean', + action='store_true', + help='Clean output directory before building') + 
parser.add_argument('--no-get', + action='store_true', + help='Do not retrieve pinned dependencies when building') + parser.add_argument('--no-uncommitted', + action='store_true', + help='Fail if uncommitted changes exist in the working directory') + parser.add_argument('--upload', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--upload-overwrite','-w', + action='store_true', + help='Upload output packages to AWS S3') + parser.add_argument('--bucket', + metavar='', + type=str, + default=DEFAULT_BUCKET, + help='Destination bucket for uploads') + parser.add_argument('--generate', + action='store_true', + help='Run "go generate" before building') + parser.add_argument('--build-tags', + metavar='', + help='Optional build tags to use for compilation') + parser.add_argument('--static', + action='store_true', + help='Create statically-compiled binary output') + parser.add_argument('--sign', + action='store_true', + help='Create GPG detached signatures for packages (when package is specified)') + parser.add_argument('--test', + action='store_true', + help='Run tests (does not produce build output)') + parser.add_argument('--no-vet', + action='store_true', + help='Do not run "go vet" when running tests') + parser.add_argument('--race', + action='store_true', + help='Enable race flag for build output') + parser.add_argument('--parallel', + metavar='', + type=int, + help='Number of tests to run simultaneously') + parser.add_argument('--timeout', + metavar='', + type=str, + help='Timeout for tests before failing') + args = parser.parse_args() + print_banner() + sys.exit(main(args)) diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go index 020c2cd3b..90695b9ed 100644 --- a/vendor/github.com/influxdata/influxdb/client/influxdb.go +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -181,7 +181,7 @@ func (c *Client) Query(q Query) 
(*Response, error) { } u.RawQuery = values.Encode() - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go index 3776eab30..a5b631913 100644 --- a/vendor/github.com/influxdata/influxdb/client/v2/client.go +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -541,7 +541,7 @@ func (c *client) Query(q Query) (*Response, error) { u := c.url u.Path = "query" - req, err := http.NewRequest("GET", u.String(), nil) + req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } diff --git a/vendor/github.com/influxdata/influxdb/influxql/README.md b/vendor/github.com/influxdata/influxdb/influxql/README.md index db7cfd930..9b8202f0e 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/README.md +++ b/vendor/github.com/influxdata/influxdb/influxql/README.md @@ -102,12 +102,12 @@ DURATION END EVERY EXISTS EXPLAIN FIELD FOR FORCE FROM GRANT GRANTS GROUP GROUPS IF IN INF INNER INSERT INTO KEY KEYS LIMIT SHOW MEASUREMENT -MEASUREMENTS NOT OFFSET ON ORDER PASSWORD -POLICY POLICIES PRIVILEGES QUERIES QUERY READ -REPLICATION RESAMPLE RETENTION REVOKE SELECT SERIES -SERVER SERVERS SET SHARD SHARDS SLIMIT -SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG TO -USER USERS VALUES WHERE WITH WRITE +MEASUREMENTS NAME NOT OFFSET ON ORDER +PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY +READ REPLICATION RESAMPLE RETENTION REVOKE SELECT +SERIES SET SHARD SHARDS SLIMIT SOFFSET +STATS SUBSCRIPTION SUBSCRIPTIONS TAG TO USER +USERS VALUES WHERE WITH WRITE ``` ## Literals @@ -358,6 +358,20 @@ CREATE USER jdoe WITH PASSWORD '1337password'; CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES; ``` +### DELETE + +``` +delete_stmt = "DELETE" ( from_clause | where_clause | from_clause where_clause ) . 
+``` + +#### Example: + +```sql +DELETE FROM cpu +DELETE FROM cpu WHERE time < '2000-01-01T00:00:00Z' +DELETE WHERE time < '2000-01-01T00:00:00Z' +``` + ### DROP CONTINUOUS QUERY ``` @@ -756,7 +770,7 @@ measurement = measurement_name | measurements = measurement { "," measurement } . -measurement_name = identifier . +measurement_name = identifier | regex_lit . password = string_lit . diff --git a/vendor/github.com/influxdata/influxdb/influxql/ast.go b/vendor/github.com/influxdata/influxdb/influxql/ast.go index 1c19d6b06..295080843 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/ast.go +++ b/vendor/github.com/influxdata/influxdb/influxql/ast.go @@ -96,13 +96,13 @@ func (*CreateRetentionPolicyStatement) node() {} func (*CreateSubscriptionStatement) node() {} func (*CreateUserStatement) node() {} func (*Distinct) node() {} +func (*DeleteSeriesStatement) node() {} func (*DeleteStatement) node() {} func (*DropContinuousQueryStatement) node() {} func (*DropDatabaseStatement) node() {} func (*DropMeasurementStatement) node() {} func (*DropRetentionPolicyStatement) node() {} func (*DropSeriesStatement) node() {} -func (*DropServerStatement) node() {} func (*DropShardStatement) node() {} func (*DropSubscriptionStatement) node() {} func (*DropUserStatement) node() {} @@ -115,7 +115,6 @@ func (*SelectStatement) node() {} func (*SetPasswordUserStatement) node() {} func (*ShowContinuousQueriesStatement) node() {} func (*ShowGrantsForUserStatement) node() {} -func (*ShowServersStatement) node() {} func (*ShowDatabasesStatement) node() {} func (*ShowFieldKeysStatement) node() {} func (*ShowRetentionPoliciesStatement) node() {} @@ -211,13 +210,13 @@ func (*CreateDatabaseStatement) stmt() {} func (*CreateRetentionPolicyStatement) stmt() {} func (*CreateSubscriptionStatement) stmt() {} func (*CreateUserStatement) stmt() {} +func (*DeleteSeriesStatement) stmt() {} func (*DeleteStatement) stmt() {} func (*DropContinuousQueryStatement) stmt() {} func 
(*DropDatabaseStatement) stmt() {} func (*DropMeasurementStatement) stmt() {} func (*DropRetentionPolicyStatement) stmt() {} func (*DropSeriesStatement) stmt() {} -func (*DropServerStatement) stmt() {} func (*DropSubscriptionStatement) stmt() {} func (*DropUserStatement) stmt() {} func (*GrantStatement) stmt() {} @@ -225,7 +224,6 @@ func (*GrantAdminStatement) stmt() {} func (*KillQueryStatement) stmt() {} func (*ShowContinuousQueriesStatement) stmt() {} func (*ShowGrantsForUserStatement) stmt() {} -func (*ShowServersStatement) stmt() {} func (*ShowDatabasesStatement) stmt() {} func (*ShowFieldKeysStatement) stmt() {} func (*ShowMeasurementsStatement) stmt() {} @@ -938,34 +936,6 @@ func (s *SelectStatement) IsSimpleDerivative() bool { return false } -// HasSimpleCount return true if one of the function calls is a count function with a -// variable ref as the first arg -func (s *SelectStatement) HasSimpleCount() bool { - // recursively check for a simple count(varref) function - var hasCount func(f *Call) bool - hasCount = func(f *Call) bool { - if f.Name == "count" { - // it's nested if the first argument is an aggregate function - if _, ok := f.Args[0].(*VarRef); ok { - return true - } - } else { - for _, arg := range f.Args { - if child, ok := arg.(*Call); ok { - return hasCount(child) - } - } - } - return false - } - for _, f := range s.FunctionCalls() { - if hasCount(f) { - return true - } - } - return false -} - // TimeAscending returns true if the time field is sorted in chronological order. 
func (s *SelectStatement) TimeAscending() bool { return len(s.SortFields) == 0 || s.SortFields[0].Ascending @@ -1336,18 +1306,10 @@ func (s *SelectStatement) validate(tr targetRequirement) error { return err } - if err := s.validateCountDistinct(); err != nil { - return err - } - if err := s.validateAggregates(tr); err != nil { return err } - if err := s.validateDerivative(); err != nil { - return err - } - return nil } @@ -1373,18 +1335,32 @@ func (s *SelectStatement) validateDimensions() error { for _, dim := range s.Dimensions { switch expr := dim.Expr.(type) { case *Call: - // Ensure the call is time() and it only has one duration argument. + // Ensure the call is time() and it has one or two duration arguments. // If we already have a duration if expr.Name != "time" { return errors.New("only time() calls allowed in dimensions") - } else if len(expr.Args) != 1 { - return errors.New("time dimension expected one argument") + } else if got := len(expr.Args); got < 1 || got > 2 { + return errors.New("time dimension expected 1 or 2 arguments") } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { - return errors.New("time dimension must have one duration argument") + return errors.New("time dimension must have duration argument") } else if dur != 0 { return errors.New("multiple time dimensions not allowed") } else { dur = lit.Val + if len(expr.Args) == 2 { + switch lit := expr.Args[1].(type) { + case *DurationLiteral: + // noop + case *Call: + if lit.Name != "now" { + return errors.New("time dimension offset function must be now()") + } else if len(lit.Args) != 0 { + return errors.New("time dimension offset now() function requires no arguments") + } + default: + return errors.New("time dimension offset must be duration or now()") + } + } } case *VarRef: if strings.ToLower(expr.Val) == "time" { @@ -1561,8 +1537,12 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { case *VarRef: // do nothing case *Call: - if fc.Name != "distinct" { + 
if fc.Name != "distinct" || expr.Name != "count" { return fmt.Errorf("expected field argument in %s()", c.Name) + } else if exp, got := 1, len(fc.Args); got != exp { + return fmt.Errorf("count(distinct ) can only have one argument", fc.Name, exp, got) + } else if _, ok := fc.Args[0].(*VarRef); !ok { + return fmt.Errorf("expected field argument in distinct()") } case *Distinct: if expr.Name != "count" { @@ -1586,14 +1566,24 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { return err } if exp, got := 1, len(expr.Args); got != exp { + // Special error message if distinct was used as the argument. + if expr.Name == "count" && got >= 1 { + if _, ok := expr.Args[0].(*Distinct); ok { + return fmt.Errorf("count(distinct ) can only have one argument") + } + } return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) } switch fc := expr.Args[0].(type) { case *VarRef: // do nothing case *Call: - if fc.Name != "distinct" { + if fc.Name != "distinct" || expr.Name != "count" { return fmt.Errorf("expected field argument in %s()", expr.Name) + } else if exp, got := 1, len(fc.Args); got != exp { + return fmt.Errorf("count(distinct ) can only have one argument") + } else if _, ok := fc.Args[0].(*VarRef); !ok { + return fmt.Errorf("expected field argument in distinct()") } case *Distinct: if expr.Name != "count" { @@ -1663,103 +1653,6 @@ func (s *SelectStatement) validateDistinct() error { return nil } -// HasCountDistinct checks if a select statement contains COUNT and DISTINCT -func (s *SelectStatement) HasCountDistinct() bool { - for _, f := range s.Fields { - if c, ok := f.Expr.(*Call); ok { - if c.Name == "count" { - for _, a := range c.Args { - if _, ok := a.(*Distinct); ok { - return true - } - if c, ok := a.(*Call); ok { - if c.Name == "distinct" { - return true - } - } - } - } - } - } - return false -} - -func (s *SelectStatement) validateCountDistinct() error { - if !s.HasCountDistinct() { - return nil - } 
- - valid := func(e Expr) bool { - c, ok := e.(*Call) - if !ok { - return true - } - if c.Name != "count" { - return true - } - for _, a := range c.Args { - if _, ok := a.(*Distinct); ok { - return len(c.Args) == 1 - } - if d, ok := a.(*Call); ok { - if d.Name == "distinct" { - return len(d.Args) == 1 - } - } - } - return true - } - - for _, f := range s.Fields { - if !valid(f.Expr) { - return fmt.Errorf("count(distinct ) can only have one argument") - } - } - - return nil -} - -func (s *SelectStatement) validateDerivative() error { - if !s.HasDerivative() { - return nil - } - - // If a derivative is requested, it must be the only field in the query. We don't support - // multiple fields in combination w/ derivaties yet. - if len(s.Fields) != 1 { - return fmt.Errorf("derivative cannot be used with other fields") - } - - aggr := s.FunctionCalls() - if len(aggr) != 1 { - return fmt.Errorf("derivative cannot be used with other fields") - } - - // Derivative requires two arguments - derivativeCall := aggr[0] - if len(derivativeCall.Args) == 0 { - return fmt.Errorf("derivative requires a field argument") - } - - // First arg must be a field or aggr over a field e.g. (mean(field)) - _, callOk := derivativeCall.Args[0].(*Call) - _, varOk := derivativeCall.Args[0].(*VarRef) - - if !(callOk || varOk) { - return fmt.Errorf("derivative requires a field argument") - } - - // If a duration arg is passed, make sure it's a duration - if len(derivativeCall.Args) == 2 { - // Second must be a duration .e.g (1h) - if _, ok := derivativeCall.Args[1].(*DurationLiteral); !ok { - return fmt.Errorf("derivative requires a duration argument") - } - } - - return nil -} - // GroupByInterval extracts the time interval, if specified. 
func (s *SelectStatement) GroupByInterval() (time.Duration, error) { // return if we've already pulled it out @@ -1775,14 +1668,14 @@ func (s *SelectStatement) GroupByInterval() (time.Duration, error) { for _, d := range s.Dimensions { if call, ok := d.Expr.(*Call); ok && call.Name == "time" { // Make sure there is exactly one argument. - if len(call.Args) != 1 { - return 0, errors.New("time dimension expected one argument") + if got := len(call.Args); got < 1 || got > 2 { + return 0, errors.New("time dimension expected 1 or 2 arguments") } // Ensure the argument is a duration. lit, ok := call.Args[0].(*DurationLiteral) if !ok { - return 0, errors.New("time dimension must have one duration argument") + return 0, errors.New("time dimension must have duration argument") } s.groupByInterval = lit.Val return lit.Val, nil @@ -1791,6 +1684,36 @@ func (s *SelectStatement) GroupByInterval() (time.Duration, error) { return 0, nil } +// GroupByOffset extracts the time interval offset, if specified. +func (s *SelectStatement) GroupByOffset(opt *IteratorOptions) (time.Duration, error) { + interval, err := s.GroupByInterval() + if err != nil { + return 0, err + } + + // Ignore if there are no dimensions. + if len(s.Dimensions) == 0 { + return 0, nil + } + + for _, d := range s.Dimensions { + if call, ok := d.Expr.(*Call); ok && call.Name == "time" { + if len(call.Args) == 2 { + switch expr := call.Args[1].(type) { + case *DurationLiteral: + return expr.Val % interval, nil + case *TimeLiteral: + return expr.Val.Sub(expr.Val.Truncate(interval)), nil + default: + return 0, fmt.Errorf("invalid time dimension offset: %s", expr) + } + } + return 0, nil + } + } + return 0, nil +} + // SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive. // This is used commonly for continuous queries so the start and end are in buckets. 
func (s *SelectStatement) SetTimeRange(start, end time.Time) error { @@ -2213,31 +2136,35 @@ func (s DropSeriesStatement) RequiredPrivileges() ExecutionPrivileges { return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} } -// DropServerStatement represents a command for removing a server from the cluster. -type DropServerStatement struct { - // ID of the node to be dropped. - NodeID uint64 +// DeleteSeriesStatement represents a command for deleting all or part of a series from a database. +type DeleteSeriesStatement struct { + // Data source that fields are extracted from (optional) + Sources Sources - // Meta indicates if the server being dropped is a meta or data node - Meta bool + // An expression evaluated on data point (optional) + Condition Expr } -// String returns a string representation of the drop series statement. -func (s *DropServerStatement) String() string { +// String returns a string representation of the delete series statement. +func (s *DeleteSeriesStatement) String() string { var buf bytes.Buffer - _, _ = buf.WriteString("DROP ") - if s.Meta { - _, _ = buf.WriteString(" META SERVER ") - } else { - _, _ = buf.WriteString(" DATA SERVER ") + buf.WriteString("DELETE") + + if s.Sources != nil { + buf.WriteString(" FROM ") + buf.WriteString(s.Sources.String()) + } + if s.Condition != nil { + buf.WriteString(" WHERE ") + buf.WriteString(s.Condition.String()) } - _, _ = buf.WriteString(strconv.FormatUint(s.NodeID, 10)) + return buf.String() } -// RequiredPrivileges returns the privilege required to execute a DropServerStatement. -func (s *DropServerStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Name: "", Privilege: AllPrivileges}} +// RequiredPrivileges returns the privilege required to execute a DeleteSeriesStatement. 
+func (s DeleteSeriesStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} } // DropShardStatement represents a command for removing a shard from @@ -2292,17 +2219,6 @@ func (s *ShowGrantsForUserStatement) RequiredPrivileges() ExecutionPrivileges { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} } -// ShowServersStatement represents a command for listing all servers. -type ShowServersStatement struct{} - -// String returns a string representation of the show servers command. -func (s *ShowServersStatement) String() string { return "SHOW SERVERS" } - -// RequiredPrivileges returns the privilege required to execute a ShowServersStatement -func (s *ShowServersStatement) RequiredPrivileges() ExecutionPrivileges { - return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} -} - // ShowDatabasesStatement represents a command for listing all databases in the cluster. 
type ShowDatabasesStatement struct{} @@ -3555,6 +3471,10 @@ func Walk(v Visitor, node Node) { Walk(v, c) } + case *DeleteSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + case *DropSeriesStatement: Walk(v, n.Sources) Walk(v, n.Condition) diff --git a/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go index 2cb3ae507..c6aa6013a 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go +++ b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go @@ -137,6 +137,12 @@ func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { return fn, fn } return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMinReduce) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil default: return nil, fmt.Errorf("unsupported min iterator type: %T", input) } @@ -158,6 +164,14 @@ func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { return prev.Time, prev.Value, prev.Aux } +// BooleanMinReduce returns the minimum value between prev & curr. +func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + // newMaxIterator returns an iterator for operating on a max() call. 
func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { switch input := input.(type) { @@ -173,6 +187,12 @@ func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { return fn, fn } return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanMaxReduce) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil default: return nil, fmt.Errorf("unsupported max iterator type: %T", input) } @@ -194,6 +214,14 @@ func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { return prev.Time, prev.Value, prev.Aux } +// BooleanMaxReduce returns the maximum value between prev & curr. +func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + // newSumIterator returns an iterator for operating on a sum() call. func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { switch input := input.(type) { diff --git a/vendor/github.com/influxdata/influxdb/influxql/emitter.go b/vendor/github.com/influxdata/influxdb/influxql/emitter.go index 0ef119e2e..2759ba6d8 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/emitter.go +++ b/vendor/github.com/influxdata/influxdb/influxql/emitter.go @@ -41,20 +41,22 @@ func (e *Emitter) Close() error { } // Emit returns the next row from the iterators. -func (e *Emitter) Emit() *models.Row { +func (e *Emitter) Emit() (*models.Row, error) { // Immediately end emission if there are no iterators. 
if len(e.itrs) == 0 { - return nil + return nil, nil } // Continually read from iterators until they are exhausted. for { // Fill buffer. Return row if no more points remain. - t, name, tags := e.loadBuf() - if t == ZeroTime { + t, name, tags, err := e.loadBuf() + if err != nil { + return nil, err + } else if t == ZeroTime { row := e.row e.row = nil - return row + return row, nil } // Read next set of values from all iterators at a given time/name/tags. @@ -63,7 +65,7 @@ func (e *Emitter) Emit() *models.Row { if values == nil { row := e.row e.row = nil - return row + return row, nil } // If there's no row yet then create one. @@ -77,20 +79,23 @@ func (e *Emitter) Emit() *models.Row { } else { row := e.row e.createRow(name, tags, values) - return row + return row, nil } } } // loadBuf reads in points into empty buffer slots. // Returns the next time/name/tags to emit for. -func (e *Emitter) loadBuf() (t int64, name string, tags Tags) { +func (e *Emitter) loadBuf() (t int64, name string, tags Tags, err error) { t = ZeroTime for i := range e.itrs { // Load buffer, if empty. if e.buf[i] == nil { - e.buf[i] = e.readIterator(e.itrs[i]) + e.buf[i], err = e.readIterator(e.itrs[i]) + if err != nil { + break + } } // Skip if buffer is empty. @@ -173,30 +178,38 @@ func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} { } // readIterator reads the next point from itr. 
-func (e *Emitter) readIterator(itr Iterator) Point { +func (e *Emitter) readIterator(itr Iterator) (Point, error) { if itr == nil { - return nil + return nil, nil } switch itr := itr.(type) { case FloatIterator: - if p := itr.Next(); p != nil { - return p + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil } case IntegerIterator: - if p := itr.Next(); p != nil { - return p + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil } case StringIterator: - if p := itr.Next(); p != nil { - return p + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil } case BooleanIterator: - if p := itr.Next(); p != nil { - return p + if p, err := itr.Next(); err != nil { + return nil, err + } else if p != nil { + return p, nil } default: panic(fmt.Sprintf("unsupported iterator: %T", itr)) } - return nil + return nil, nil } diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.go b/vendor/github.com/influxdata/influxdb/influxql/functions.go index ffd8402bc..40661f2f4 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/functions.go +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.go @@ -98,11 +98,7 @@ func (r *FloatDerivativeReducer) Emit() []FloatPoint { if !r.ascending { elapsed = -elapsed } - - value := 0.0 - if elapsed > 0 { - value = diff / (float64(elapsed) / float64(r.interval.Duration)) - } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) // Drop negative values for non-negative derivatives. if r.isNonNegative && diff < 0 { @@ -149,11 +145,7 @@ func (r *IntegerDerivativeReducer) Emit() []FloatPoint { if !r.ascending { elapsed = -elapsed } - - value := 0.0 - if elapsed > 0 { - value = diff / (float64(elapsed) / float64(r.interval.Duration)) - } + value := diff / (float64(elapsed) / float64(r.interval.Duration)) // Drop negative values for non-negative derivatives. 
if r.isNonNegative && diff < 0 { diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go index fe6a70a73..95db26b8d 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go @@ -12,7 +12,6 @@ import ( "errors" "fmt" "io" - "log" "sort" "sync" "time" @@ -27,7 +26,7 @@ const DefaultStatsInterval = 10 * time.Second // FloatIterator represents a stream of float points. type FloatIterator interface { Iterator - Next() *FloatPoint + Next() (*FloatPoint, error) } // newFloatIterators converts a slice of Iterator to a slice of FloatIterator. @@ -68,43 +67,46 @@ func (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() } func (itr *bufFloatIterator) Close() error { return itr.itr.Close() } // peek returns the next point without removing it from the iterator. -func (itr *bufFloatIterator) peek() *FloatPoint { - p := itr.Next() +func (itr *bufFloatIterator) peek() (*FloatPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } itr.unread(p) - return p + return p, nil } // peekTime returns the time of the next point. // Returns zero time if no more points available. -func (itr *bufFloatIterator) peekTime() int64 { - p := itr.peek() - if p == nil { - return ZeroTime +func (itr *bufFloatIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err } - return p.Time + return p.Time, nil } // Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufFloatIterator) Next() *FloatPoint { - if itr.buf != nil { - buf := itr.buf +func (itr *bufFloatIterator) Next() (*FloatPoint, error) { + buf := itr.buf + if buf != nil { itr.buf = nil - return buf + return buf, nil } return itr.itr.Next() } // NextInWindow returns the next value if it is between [startTime, endTime). 
// If the next value is outside the range then it is moved to the buffer. -func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) *FloatPoint { - v := itr.Next() - if v == nil { - return nil - } else if v.Time < startTime || v.Time >= endTime { +func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { itr.unread(v) - return nil + return nil, nil } - return v + return v, nil } // unread sets v to the buffer. It is read on the next call to Next(). @@ -166,7 +168,7 @@ func (itr *floatMergeIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *floatMergeIterator) Next() *FloatPoint { +func (itr *floatMergeIterator) Next() (*FloatPoint, error) { // Initialize the heap. This needs to be done lazily on the first call to this iterator // so that iterator initialization done through the Select() call returns quickly. // Queries can only be interrupted after the Select() call completes so any operations @@ -176,7 +178,9 @@ func (itr *floatMergeIterator) Next() *FloatPoint { items := itr.heap.items itr.heap.items = make([]*floatMergeHeapItem, 0, len(items)) for _, item := range items { - if item.itr.peek() == nil { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { continue } itr.heap.items = append(itr.heap.items, item) @@ -189,19 +193,25 @@ func (itr *floatMergeIterator) Next() *FloatPoint { // Retrieve the next iterator if we don't have one. if itr.curr == nil { if len(itr.heap.items) == 0 { - return nil + return nil, nil } itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem) // Read point and set current window. 
- p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } itr.window.name, itr.window.tags = p.Name, p.Tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p + return p, nil } // Read the next point from the current iterator. - p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } // If there are no more points then remove iterator from heap and find next. if p == nil { @@ -211,13 +221,13 @@ func (itr *floatMergeIterator) Next() *FloatPoint { // Check if the point is inside of our current window. inWindow := true - if itr.window.name != p.Name { + if window := itr.window; window.name != p.Name { inWindow = false - } else if itr.window.tags != p.Tags.ID() { + } else if window.tags != p.Tags.ID() { inWindow = false - } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false - } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + } else if !opt.Ascending && p.Time < window.startTime { inWindow = false } @@ -229,7 +239,7 @@ func (itr *floatMergeIterator) Next() *FloatPoint { continue } - return p + return p, nil } } @@ -243,7 +253,14 @@ type floatMergeHeap struct { func (h floatMergeHeap) Len() int { return len(h.items) } func (h floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } func (h floatMergeHeap) Less(i, j int) bool { - x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } if h.opt.Ascending { if x.Name != y.Name { @@ -282,6 +299,7 @@ func (h *floatMergeHeap) Pop() interface{} { type floatMergeHeapItem struct { itr *bufFloatIterator + err error } // floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. 
@@ -289,6 +307,7 @@ type floatSortedMergeIterator struct { inputs []FloatIterator opt IteratorOptions heap floatSortedMergeHeap + init bool } // newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator. @@ -299,18 +318,11 @@ func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) It opt: opt, } - // Initialize heap. + // Initialize heap items. for _, input := range inputs { - // Read next point. - p := input.Next() - if p == nil { - continue - } - // Append to the heap. - itr.heap = append(itr.heap, &floatSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + itr.heap = append(itr.heap, &floatSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) } - heap.Init(&itr.heap) return itr } @@ -333,27 +345,47 @@ func (itr *floatSortedMergeIterator) Close() error { } // Next returns the next points from the iterator. -func (itr *floatSortedMergeIterator) Next() *FloatPoint { return itr.pop() } +func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() } // pop returns the next point from the heap. // Reads the next point from item's cursor and puts it back on the heap. -func (itr *floatSortedMergeIterator) pop() *FloatPoint { +func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*floatSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + if len(itr.heap) == 0 { - return nil + return nil, nil } // Read the next item from the heap. item := heap.Pop(&itr.heap).(*floatSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } // Copy the point for return. 
p := item.point.Clone() // Read the next item from the cursor. Push back to heap if one exists. - if item.point = item.itr.Next(); item.point != nil { + if item.point, item.err = item.itr.Next(); item.point != nil { heap.Push(&itr.heap, item) } - return p + return p, nil } // floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. @@ -395,6 +427,7 @@ func (h *floatSortedMergeHeap) Pop() interface{} { type floatSortedMergeHeapItem struct { point *FloatPoint + err error itr FloatIterator ascending bool } @@ -426,11 +459,11 @@ func (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() func (itr *floatLimitIterator) Close() error { return itr.input.Close() } // Next returns the next point from the iterator. -func (itr *floatLimitIterator) Next() *FloatPoint { +func (itr *floatLimitIterator) Next() (*FloatPoint, error) { for { - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Reset window and counter if a new window is encountered. @@ -452,12 +485,12 @@ func (itr *floatLimitIterator) Next() *FloatPoint { if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { // If there's no interval, no groups, and a single source then simply exit. 
if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil + return nil, nil } continue } - return p + return p, nil } } @@ -467,6 +500,7 @@ type floatFillIterator struct { startTime int64 endTime int64 auxFields []interface{} + init bool opt IteratorOptions window struct { @@ -498,33 +532,33 @@ func newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) * auxFields = make([]interface{}, len(opt.Aux)) } - itr := &floatFillIterator{ + return &floatFillIterator{ input: newBufFloatIterator(input), startTime: startTime, endTime: endTime, auxFields: auxFields, opt: opt, } - - p := itr.input.peek() - if p != nil { - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - } else { - if opt.Ascending { - itr.window.time = itr.endTime + 1 - } else { - itr.window.time = itr.endTime - 1 - } - } - return itr } func (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *floatFillIterator) Close() error { return itr.input.Close() } -func (itr *floatFillIterator) Next() *FloatPoint { - p := itr.input.Next() +func (itr *floatFillIterator) Next() (*FloatPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } // Check if the next point is outside of our window or is nil. for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { @@ -547,7 +581,7 @@ func (itr *floatFillIterator) Next() *FloatPoint { // We are *not* in a current interval. If there is no next point, // we are at the end of all intervals. if p == nil { - return nil + return nil, nil } // Set the new interval. 
@@ -595,7 +629,7 @@ func (itr *floatFillIterator) Next() *FloatPoint { } else { itr.window.time = p.Time - int64(itr.opt.Interval.Duration) } - return p + return p, nil } // floatIntervalIterator represents a float implementation of IntervalIterator. @@ -611,13 +645,13 @@ func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIn func (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *floatIntervalIterator) Close() error { return itr.input.Close() } -func (itr *floatIntervalIterator) Next() *FloatPoint { - p := itr.input.Next() - if p == nil { - return p +func (itr *floatIntervalIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } p.Time, _ = itr.opt.Window(p.Time) - return p + return p, nil } // floatInterruptIterator represents a float implementation of InterruptIterator. @@ -634,15 +668,15 @@ func newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *fl func (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *floatInterruptIterator) Close() error { return itr.input.Close() } -func (itr *floatInterruptIterator) Next() *FloatPoint { - // Only check if the channel is closed every 256 points. This - // intentionally checks on both 0 and 256 so that if the iterator +func (itr *floatInterruptIterator) Next() (*FloatPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator // has been interrupted before the first point is emitted it will // not emit any points. - if itr.count&0x100 == 0 { + if itr.count&0xFF == 0xFF { select { case <-itr.closing: - return nil + return nil, nil default: // Reset iterator count to zero and fall through to emit the next point. 
itr.count = 0 @@ -654,10 +688,16 @@ func (itr *floatInterruptIterator) Next() *FloatPoint { return itr.input.Next() } +// auxFloatPoint represents a combination of a point and an error for the AuxIterator. +type auxFloatPoint struct { + point *FloatPoint + err error +} + // floatAuxIterator represents a float implementation of AuxIterator. type floatAuxIterator struct { input *bufFloatIterator - output chan *FloatPoint + output chan auxFloatPoint fields auxIteratorFields background bool } @@ -665,7 +705,7 @@ type floatAuxIterator struct { func newFloatAuxIterator(input FloatIterator, seriesKeys SeriesList, opt IteratorOptions) *floatAuxIterator { return &floatAuxIterator{ input: newBufFloatIterator(input), - output: make(chan *FloatPoint, 1), + output: make(chan auxFloatPoint, 1), fields: newAuxIteratorFields(seriesKeys, opt), } } @@ -676,10 +716,13 @@ func (itr *floatAuxIterator) Background() { go DrainIterator(itr) } -func (itr *floatAuxIterator) Start() { go itr.stream() } -func (itr *floatAuxIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *floatAuxIterator) Close() error { return itr.input.Close() } -func (itr *floatAuxIterator) Next() *FloatPoint { return <-itr.output } +func (itr *floatAuxIterator) Start() { go itr.stream() } +func (itr *floatAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *floatAuxIterator) Close() error { return itr.input.Close() } +func (itr *floatAuxIterator) Next() (*FloatPoint, error) { + p := <-itr.output + return p.point, p.err +} func (itr *floatAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } func (itr *floatAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { @@ -711,13 +754,17 @@ func (itr *floatAuxIterator) ExpandSources(sources Sources) (Sources, error) { func (itr *floatAuxIterator) stream() { for { // Read next point. 
- p := itr.input.Next() - if p == nil { + p, err := itr.input.Next() + if err != nil { + itr.output <- auxFloatPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { break } // Send point to output and to each field iterator. - itr.output <- p + itr.output <- auxFloatPoint{point: p} if ok := itr.fields.send(p); !ok && itr.background { break } @@ -734,6 +781,7 @@ type floatChanIterator struct { filled bool points [2]FloatPoint } + err error cond *sync.Cond done bool } @@ -784,8 +832,23 @@ func (itr *floatChanIterator) setBuf(name string, tags Tags, time int64, value i return true } -func (itr *floatChanIterator) Next() *FloatPoint { +func (itr *floatChanIterator) setErr(err error) { itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *floatChanIterator) Next() (*FloatPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } // Wait until either a value is available in the buffer or // the iterator is closed. @@ -795,8 +858,7 @@ func (itr *floatChanIterator) Next() *FloatPoint { // Return nil once the channel is done and the buffer is empty. if itr.done && !itr.buf.filled { - itr.cond.L.Unlock() - return nil + return nil, nil } // Always read from the buffer if it exists, even if the iterator @@ -806,10 +868,7 @@ func (itr *floatChanIterator) Next() *FloatPoint { itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) itr.buf.filled = false itr.cond.Signal() - - // Do not defer the unlock so we don't create an unnecessary allocation. - itr.cond.L.Unlock() - return p + return p, nil } // floatReduceFloatIterator executes a reducer for every interval and buffers the result. 
@@ -827,19 +886,20 @@ func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.St func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *floatReduceFloatIterator) Next() *FloatPoint { +func (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // floatReduceFloatPoint stores the reduced data for a name/tag combination. @@ -852,16 +912,22 @@ type floatReduceFloatPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *floatReduceFloatIterator) reduce() []FloatPoint { +func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceFloatPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -912,7 +978,7 @@ func (itr *floatReduceFloatIterator) reduce() []FloatPoint { } } - return a + return a, nil } // floatStreamFloatIterator streams inputs into the iterator and emits points gradually. 
@@ -941,29 +1007,30 @@ func (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.St func (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *floatStreamFloatIterator) Next() *FloatPoint { +func (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamFloatIterator) reduce() []FloatPoint { +func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -998,7 +1065,7 @@ func (itr *floatStreamFloatIterator) reduce() []FloatPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -1022,13 +1089,18 @@ func (itr *floatExprIterator) Close() error { return nil } -func (itr *floatExprIterator) Next() *FloatPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *floatExprIterator) Next() (*FloatPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // floatExprFunc creates or modifies a point by combining two @@ -1052,19 +1124,20 @@ func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input. func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *floatReduceIntegerIterator) Next() *IntegerPoint { +func (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // floatReduceIntegerPoint stores the reduced data for a name/tag combination. @@ -1077,16 +1150,22 @@ type floatReduceIntegerPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *floatReduceIntegerIterator) reduce() []IntegerPoint { +func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceIntegerPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -1137,7 +1216,7 @@ func (itr *floatReduceIntegerIterator) reduce() []IntegerPoint { } } - return a + return a, nil } // floatStreamIntegerIterator streams inputs into the iterator and emits points gradually. @@ -1166,29 +1245,30 @@ func (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input. func (itr *floatStreamIntegerIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *floatStreamIntegerIterator) Next() *IntegerPoint { +func (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamIntegerIterator) reduce() []IntegerPoint { +func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -1223,7 +1303,7 @@ func (itr *floatStreamIntegerIterator) reduce() []IntegerPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -1247,13 +1327,18 @@ func (itr *floatIntegerExprIterator) Close() error { return nil } -func (itr *floatIntegerExprIterator) Next() *IntegerPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *floatIntegerExprIterator) Next() (*IntegerPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err } - return itr.fn(a, b) + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil + } + return itr.fn(a, b), nil } // floatIntegerExprFunc creates or modifies a point by combining two @@ -1277,19 +1362,20 @@ func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.S func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *floatReduceStringIterator) Next() *StringPoint { +func (itr *floatReduceStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // floatReduceStringPoint stores the reduced data for a name/tag combination. @@ -1302,16 +1388,22 @@ type floatReduceStringPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *floatReduceStringIterator) reduce() []StringPoint { +func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceStringPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -1362,7 +1454,7 @@ func (itr *floatReduceStringIterator) reduce() []StringPoint { } } - return a + return a, nil } // floatStreamStringIterator streams inputs into the iterator and emits points gradually. @@ -1391,29 +1483,30 @@ func (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.S func (itr *floatStreamStringIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *floatStreamStringIterator) Next() *StringPoint { +func (itr *floatStreamStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamStringIterator) reduce() []StringPoint { +func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -1448,7 +1541,7 @@ func (itr *floatStreamStringIterator) reduce() []StringPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -1472,13 +1565,18 @@ func (itr *floatStringExprIterator) Close() error { return nil } -func (itr *floatStringExprIterator) Next() *StringPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *floatStringExprIterator) Next() (*StringPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // floatStringExprFunc creates or modifies a point by combining two @@ -1502,19 +1600,20 @@ func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input. func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *floatReduceBooleanIterator) Next() *BooleanPoint { +func (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // floatReduceBooleanPoint stores the reduced data for a name/tag combination. @@ -1527,16 +1626,22 @@ type floatReduceBooleanPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *floatReduceBooleanIterator) reduce() []BooleanPoint { +func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*floatReduceBooleanPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -1587,7 +1692,7 @@ func (itr *floatReduceBooleanIterator) reduce() []BooleanPoint { } } - return a + return a, nil } // floatStreamBooleanIterator streams inputs into the iterator and emits points gradually. @@ -1616,29 +1721,30 @@ func (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input. func (itr *floatStreamBooleanIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *floatStreamBooleanIterator) Next() *BooleanPoint { +func (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamBooleanIterator) reduce() []BooleanPoint { +func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -1673,7 +1779,7 @@ func (itr *floatStreamBooleanIterator) reduce() []BooleanPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -1697,13 +1803,18 @@ func (itr *floatBooleanExprIterator) Close() error { return nil } -func (itr *floatBooleanExprIterator) Next() *BooleanPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *floatBooleanExprIterator) Next() (*BooleanPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // floatBooleanExprFunc creates or modifies a point by combining two @@ -1726,12 +1837,14 @@ func (itr *floatTransformIterator) Stats() IteratorStats { return itr.input.Stat func (itr *floatTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *floatTransformIterator) Next() *FloatPoint { - p := itr.input.Next() - if p != nil { +func (itr *floatTransformIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { p = itr.fn(p) } - return p + return p, nil } // floatTransformFunc creates or modifies a point. @@ -1753,12 +1866,14 @@ func (itr *floatBoolTransformIterator) Stats() IteratorStats { return itr.input. func (itr *floatBoolTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. 
-func (itr *floatBoolTransformIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p != nil { - return itr.fn(p) +func (itr *floatBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil } - return nil + return nil, nil } // floatBoolTransformFunc creates or modifies a point. @@ -1790,19 +1905,18 @@ func (itr *floatDedupeIterator) Stats() IteratorStats { return itr.input.Stats() func (itr *floatDedupeIterator) Close() error { return itr.input.Close() } // Next returns the next unique point from the input iterator. -func (itr *floatDedupeIterator) Next() *FloatPoint { +func (itr *floatDedupeIterator) Next() (*FloatPoint, error) { for { // Read next point. - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Serialize to bytes to store in lookup. buf, err := proto.Marshal(encodeFloatPoint(p)) if err != nil { - log.Println("error marshaling dedupe point:", err) - continue + return nil, err } // If the point has already been output then move to the next point. @@ -1812,7 +1926,7 @@ func (itr *floatDedupeIterator) Next() *FloatPoint { // Otherwise mark it as emitted and return point. itr.m[string(buf)] = struct{}{} - return p + return p, nil } } @@ -1845,24 +1959,23 @@ func (itr *floatReaderIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *floatReaderIterator) Next() *FloatPoint { +func (itr *floatReaderIterator) Next() (*FloatPoint, error) { // OPTIMIZE(benbjohnson): Reuse point on iterator. // Unmarshal next point. p := &FloatPoint{} if err := itr.dec.DecodeFloatPoint(p); err == io.EOF { - return nil + return nil, nil } else if err != nil { - log.Printf("error reading iterator point: %s", err) - return nil + return nil, err } - return p + return p, nil } // IntegerIterator represents a stream of integer points. 
type IntegerIterator interface { Iterator - Next() *IntegerPoint + Next() (*IntegerPoint, error) } // newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator. @@ -1900,43 +2013,46 @@ func (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() } func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() } // peek returns the next point without removing it from the iterator. -func (itr *bufIntegerIterator) peek() *IntegerPoint { - p := itr.Next() +func (itr *bufIntegerIterator) peek() (*IntegerPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } itr.unread(p) - return p + return p, nil } // peekTime returns the time of the next point. // Returns zero time if no more points available. -func (itr *bufIntegerIterator) peekTime() int64 { - p := itr.peek() - if p == nil { - return ZeroTime +func (itr *bufIntegerIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err } - return p.Time + return p.Time, nil } // Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufIntegerIterator) Next() *IntegerPoint { - if itr.buf != nil { - buf := itr.buf +func (itr *bufIntegerIterator) Next() (*IntegerPoint, error) { + buf := itr.buf + if buf != nil { itr.buf = nil - return buf + return buf, nil } return itr.itr.Next() } // NextInWindow returns the next value if it is between [startTime, endTime). // If the next value is outside the range then it is moved to the buffer. 
-func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) *IntegerPoint { - v := itr.Next() - if v == nil { - return nil - } else if v.Time < startTime || v.Time >= endTime { +func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { itr.unread(v) - return nil + return nil, nil } - return v + return v, nil } // unread sets v to the buffer. It is read on the next call to Next(). @@ -1998,7 +2114,7 @@ func (itr *integerMergeIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *integerMergeIterator) Next() *IntegerPoint { +func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { // Initialize the heap. This needs to be done lazily on the first call to this iterator // so that iterator initialization done through the Select() call returns quickly. // Queries can only be interrupted after the Select() call completes so any operations @@ -2008,7 +2124,9 @@ func (itr *integerMergeIterator) Next() *IntegerPoint { items := itr.heap.items itr.heap.items = make([]*integerMergeHeapItem, 0, len(items)) for _, item := range items { - if item.itr.peek() == nil { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { continue } itr.heap.items = append(itr.heap.items, item) @@ -2021,19 +2139,25 @@ func (itr *integerMergeIterator) Next() *IntegerPoint { // Retrieve the next iterator if we don't have one. if itr.curr == nil { if len(itr.heap.items) == 0 { - return nil + return nil, nil } itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem) // Read point and set current window. 
- p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } itr.window.name, itr.window.tags = p.Name, p.Tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p + return p, nil } // Read the next point from the current iterator. - p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } // If there are no more points then remove iterator from heap and find next. if p == nil { @@ -2043,13 +2167,13 @@ func (itr *integerMergeIterator) Next() *IntegerPoint { // Check if the point is inside of our current window. inWindow := true - if itr.window.name != p.Name { + if window := itr.window; window.name != p.Name { inWindow = false - } else if itr.window.tags != p.Tags.ID() { + } else if window.tags != p.Tags.ID() { inWindow = false - } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false - } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + } else if !opt.Ascending && p.Time < window.startTime { inWindow = false } @@ -2061,7 +2185,7 @@ func (itr *integerMergeIterator) Next() *IntegerPoint { continue } - return p + return p, nil } } @@ -2075,7 +2199,14 @@ type integerMergeHeap struct { func (h integerMergeHeap) Len() int { return len(h.items) } func (h integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } func (h integerMergeHeap) Less(i, j int) bool { - x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } if h.opt.Ascending { if x.Name != y.Name { @@ -2114,6 +2245,7 @@ func (h *integerMergeHeap) Pop() interface{} { type integerMergeHeapItem struct { itr *bufIntegerIterator + err error } // integerSortedMergeIterator is an iterator that sorts and merges multiple 
iterators into one. @@ -2121,6 +2253,7 @@ type integerSortedMergeIterator struct { inputs []IntegerIterator opt IteratorOptions heap integerSortedMergeHeap + init bool } // newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator. @@ -2131,18 +2264,11 @@ func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions opt: opt, } - // Initialize heap. + // Initialize heap items. for _, input := range inputs { - // Read next point. - p := input.Next() - if p == nil { - continue - } - // Append to the heap. - itr.heap = append(itr.heap, &integerSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + itr.heap = append(itr.heap, &integerSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) } - heap.Init(&itr.heap) return itr } @@ -2165,27 +2291,47 @@ func (itr *integerSortedMergeIterator) Close() error { } // Next returns the next points from the iterator. -func (itr *integerSortedMergeIterator) Next() *IntegerPoint { return itr.pop() } +func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() } // pop returns the next point from the heap. // Reads the next point from item's cursor and puts it back on the heap. -func (itr *integerSortedMergeIterator) pop() *IntegerPoint { +func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*integerSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + if len(itr.heap) == 0 { - return nil + return nil, nil } // Read the next item from the heap. 
item := heap.Pop(&itr.heap).(*integerSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } // Copy the point for return. p := item.point.Clone() // Read the next item from the cursor. Push back to heap if one exists. - if item.point = item.itr.Next(); item.point != nil { + if item.point, item.err = item.itr.Next(); item.point != nil { heap.Push(&itr.heap, item) } - return p + return p, nil } // integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. @@ -2227,6 +2373,7 @@ func (h *integerSortedMergeHeap) Pop() interface{} { type integerSortedMergeHeapItem struct { point *IntegerPoint + err error itr IntegerIterator ascending bool } @@ -2258,11 +2405,11 @@ func (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats( func (itr *integerLimitIterator) Close() error { return itr.input.Close() } // Next returns the next point from the iterator. -func (itr *integerLimitIterator) Next() *IntegerPoint { +func (itr *integerLimitIterator) Next() (*IntegerPoint, error) { for { - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Reset window and counter if a new window is encountered. @@ -2284,12 +2431,12 @@ func (itr *integerLimitIterator) Next() *IntegerPoint { if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { // If there's no interval, no groups, and a single source then simply exit. 
if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil + return nil, nil } continue } - return p + return p, nil } } @@ -2299,6 +2446,7 @@ type integerFillIterator struct { startTime int64 endTime int64 auxFields []interface{} + init bool opt IteratorOptions window struct { @@ -2330,33 +2478,33 @@ func newIntegerFillIterator(input IntegerIterator, expr Expr, opt IteratorOption auxFields = make([]interface{}, len(opt.Aux)) } - itr := &integerFillIterator{ + return &integerFillIterator{ input: newBufIntegerIterator(input), startTime: startTime, endTime: endTime, auxFields: auxFields, opt: opt, } - - p := itr.input.peek() - if p != nil { - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - } else { - if opt.Ascending { - itr.window.time = itr.endTime + 1 - } else { - itr.window.time = itr.endTime - 1 - } - } - return itr } func (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *integerFillIterator) Close() error { return itr.input.Close() } -func (itr *integerFillIterator) Next() *IntegerPoint { - p := itr.input.Next() +func (itr *integerFillIterator) Next() (*IntegerPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } // Check if the next point is outside of our window or is nil. for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { @@ -2379,7 +2527,7 @@ func (itr *integerFillIterator) Next() *IntegerPoint { // We are *not* in a current interval. If there is no next point, // we are at the end of all intervals. if p == nil { - return nil + return nil, nil } // Set the new interval. 
@@ -2427,7 +2575,7 @@ func (itr *integerFillIterator) Next() *IntegerPoint { } else { itr.window.time = p.Time - int64(itr.opt.Interval.Duration) } - return p + return p, nil } // integerIntervalIterator represents a integer implementation of IntervalIterator. @@ -2443,13 +2591,13 @@ func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *int func (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *integerIntervalIterator) Close() error { return itr.input.Close() } -func (itr *integerIntervalIterator) Next() *IntegerPoint { - p := itr.input.Next() - if p == nil { - return p +func (itr *integerIntervalIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } p.Time, _ = itr.opt.Window(p.Time) - return p + return p, nil } // integerInterruptIterator represents a integer implementation of InterruptIterator. @@ -2466,15 +2614,15 @@ func newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) func (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *integerInterruptIterator) Close() error { return itr.input.Close() } -func (itr *integerInterruptIterator) Next() *IntegerPoint { - // Only check if the channel is closed every 256 points. This - // intentionally checks on both 0 and 256 so that if the iterator +func (itr *integerInterruptIterator) Next() (*IntegerPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator // has been interrupted before the first point is emitted it will // not emit any points. - if itr.count&0x100 == 0 { + if itr.count&0xFF == 0xFF { select { case <-itr.closing: - return nil + return nil, nil default: // Reset iterator count to zero and fall through to emit the next point. 
itr.count = 0 @@ -2486,10 +2634,16 @@ func (itr *integerInterruptIterator) Next() *IntegerPoint { return itr.input.Next() } +// auxIntegerPoint represents a combination of a point and an error for the AuxIterator. +type auxIntegerPoint struct { + point *IntegerPoint + err error +} + // integerAuxIterator represents a integer implementation of AuxIterator. type integerAuxIterator struct { input *bufIntegerIterator - output chan *IntegerPoint + output chan auxIntegerPoint fields auxIteratorFields background bool } @@ -2497,7 +2651,7 @@ type integerAuxIterator struct { func newIntegerAuxIterator(input IntegerIterator, seriesKeys SeriesList, opt IteratorOptions) *integerAuxIterator { return &integerAuxIterator{ input: newBufIntegerIterator(input), - output: make(chan *IntegerPoint, 1), + output: make(chan auxIntegerPoint, 1), fields: newAuxIteratorFields(seriesKeys, opt), } } @@ -2508,10 +2662,13 @@ func (itr *integerAuxIterator) Background() { go DrainIterator(itr) } -func (itr *integerAuxIterator) Start() { go itr.stream() } -func (itr *integerAuxIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *integerAuxIterator) Close() error { return itr.input.Close() } -func (itr *integerAuxIterator) Next() *IntegerPoint { return <-itr.output } +func (itr *integerAuxIterator) Start() { go itr.stream() } +func (itr *integerAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *integerAuxIterator) Close() error { return itr.input.Close() } +func (itr *integerAuxIterator) Next() (*IntegerPoint, error) { + p := <-itr.output + return p.point, p.err +} func (itr *integerAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } func (itr *integerAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { @@ -2543,13 +2700,17 @@ func (itr *integerAuxIterator) ExpandSources(sources Sources) (Sources, error) { func (itr *integerAuxIterator) stream() { for { // Read next point. 
- p := itr.input.Next() - if p == nil { + p, err := itr.input.Next() + if err != nil { + itr.output <- auxIntegerPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { break } // Send point to output and to each field iterator. - itr.output <- p + itr.output <- auxIntegerPoint{point: p} if ok := itr.fields.send(p); !ok && itr.background { break } @@ -2566,6 +2727,7 @@ type integerChanIterator struct { filled bool points [2]IntegerPoint } + err error cond *sync.Cond done bool } @@ -2613,8 +2775,23 @@ func (itr *integerChanIterator) setBuf(name string, tags Tags, time int64, value return true } -func (itr *integerChanIterator) Next() *IntegerPoint { +func (itr *integerChanIterator) setErr(err error) { itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *integerChanIterator) Next() (*IntegerPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } // Wait until either a value is available in the buffer or // the iterator is closed. @@ -2624,8 +2801,7 @@ func (itr *integerChanIterator) Next() *IntegerPoint { // Return nil once the channel is done and the buffer is empty. if itr.done && !itr.buf.filled { - itr.cond.L.Unlock() - return nil + return nil, nil } // Always read from the buffer if it exists, even if the iterator @@ -2635,10 +2811,7 @@ func (itr *integerChanIterator) Next() *IntegerPoint { itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) itr.buf.filled = false itr.cond.Signal() - - // Do not defer the unlock so we don't create an unnecessary allocation. - itr.cond.L.Unlock() - return p + return p, nil } // integerReduceFloatIterator executes a reducer for every interval and buffers the result. @@ -2656,19 +2829,20 @@ func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input. 
func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *integerReduceFloatIterator) Next() *FloatPoint { +func (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // integerReduceFloatPoint stores the reduced data for a name/tag combination. @@ -2681,16 +2855,22 @@ type integerReduceFloatPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *integerReduceFloatIterator) reduce() []FloatPoint { +func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceFloatPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -2741,7 +2921,7 @@ func (itr *integerReduceFloatIterator) reduce() []FloatPoint { } } - return a + return a, nil } // integerStreamFloatIterator streams inputs into the iterator and emits points gradually. @@ -2770,29 +2950,30 @@ func (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input. 
func (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *integerStreamFloatIterator) Next() *FloatPoint { +func (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamFloatIterator) reduce() []FloatPoint { +func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) { for { // Read next point. - curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -2827,7 +3008,7 @@ func (itr *integerStreamFloatIterator) reduce() []FloatPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -2851,13 +3032,18 @@ func (itr *integerFloatExprIterator) Close() error { return nil } -func (itr *integerFloatExprIterator) Next() *FloatPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *integerFloatExprIterator) Next() (*FloatPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // integerFloatExprFunc creates or modifies a point by combining two @@ -2881,19 +3067,20 @@ func (itr *integerReduceIntegerIterator) Stats() IteratorStats { 
return itr.inpu func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *integerReduceIntegerIterator) Next() *IntegerPoint { +func (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // integerReduceIntegerPoint stores the reduced data for a name/tag combination. @@ -2906,16 +3093,22 @@ type integerReduceIntegerPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *integerReduceIntegerIterator) reduce() []IntegerPoint { +func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceIntegerPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -2966,7 +3159,7 @@ func (itr *integerReduceIntegerIterator) reduce() []IntegerPoint { } } - return a + return a, nil } // integerStreamIntegerIterator streams inputs into the iterator and emits points gradually. 
@@ -2995,29 +3188,30 @@ func (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.inpu func (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *integerStreamIntegerIterator) Next() *IntegerPoint { +func (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamIntegerIterator) reduce() []IntegerPoint { +func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -3052,7 +3246,7 @@ func (itr *integerStreamIntegerIterator) reduce() []IntegerPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -3076,13 +3270,18 @@ func (itr *integerExprIterator) Close() error { return nil } -func (itr *integerExprIterator) Next() *IntegerPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *integerExprIterator) Next() (*IntegerPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err } - return itr.fn(a, b) + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil + } + return itr.fn(a, b), nil } // integerExprFunc creates or modifies a point by combining two @@ -3106,19 +3305,20 @@ func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *integerReduceStringIterator) Next() *StringPoint { +func (itr *integerReduceStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // integerReduceStringPoint stores the reduced data for a name/tag combination. @@ -3131,16 +3331,22 @@ type integerReduceStringPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *integerReduceStringIterator) reduce() []StringPoint { +func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceStringPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -3191,7 +3397,7 @@ func (itr *integerReduceStringIterator) reduce() []StringPoint { } } - return a + return a, nil } // integerStreamStringIterator streams inputs into the iterator and emits points gradually. @@ -3220,29 +3426,30 @@ func (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input func (itr *integerStreamStringIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *integerStreamStringIterator) Next() *StringPoint { +func (itr *integerStreamStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamStringIterator) reduce() []StringPoint { +func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -3277,7 +3484,7 @@ func (itr *integerStreamStringIterator) reduce() []StringPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -3301,13 +3508,18 @@ func (itr *integerStringExprIterator) Close() error { return nil } -func (itr *integerStringExprIterator) Next() *StringPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *integerStringExprIterator) Next() (*StringPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // integerStringExprFunc creates or modifies a point by combining two @@ -3331,19 +3543,20 @@ func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.inpu func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *integerReduceBooleanIterator) Next() *BooleanPoint { +func (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // integerReduceBooleanPoint stores the reduced data for a name/tag combination. @@ -3356,16 +3569,22 @@ type integerReduceBooleanPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *integerReduceBooleanIterator) reduce() []BooleanPoint { +func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*integerReduceBooleanPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -3416,7 +3635,7 @@ func (itr *integerReduceBooleanIterator) reduce() []BooleanPoint { } } - return a + return a, nil } // integerStreamBooleanIterator streams inputs into the iterator and emits points gradually. @@ -3445,29 +3664,30 @@ func (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.inpu func (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *integerStreamBooleanIterator) Next() *BooleanPoint { +func (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamBooleanIterator) reduce() []BooleanPoint { +func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -3502,7 +3722,7 @@ func (itr *integerStreamBooleanIterator) reduce() []BooleanPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -3526,13 +3746,18 @@ func (itr *integerBooleanExprIterator) Close() error { return nil } -func (itr *integerBooleanExprIterator) Next() *BooleanPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *integerBooleanExprIterator) Next() (*BooleanPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // integerBooleanExprFunc creates or modifies a point by combining two @@ -3555,12 +3780,14 @@ func (itr *integerTransformIterator) Stats() IteratorStats { return itr.input.St func (itr *integerTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *integerTransformIterator) Next() *IntegerPoint { - p := itr.input.Next() - if p != nil { +func (itr *integerTransformIterator) Next() (*IntegerPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { p = itr.fn(p) } - return p + return p, nil } // integerTransformFunc creates or modifies a point. @@ -3582,12 +3809,14 @@ func (itr *integerBoolTransformIterator) Stats() IteratorStats { return itr.inpu func (itr *integerBoolTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. 
-func (itr *integerBoolTransformIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p != nil { - return itr.fn(p) +func (itr *integerBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil } - return nil + return nil, nil } // integerBoolTransformFunc creates or modifies a point. @@ -3619,19 +3848,18 @@ func (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats func (itr *integerDedupeIterator) Close() error { return itr.input.Close() } // Next returns the next unique point from the input iterator. -func (itr *integerDedupeIterator) Next() *IntegerPoint { +func (itr *integerDedupeIterator) Next() (*IntegerPoint, error) { for { // Read next point. - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Serialize to bytes to store in lookup. buf, err := proto.Marshal(encodeIntegerPoint(p)) if err != nil { - log.Println("error marshaling dedupe point:", err) - continue + return nil, err } // If the point has already been output then move to the next point. @@ -3641,7 +3869,7 @@ func (itr *integerDedupeIterator) Next() *IntegerPoint { // Otherwise mark it as emitted and return point. itr.m[string(buf)] = struct{}{} - return p + return p, nil } } @@ -3674,24 +3902,23 @@ func (itr *integerReaderIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *integerReaderIterator) Next() *IntegerPoint { +func (itr *integerReaderIterator) Next() (*IntegerPoint, error) { // OPTIMIZE(benbjohnson): Reuse point on iterator. // Unmarshal next point. 
p := &IntegerPoint{} if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF { - return nil + return nil, nil } else if err != nil { - log.Printf("error reading iterator point: %s", err) - return nil + return nil, err } - return p + return p, nil } // StringIterator represents a stream of string points. type StringIterator interface { Iterator - Next() *StringPoint + Next() (*StringPoint, error) } // newStringIterators converts a slice of Iterator to a slice of StringIterator. @@ -3729,43 +3956,46 @@ func (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() } func (itr *bufStringIterator) Close() error { return itr.itr.Close() } // peek returns the next point without removing it from the iterator. -func (itr *bufStringIterator) peek() *StringPoint { - p := itr.Next() +func (itr *bufStringIterator) peek() (*StringPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } itr.unread(p) - return p + return p, nil } // peekTime returns the time of the next point. // Returns zero time if no more points available. -func (itr *bufStringIterator) peekTime() int64 { - p := itr.peek() - if p == nil { - return ZeroTime +func (itr *bufStringIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err } - return p.Time + return p.Time, nil } // Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufStringIterator) Next() *StringPoint { - if itr.buf != nil { - buf := itr.buf +func (itr *bufStringIterator) Next() (*StringPoint, error) { + buf := itr.buf + if buf != nil { itr.buf = nil - return buf + return buf, nil } return itr.itr.Next() } // NextInWindow returns the next value if it is between [startTime, endTime). // If the next value is outside the range then it is moved to the buffer. 
-func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) *StringPoint { - v := itr.Next() - if v == nil { - return nil - } else if v.Time < startTime || v.Time >= endTime { +func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { itr.unread(v) - return nil + return nil, nil } - return v + return v, nil } // unread sets v to the buffer. It is read on the next call to Next(). @@ -3827,7 +4057,7 @@ func (itr *stringMergeIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *stringMergeIterator) Next() *StringPoint { +func (itr *stringMergeIterator) Next() (*StringPoint, error) { // Initialize the heap. This needs to be done lazily on the first call to this iterator // so that iterator initialization done through the Select() call returns quickly. // Queries can only be interrupted after the Select() call completes so any operations @@ -3837,7 +4067,9 @@ func (itr *stringMergeIterator) Next() *StringPoint { items := itr.heap.items itr.heap.items = make([]*stringMergeHeapItem, 0, len(items)) for _, item := range items { - if item.itr.peek() == nil { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { continue } itr.heap.items = append(itr.heap.items, item) @@ -3850,19 +4082,25 @@ func (itr *stringMergeIterator) Next() *StringPoint { // Retrieve the next iterator if we don't have one. if itr.curr == nil { if len(itr.heap.items) == 0 { - return nil + return nil, nil } itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem) // Read point and set current window. 
- p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } itr.window.name, itr.window.tags = p.Name, p.Tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p + return p, nil } // Read the next point from the current iterator. - p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } // If there are no more points then remove iterator from heap and find next. if p == nil { @@ -3872,13 +4110,13 @@ func (itr *stringMergeIterator) Next() *StringPoint { // Check if the point is inside of our current window. inWindow := true - if itr.window.name != p.Name { + if window := itr.window; window.name != p.Name { inWindow = false - } else if itr.window.tags != p.Tags.ID() { + } else if window.tags != p.Tags.ID() { inWindow = false - } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false - } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + } else if !opt.Ascending && p.Time < window.startTime { inWindow = false } @@ -3890,7 +4128,7 @@ func (itr *stringMergeIterator) Next() *StringPoint { continue } - return p + return p, nil } } @@ -3904,7 +4142,14 @@ type stringMergeHeap struct { func (h stringMergeHeap) Len() int { return len(h.items) } func (h stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } func (h stringMergeHeap) Less(i, j int) bool { - x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } if h.opt.Ascending { if x.Name != y.Name { @@ -3943,6 +4188,7 @@ func (h *stringMergeHeap) Pop() interface{} { type stringMergeHeapItem struct { itr *bufStringIterator + err error } // stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into 
one. @@ -3950,6 +4196,7 @@ type stringSortedMergeIterator struct { inputs []StringIterator opt IteratorOptions heap stringSortedMergeHeap + init bool } // newStringSortedMergeIterator returns an instance of stringSortedMergeIterator. @@ -3960,18 +4207,11 @@ func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) opt: opt, } - // Initialize heap. + // Initialize heap items. for _, input := range inputs { - // Read next point. - p := input.Next() - if p == nil { - continue - } - // Append to the heap. - itr.heap = append(itr.heap, &stringSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + itr.heap = append(itr.heap, &stringSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) } - heap.Init(&itr.heap) return itr } @@ -3994,27 +4234,47 @@ func (itr *stringSortedMergeIterator) Close() error { } // Next returns the next points from the iterator. -func (itr *stringSortedMergeIterator) Next() *StringPoint { return itr.pop() } +func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() } // pop returns the next point from the heap. // Reads the next point from item's cursor and puts it back on the heap. -func (itr *stringSortedMergeIterator) pop() *StringPoint { +func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*stringSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + if len(itr.heap) == 0 { - return nil + return nil, nil } // Read the next item from the heap. item := heap.Pop(&itr.heap).(*stringSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } // Copy the point for return. 
p := item.point.Clone() // Read the next item from the cursor. Push back to heap if one exists. - if item.point = item.itr.Next(); item.point != nil { + if item.point, item.err = item.itr.Next(); item.point != nil { heap.Push(&itr.heap, item) } - return p + return p, nil } // stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. @@ -4056,6 +4316,7 @@ func (h *stringSortedMergeHeap) Pop() interface{} { type stringSortedMergeHeapItem struct { point *StringPoint + err error itr StringIterator ascending bool } @@ -4087,11 +4348,11 @@ func (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() func (itr *stringLimitIterator) Close() error { return itr.input.Close() } // Next returns the next point from the iterator. -func (itr *stringLimitIterator) Next() *StringPoint { +func (itr *stringLimitIterator) Next() (*StringPoint, error) { for { - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Reset window and counter if a new window is encountered. @@ -4113,12 +4374,12 @@ func (itr *stringLimitIterator) Next() *StringPoint { if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { // If there's no interval, no groups, and a single source then simply exit. 
if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil + return nil, nil } continue } - return p + return p, nil } } @@ -4128,6 +4389,7 @@ type stringFillIterator struct { startTime int64 endTime int64 auxFields []interface{} + init bool opt IteratorOptions window struct { @@ -4159,33 +4421,33 @@ func newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) auxFields = make([]interface{}, len(opt.Aux)) } - itr := &stringFillIterator{ + return &stringFillIterator{ input: newBufStringIterator(input), startTime: startTime, endTime: endTime, auxFields: auxFields, opt: opt, } - - p := itr.input.peek() - if p != nil { - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - } else { - if opt.Ascending { - itr.window.time = itr.endTime + 1 - } else { - itr.window.time = itr.endTime - 1 - } - } - return itr } func (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *stringFillIterator) Close() error { return itr.input.Close() } -func (itr *stringFillIterator) Next() *StringPoint { - p := itr.input.Next() +func (itr *stringFillIterator) Next() (*StringPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } // Check if the next point is outside of our window or is nil. for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { @@ -4208,7 +4470,7 @@ func (itr *stringFillIterator) Next() *StringPoint { // We are *not* in a current interval. If there is no next point, // we are at the end of all intervals. if p == nil { - return nil + return nil, nil } // Set the new interval. 
@@ -4256,7 +4518,7 @@ func (itr *stringFillIterator) Next() *StringPoint { } else { itr.window.time = p.Time - int64(itr.opt.Interval.Duration) } - return p + return p, nil } // stringIntervalIterator represents a string implementation of IntervalIterator. @@ -4272,13 +4534,13 @@ func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *strin func (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *stringIntervalIterator) Close() error { return itr.input.Close() } -func (itr *stringIntervalIterator) Next() *StringPoint { - p := itr.input.Next() - if p == nil { - return p +func (itr *stringIntervalIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } p.Time, _ = itr.opt.Window(p.Time) - return p + return p, nil } // stringInterruptIterator represents a string implementation of InterruptIterator. @@ -4295,15 +4557,15 @@ func newStringInterruptIterator(input StringIterator, closing <-chan struct{}) * func (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *stringInterruptIterator) Close() error { return itr.input.Close() } -func (itr *stringInterruptIterator) Next() *StringPoint { - // Only check if the channel is closed every 256 points. This - // intentionally checks on both 0 and 256 so that if the iterator +func (itr *stringInterruptIterator) Next() (*StringPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator // has been interrupted before the first point is emitted it will // not emit any points. - if itr.count&0x100 == 0 { + if itr.count&0xFF == 0xFF { select { case <-itr.closing: - return nil + return nil, nil default: // Reset iterator count to zero and fall through to emit the next point. 
itr.count = 0 @@ -4315,10 +4577,16 @@ func (itr *stringInterruptIterator) Next() *StringPoint { return itr.input.Next() } +// auxStringPoint represents a combination of a point and an error for the AuxIterator. +type auxStringPoint struct { + point *StringPoint + err error +} + // stringAuxIterator represents a string implementation of AuxIterator. type stringAuxIterator struct { input *bufStringIterator - output chan *StringPoint + output chan auxStringPoint fields auxIteratorFields background bool } @@ -4326,7 +4594,7 @@ type stringAuxIterator struct { func newStringAuxIterator(input StringIterator, seriesKeys SeriesList, opt IteratorOptions) *stringAuxIterator { return &stringAuxIterator{ input: newBufStringIterator(input), - output: make(chan *StringPoint, 1), + output: make(chan auxStringPoint, 1), fields: newAuxIteratorFields(seriesKeys, opt), } } @@ -4337,10 +4605,13 @@ func (itr *stringAuxIterator) Background() { go DrainIterator(itr) } -func (itr *stringAuxIterator) Start() { go itr.stream() } -func (itr *stringAuxIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *stringAuxIterator) Close() error { return itr.input.Close() } -func (itr *stringAuxIterator) Next() *StringPoint { return <-itr.output } +func (itr *stringAuxIterator) Start() { go itr.stream() } +func (itr *stringAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *stringAuxIterator) Close() error { return itr.input.Close() } +func (itr *stringAuxIterator) Next() (*StringPoint, error) { + p := <-itr.output + return p.point, p.err +} func (itr *stringAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } func (itr *stringAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { @@ -4372,13 +4643,17 @@ func (itr *stringAuxIterator) ExpandSources(sources Sources) (Sources, error) { func (itr *stringAuxIterator) stream() { for { // Read next point. 
- p := itr.input.Next() - if p == nil { + p, err := itr.input.Next() + if err != nil { + itr.output <- auxStringPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { break } // Send point to output and to each field iterator. - itr.output <- p + itr.output <- auxStringPoint{point: p} if ok := itr.fields.send(p); !ok && itr.background { break } @@ -4395,6 +4670,7 @@ type stringChanIterator struct { filled bool points [2]StringPoint } + err error cond *sync.Cond done bool } @@ -4442,8 +4718,23 @@ func (itr *stringChanIterator) setBuf(name string, tags Tags, time int64, value return true } -func (itr *stringChanIterator) Next() *StringPoint { +func (itr *stringChanIterator) setErr(err error) { itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *stringChanIterator) Next() (*StringPoint, error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } // Wait until either a value is available in the buffer or // the iterator is closed. @@ -4453,8 +4744,7 @@ func (itr *stringChanIterator) Next() *StringPoint { // Return nil once the channel is done and the buffer is empty. if itr.done && !itr.buf.filled { - itr.cond.L.Unlock() - return nil + return nil, nil } // Always read from the buffer if it exists, even if the iterator @@ -4464,10 +4754,7 @@ func (itr *stringChanIterator) Next() *StringPoint { itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) itr.buf.filled = false itr.cond.Signal() - - // Do not defer the unlock so we don't create an unnecessary allocation. - itr.cond.L.Unlock() - return p + return p, nil } // stringReduceFloatIterator executes a reducer for every interval and buffers the result. 
@@ -4485,19 +4772,20 @@ func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.S func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *stringReduceFloatIterator) Next() *FloatPoint { +func (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // stringReduceFloatPoint stores the reduced data for a name/tag combination. @@ -4510,16 +4798,22 @@ type stringReduceFloatPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *stringReduceFloatIterator) reduce() []FloatPoint { +func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceFloatPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -4570,7 +4864,7 @@ func (itr *stringReduceFloatIterator) reduce() []FloatPoint { } } - return a + return a, nil } // stringStreamFloatIterator streams inputs into the iterator and emits points gradually. 
@@ -4599,29 +4893,30 @@ func (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.S func (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *stringStreamFloatIterator) Next() *FloatPoint { +func (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamFloatIterator) reduce() []FloatPoint { +func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -4656,7 +4951,7 @@ func (itr *stringStreamFloatIterator) reduce() []FloatPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -4680,13 +4975,18 @@ func (itr *stringFloatExprIterator) Close() error { return nil } -func (itr *stringFloatExprIterator) Next() *FloatPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *stringFloatExprIterator) Next() (*FloatPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err } - return itr.fn(a, b) + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil + } + return itr.fn(a, b), nil } // stringFloatExprFunc creates or modifies a point by combining two @@ -4710,19 +5010,20 @@ func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *stringReduceIntegerIterator) Next() *IntegerPoint { +func (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // stringReduceIntegerPoint stores the reduced data for a name/tag combination. @@ -4735,16 +5036,22 @@ type stringReduceIntegerPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *stringReduceIntegerIterator) reduce() []IntegerPoint { +func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceIntegerPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -4795,7 +5102,7 @@ func (itr *stringReduceIntegerIterator) reduce() []IntegerPoint { } } - return a + return a, nil } // stringStreamIntegerIterator streams inputs into the iterator and emits points gradually. @@ -4824,29 +5131,30 @@ func (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input func (itr *stringStreamIntegerIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *stringStreamIntegerIterator) Next() *IntegerPoint { +func (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamIntegerIterator) reduce() []IntegerPoint { +func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -4881,7 +5189,7 @@ func (itr *stringStreamIntegerIterator) reduce() []IntegerPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -4905,13 +5213,18 @@ func (itr *stringIntegerExprIterator) Close() error { return nil } -func (itr *stringIntegerExprIterator) Next() *IntegerPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *stringIntegerExprIterator) Next() (*IntegerPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err } - return itr.fn(a, b) + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil + } + return itr.fn(a, b), nil } // stringIntegerExprFunc creates or modifies a point by combining two @@ -4935,19 +5248,20 @@ func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input. func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *stringReduceStringIterator) Next() *StringPoint { +func (itr *stringReduceStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // stringReduceStringPoint stores the reduced data for a name/tag combination. @@ -4960,16 +5274,22 @@ type stringReduceStringPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *stringReduceStringIterator) reduce() []StringPoint { +func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceStringPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -5020,7 +5340,7 @@ func (itr *stringReduceStringIterator) reduce() []StringPoint { } } - return a + return a, nil } // stringStreamStringIterator streams inputs into the iterator and emits points gradually. @@ -5049,29 +5369,30 @@ func (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input. func (itr *stringStreamStringIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *stringStreamStringIterator) Next() *StringPoint { +func (itr *stringStreamStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamStringIterator) reduce() []StringPoint { +func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -5106,7 +5427,7 @@ func (itr *stringStreamStringIterator) reduce() []StringPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -5130,13 +5451,18 @@ func (itr *stringExprIterator) Close() error { return nil } -func (itr *stringExprIterator) Next() *StringPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *stringExprIterator) Next() (*StringPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // stringExprFunc creates or modifies a point by combining two @@ -5160,19 +5486,20 @@ func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *stringReduceBooleanIterator) Next() *BooleanPoint { +func (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // stringReduceBooleanPoint stores the reduced data for a name/tag combination. @@ -5185,16 +5512,22 @@ type stringReduceBooleanPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *stringReduceBooleanIterator) reduce() []BooleanPoint { +func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*stringReduceBooleanPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -5245,7 +5578,7 @@ func (itr *stringReduceBooleanIterator) reduce() []BooleanPoint { } } - return a + return a, nil } // stringStreamBooleanIterator streams inputs into the iterator and emits points gradually. @@ -5274,29 +5607,30 @@ func (itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input func (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *stringStreamBooleanIterator) Next() *BooleanPoint { +func (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamBooleanIterator) reduce() []BooleanPoint { +func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -5331,7 +5665,7 @@ func (itr *stringStreamBooleanIterator) reduce() []BooleanPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -5355,13 +5689,18 @@ func (itr *stringBooleanExprIterator) Close() error { return nil } -func (itr *stringBooleanExprIterator) Next() *BooleanPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *stringBooleanExprIterator) Next() (*BooleanPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // stringBooleanExprFunc creates or modifies a point by combining two @@ -5384,12 +5723,14 @@ func (itr *stringTransformIterator) Stats() IteratorStats { return itr.input.Sta func (itr *stringTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *stringTransformIterator) Next() *StringPoint { - p := itr.input.Next() - if p != nil { +func (itr *stringTransformIterator) Next() (*StringPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { p = itr.fn(p) } - return p + return p, nil } // stringTransformFunc creates or modifies a point. @@ -5411,12 +5752,14 @@ func (itr *stringBoolTransformIterator) Stats() IteratorStats { return itr.input func (itr *stringBoolTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. 
-func (itr *stringBoolTransformIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p != nil { - return itr.fn(p) +func (itr *stringBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil } - return nil + return nil, nil } // stringBoolTransformFunc creates or modifies a point. @@ -5448,19 +5791,18 @@ func (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats( func (itr *stringDedupeIterator) Close() error { return itr.input.Close() } // Next returns the next unique point from the input iterator. -func (itr *stringDedupeIterator) Next() *StringPoint { +func (itr *stringDedupeIterator) Next() (*StringPoint, error) { for { // Read next point. - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Serialize to bytes to store in lookup. buf, err := proto.Marshal(encodeStringPoint(p)) if err != nil { - log.Println("error marshaling dedupe point:", err) - continue + return nil, err } // If the point has already been output then move to the next point. @@ -5470,7 +5812,7 @@ func (itr *stringDedupeIterator) Next() *StringPoint { // Otherwise mark it as emitted and return point. itr.m[string(buf)] = struct{}{} - return p + return p, nil } } @@ -5503,24 +5845,23 @@ func (itr *stringReaderIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *stringReaderIterator) Next() *StringPoint { +func (itr *stringReaderIterator) Next() (*StringPoint, error) { // OPTIMIZE(benbjohnson): Reuse point on iterator. // Unmarshal next point. p := &StringPoint{} if err := itr.dec.DecodeStringPoint(p); err == io.EOF { - return nil + return nil, nil } else if err != nil { - log.Printf("error reading iterator point: %s", err) - return nil + return nil, err } - return p + return p, nil } // BooleanIterator represents a stream of boolean points. 
type BooleanIterator interface { Iterator - Next() *BooleanPoint + Next() (*BooleanPoint, error) } // newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator. @@ -5558,43 +5899,46 @@ func (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() } func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() } // peek returns the next point without removing it from the iterator. -func (itr *bufBooleanIterator) peek() *BooleanPoint { - p := itr.Next() +func (itr *bufBooleanIterator) peek() (*BooleanPoint, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } itr.unread(p) - return p + return p, nil } // peekTime returns the time of the next point. // Returns zero time if no more points available. -func (itr *bufBooleanIterator) peekTime() int64 { - p := itr.peek() - if p == nil { - return ZeroTime +func (itr *bufBooleanIterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err } - return p.Time + return p.Time, nil } // Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufBooleanIterator) Next() *BooleanPoint { - if itr.buf != nil { - buf := itr.buf +func (itr *bufBooleanIterator) Next() (*BooleanPoint, error) { + buf := itr.buf + if buf != nil { itr.buf = nil - return buf + return buf, nil } return itr.itr.Next() } // NextInWindow returns the next value if it is between [startTime, endTime). // If the next value is outside the range then it is moved to the buffer. 
-func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) *BooleanPoint { - v := itr.Next() - if v == nil { - return nil - } else if v.Time < startTime || v.Time >= endTime { +func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { itr.unread(v) - return nil + return nil, nil } - return v + return v, nil } // unread sets v to the buffer. It is read on the next call to Next(). @@ -5656,7 +6000,7 @@ func (itr *booleanMergeIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *booleanMergeIterator) Next() *BooleanPoint { +func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { // Initialize the heap. This needs to be done lazily on the first call to this iterator // so that iterator initialization done through the Select() call returns quickly. // Queries can only be interrupted after the Select() call completes so any operations @@ -5666,7 +6010,9 @@ func (itr *booleanMergeIterator) Next() *BooleanPoint { items := itr.heap.items itr.heap.items = make([]*booleanMergeHeapItem, 0, len(items)) for _, item := range items { - if item.itr.peek() == nil { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { continue } itr.heap.items = append(itr.heap.items, item) @@ -5679,19 +6025,25 @@ func (itr *booleanMergeIterator) Next() *BooleanPoint { // Retrieve the next iterator if we don't have one. if itr.curr == nil { if len(itr.heap.items) == 0 { - return nil + return nil, nil } itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem) // Read point and set current window. 
- p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } itr.window.name, itr.window.tags = p.Name, p.Tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p + return p, nil } // Read the next point from the current iterator. - p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } // If there are no more points then remove iterator from heap and find next. if p == nil { @@ -5701,13 +6053,13 @@ func (itr *booleanMergeIterator) Next() *BooleanPoint { // Check if the point is inside of our current window. inWindow := true - if itr.window.name != p.Name { + if window := itr.window; window.name != p.Name { inWindow = false - } else if itr.window.tags != p.Tags.ID() { + } else if window.tags != p.Tags.ID() { inWindow = false - } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false - } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + } else if !opt.Ascending && p.Time < window.startTime { inWindow = false } @@ -5719,7 +6071,7 @@ func (itr *booleanMergeIterator) Next() *BooleanPoint { continue } - return p + return p, nil } } @@ -5733,7 +6085,14 @@ type booleanMergeHeap struct { func (h booleanMergeHeap) Len() int { return len(h.items) } func (h booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } func (h booleanMergeHeap) Less(i, j int) bool { - x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } if h.opt.Ascending { if x.Name != y.Name { @@ -5772,6 +6131,7 @@ func (h *booleanMergeHeap) Pop() interface{} { type booleanMergeHeapItem struct { itr *bufBooleanIterator + err error } // booleanSortedMergeIterator is an iterator that sorts and merges multiple 
iterators into one. @@ -5779,6 +6139,7 @@ type booleanSortedMergeIterator struct { inputs []BooleanIterator opt IteratorOptions heap booleanSortedMergeHeap + init bool } // newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator. @@ -5789,18 +6150,11 @@ func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions opt: opt, } - // Initialize heap. + // Initialize heap items. for _, input := range inputs { - // Read next point. - p := input.Next() - if p == nil { - continue - } - // Append to the heap. - itr.heap = append(itr.heap, &booleanSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + itr.heap = append(itr.heap, &booleanSortedMergeHeapItem{itr: input, ascending: opt.Ascending}) } - heap.Init(&itr.heap) return itr } @@ -5823,27 +6177,47 @@ func (itr *booleanSortedMergeIterator) Close() error { } // Next returns the next points from the iterator. -func (itr *booleanSortedMergeIterator) Next() *BooleanPoint { return itr.pop() } +func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() } // pop returns the next point from the heap. // Reads the next point from item's cursor and puts it back on the heap. -func (itr *booleanSortedMergeIterator) pop() *BooleanPoint { +func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*booleanSortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + if len(itr.heap) == 0 { - return nil + return nil, nil } // Read the next item from the heap. 
item := heap.Pop(&itr.heap).(*booleanSortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } // Copy the point for return. p := item.point.Clone() // Read the next item from the cursor. Push back to heap if one exists. - if item.point = item.itr.Next(); item.point != nil { + if item.point, item.err = item.itr.Next(); item.point != nil { heap.Push(&itr.heap, item) } - return p + return p, nil } // booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. @@ -5885,6 +6259,7 @@ func (h *booleanSortedMergeHeap) Pop() interface{} { type booleanSortedMergeHeapItem struct { point *BooleanPoint + err error itr BooleanIterator ascending bool } @@ -5916,11 +6291,11 @@ func (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats( func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } // Next returns the next point from the iterator. -func (itr *booleanLimitIterator) Next() *BooleanPoint { +func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) { for { - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Reset window and counter if a new window is encountered. @@ -5942,12 +6317,12 @@ func (itr *booleanLimitIterator) Next() *BooleanPoint { if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { // If there's no interval, no groups, and a single source then simply exit. 
if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil + return nil, nil } continue } - return p + return p, nil } } @@ -5957,6 +6332,7 @@ type booleanFillIterator struct { startTime int64 endTime int64 auxFields []interface{} + init bool opt IteratorOptions window struct { @@ -5988,33 +6364,33 @@ func newBooleanFillIterator(input BooleanIterator, expr Expr, opt IteratorOption auxFields = make([]interface{}, len(opt.Aux)) } - itr := &booleanFillIterator{ + return &booleanFillIterator{ input: newBufBooleanIterator(input), startTime: startTime, endTime: endTime, auxFields: auxFields, opt: opt, } - - p := itr.input.peek() - if p != nil { - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - } else { - if opt.Ascending { - itr.window.time = itr.endTime + 1 - } else { - itr.window.time = itr.endTime - 1 - } - } - return itr } func (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *booleanFillIterator) Close() error { return itr.input.Close() } -func (itr *booleanFillIterator) Next() *BooleanPoint { - p := itr.input.Next() +func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } // Check if the next point is outside of our window or is nil. for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { @@ -6037,7 +6413,7 @@ func (itr *booleanFillIterator) Next() *BooleanPoint { // We are *not* in a current interval. If there is no next point, // we are at the end of all intervals. if p == nil { - return nil + return nil, nil } // Set the new interval. 
@@ -6085,7 +6461,7 @@ func (itr *booleanFillIterator) Next() *BooleanPoint { } else { itr.window.time = p.Time - int64(itr.opt.Interval.Duration) } - return p + return p, nil } // booleanIntervalIterator represents a boolean implementation of IntervalIterator. @@ -6101,13 +6477,13 @@ func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *boo func (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() } -func (itr *booleanIntervalIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p == nil { - return p +func (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } p.Time, _ = itr.opt.Window(p.Time) - return p + return p, nil } // booleanInterruptIterator represents a boolean implementation of InterruptIterator. @@ -6124,15 +6500,15 @@ func newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) func (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *booleanInterruptIterator) Close() error { return itr.input.Close() } -func (itr *booleanInterruptIterator) Next() *BooleanPoint { - // Only check if the channel is closed every 256 points. This - // intentionally checks on both 0 and 256 so that if the iterator +func (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator // has been interrupted before the first point is emitted it will // not emit any points. - if itr.count&0x100 == 0 { + if itr.count&0xFF == 0xFF { select { case <-itr.closing: - return nil + return nil, nil default: // Reset iterator count to zero and fall through to emit the next point. 
itr.count = 0 @@ -6144,10 +6520,16 @@ func (itr *booleanInterruptIterator) Next() *BooleanPoint { return itr.input.Next() } +// auxBooleanPoint represents a combination of a point and an error for the AuxIterator. +type auxBooleanPoint struct { + point *BooleanPoint + err error +} + // booleanAuxIterator represents a boolean implementation of AuxIterator. type booleanAuxIterator struct { input *bufBooleanIterator - output chan *BooleanPoint + output chan auxBooleanPoint fields auxIteratorFields background bool } @@ -6155,7 +6537,7 @@ type booleanAuxIterator struct { func newBooleanAuxIterator(input BooleanIterator, seriesKeys SeriesList, opt IteratorOptions) *booleanAuxIterator { return &booleanAuxIterator{ input: newBufBooleanIterator(input), - output: make(chan *BooleanPoint, 1), + output: make(chan auxBooleanPoint, 1), fields: newAuxIteratorFields(seriesKeys, opt), } } @@ -6166,10 +6548,13 @@ func (itr *booleanAuxIterator) Background() { go DrainIterator(itr) } -func (itr *booleanAuxIterator) Start() { go itr.stream() } -func (itr *booleanAuxIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *booleanAuxIterator) Close() error { return itr.input.Close() } -func (itr *booleanAuxIterator) Next() *BooleanPoint { return <-itr.output } +func (itr *booleanAuxIterator) Start() { go itr.stream() } +func (itr *booleanAuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *booleanAuxIterator) Close() error { return itr.input.Close() } +func (itr *booleanAuxIterator) Next() (*BooleanPoint, error) { + p := <-itr.output + return p.point, p.err +} func (itr *booleanAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } func (itr *booleanAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { @@ -6201,13 +6586,17 @@ func (itr *booleanAuxIterator) ExpandSources(sources Sources) (Sources, error) { func (itr *booleanAuxIterator) stream() { for { // Read next point. 
- p := itr.input.Next() - if p == nil { + p, err := itr.input.Next() + if err != nil { + itr.output <- auxBooleanPoint{err: err} + itr.fields.sendError(err) + break + } else if p == nil { break } // Send point to output and to each field iterator. - itr.output <- p + itr.output <- auxBooleanPoint{point: p} if ok := itr.fields.send(p); !ok && itr.background { break } @@ -6224,6 +6613,7 @@ type booleanChanIterator struct { filled bool points [2]BooleanPoint } + err error cond *sync.Cond done bool } @@ -6271,8 +6661,23 @@ func (itr *booleanChanIterator) setBuf(name string, tags Tags, time int64, value return true } -func (itr *booleanChanIterator) Next() *BooleanPoint { +func (itr *booleanChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *booleanChanIterator) Next() (*BooleanPoint, error) { itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } // Wait until either a value is available in the buffer or // the iterator is closed. @@ -6282,8 +6687,7 @@ func (itr *booleanChanIterator) Next() *BooleanPoint { // Return nil once the channel is done and the buffer is empty. if itr.done && !itr.buf.filled { - itr.cond.L.Unlock() - return nil + return nil, nil } // Always read from the buffer if it exists, even if the iterator @@ -6293,10 +6697,7 @@ func (itr *booleanChanIterator) Next() *BooleanPoint { itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) itr.buf.filled = false itr.cond.Signal() - - // Do not defer the unlock so we don't create an unnecessary allocation. - itr.cond.L.Unlock() - return p + return p, nil } // booleanReduceFloatIterator executes a reducer for every interval and buffers the result. @@ -6314,19 +6715,20 @@ func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input. 
func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *booleanReduceFloatIterator) Next() *FloatPoint { +func (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // booleanReduceFloatPoint stores the reduced data for a name/tag combination. @@ -6339,16 +6741,22 @@ type booleanReduceFloatPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *booleanReduceFloatIterator) reduce() []FloatPoint { +func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceFloatPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -6399,7 +6807,7 @@ func (itr *booleanReduceFloatIterator) reduce() []FloatPoint { } } - return a + return a, nil } // booleanStreamFloatIterator streams inputs into the iterator and emits points gradually. @@ -6428,29 +6836,30 @@ func (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input. 
func (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *booleanStreamFloatIterator) Next() *FloatPoint { +func (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamFloatIterator) reduce() []FloatPoint { +func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) { for { // Read next point. - curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -6485,7 +6894,7 @@ func (itr *booleanStreamFloatIterator) reduce() []FloatPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -6509,13 +6918,18 @@ func (itr *booleanFloatExprIterator) Close() error { return nil } -func (itr *booleanFloatExprIterator) Next() *FloatPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *booleanFloatExprIterator) Next() (*FloatPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // booleanFloatExprFunc creates or modifies a point by combining two @@ -6539,19 +6953,20 @@ func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { 
return itr.inpu func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *booleanReduceIntegerIterator) Next() *IntegerPoint { +func (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // booleanReduceIntegerPoint stores the reduced data for a name/tag combination. @@ -6564,16 +6979,22 @@ type booleanReduceIntegerPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *booleanReduceIntegerIterator) reduce() []IntegerPoint { +func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceIntegerPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -6624,7 +7045,7 @@ func (itr *booleanReduceIntegerIterator) reduce() []IntegerPoint { } } - return a + return a, nil } // booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually. 
@@ -6653,29 +7074,30 @@ func (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.inpu func (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *booleanStreamIntegerIterator) Next() *IntegerPoint { +func (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamIntegerIterator) reduce() []IntegerPoint { +func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -6710,7 +7132,7 @@ func (itr *booleanStreamIntegerIterator) reduce() []IntegerPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -6734,13 +7156,18 @@ func (itr *booleanIntegerExprIterator) Close() error { return nil } -func (itr *booleanIntegerExprIterator) Next() *IntegerPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *booleanIntegerExprIterator) Next() (*IntegerPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err } - return itr.fn(a, b) + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil + } + return itr.fn(a, b), nil } // booleanIntegerExprFunc creates or modifies a point by combining two @@ -6764,19 +7191,20 @@ func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *booleanReduceStringIterator) Next() *StringPoint { +func (itr *booleanReduceStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // booleanReduceStringPoint stores the reduced data for a name/tag combination. @@ -6789,16 +7217,22 @@ type booleanReduceStringPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *booleanReduceStringIterator) reduce() []StringPoint { +func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceStringPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -6849,7 +7283,7 @@ func (itr *booleanReduceStringIterator) reduce() []StringPoint { } } - return a + return a, nil } // booleanStreamStringIterator streams inputs into the iterator and emits points gradually. @@ -6878,29 +7312,30 @@ func (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input func (itr *booleanStreamStringIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *booleanStreamStringIterator) Next() *StringPoint { +func (itr *booleanStreamStringIterator) Next() (*StringPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamStringIterator) reduce() []StringPoint { +func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -6935,7 +7370,7 @@ func (itr *booleanStreamStringIterator) reduce() []StringPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -6959,13 +7394,18 @@ func (itr *booleanStringExprIterator) Close() error { return nil } -func (itr *booleanStringExprIterator) Next() *StringPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *booleanStringExprIterator) Next() (*StringPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // booleanStringExprFunc creates or modifies a point by combining two @@ -6989,19 +7429,20 @@ func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.inpu func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *booleanReduceBooleanIterator) Next() *BooleanPoint { +func (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // booleanReduceBooleanPoint stores the reduced data for a name/tag combination. @@ -7014,16 +7455,22 @@ type booleanReduceBooleanPoint struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. 
-func (itr *booleanReduceBooleanIterator) reduce() []BooleanPoint { +func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*booleanReduceBooleanPoint) for { // Read next point. - curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -7074,7 +7521,7 @@ func (itr *booleanReduceBooleanIterator) reduce() []BooleanPoint { } } - return a + return a, nil } // booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually. @@ -7103,29 +7550,30 @@ func (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.inpu func (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *booleanStreamBooleanIterator) Next() *BooleanPoint { +func (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamBooleanIterator) reduce() []BooleanPoint { +func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -7160,7 +7608,7 @@ func (itr *booleanStreamBooleanIterator) reduce() []BooleanPoint { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -7184,13 +7632,18 @@ func (itr *booleanExprIterator) Close() error { return nil } -func (itr *booleanExprIterator) Next() *BooleanPoint { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *booleanExprIterator) Next() (*BooleanPoint, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err + } + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil } - return itr.fn(a, b) + return itr.fn(a, b), nil } // booleanExprFunc creates or modifies a point by combining two @@ -7213,12 +7666,14 @@ func (itr *booleanTransformIterator) Stats() IteratorStats { return itr.input.St func (itr *booleanTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *booleanTransformIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p != nil { +func (itr *booleanTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { p = itr.fn(p) } - return p + return p, nil } // booleanTransformFunc creates or modifies a point. @@ -7240,12 +7695,14 @@ func (itr *booleanBoolTransformIterator) Stats() IteratorStats { return itr.inpu func (itr *booleanBoolTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. 
-func (itr *booleanBoolTransformIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p != nil { - return itr.fn(p) +func (itr *booleanBoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil } - return nil + return nil, nil } // booleanBoolTransformFunc creates or modifies a point. @@ -7277,19 +7734,18 @@ func (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() } // Next returns the next unique point from the input iterator. -func (itr *booleanDedupeIterator) Next() *BooleanPoint { +func (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) { for { // Read next point. - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Serialize to bytes to store in lookup. buf, err := proto.Marshal(encodeBooleanPoint(p)) if err != nil { - log.Println("error marshaling dedupe point:", err) - continue + return nil, err } // If the point has already been output then move to the next point. @@ -7299,7 +7755,7 @@ func (itr *booleanDedupeIterator) Next() *BooleanPoint { // Otherwise mark it as emitted and return point. itr.m[string(buf)] = struct{}{} - return p + return p, nil } } @@ -7332,18 +7788,17 @@ func (itr *booleanReaderIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *booleanReaderIterator) Next() *BooleanPoint { +func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) { // OPTIMIZE(benbjohnson): Reuse point on iterator. // Unmarshal next point. 
p := &BooleanPoint{} if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF { - return nil + return nil, nil } else if err != nil { - log.Printf("error reading iterator point: %s", err) - return nil + return nil, err } - return p + return p, nil } // IteratorEncoder is an encoder for encoding an iterator's points to w. @@ -7402,8 +7857,10 @@ func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { } // Retrieve the next point from the iterator. - p := itr.Next() - if p == nil { + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { break } @@ -7443,8 +7900,10 @@ func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error { } // Retrieve the next point from the iterator. - p := itr.Next() - if p == nil { + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { break } @@ -7484,8 +7943,10 @@ func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error { } // Retrieve the next point from the iterator. - p := itr.Next() - if p == nil { + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { break } @@ -7525,8 +7986,10 @@ func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error { } // Retrieve the next point from the iterator. - p := itr.Next() - if p == nil { + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { break } diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl index 28878ad00..453c18c09 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl @@ -8,7 +8,6 @@ import ( "io" "sort" "sync" - "log" "time" "github.com/gogo/protobuf/proto" @@ -23,7 +22,7 @@ const DefaultStatsInterval = 10 * time.Second // {{$k.Name}}Iterator represents a stream of {{$k.name}} points. 
type {{$k.Name}}Iterator interface { Iterator - Next() *{{$k.Name}}Point + Next() (*{{$k.Name}}Point, error) } // new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator. @@ -65,43 +64,46 @@ func (itr *buf{{$k.Name}}Iterator) Stats() IteratorStats { return itr.itr.Stats( func (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() } // peek returns the next point without removing it from the iterator. -func (itr *buf{{$k.Name}}Iterator) peek() *{{$k.Name}}Point { - p := itr.Next() +func (itr *buf{{$k.Name}}Iterator) peek() (*{{$k.Name}}Point, error) { + p, err := itr.Next() + if err != nil { + return nil, err + } itr.unread(p) - return p + return p, nil } // peekTime returns the time of the next point. // Returns zero time if no more points available. -func (itr *buf{{$k.Name}}Iterator) peekTime() int64 { - p := itr.peek() - if p == nil { - return ZeroTime +func (itr *buf{{$k.Name}}Iterator) peekTime() (int64, error) { + p, err := itr.peek() + if p == nil || err != nil { + return ZeroTime, err } - return p.Time + return p.Time, nil } // Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *buf{{$k.Name}}Iterator) Next() *{{$k.Name}}Point { - if itr.buf != nil { - buf := itr.buf +func (itr *buf{{$k.Name}}Iterator) Next() (*{{$k.Name}}Point, error) { + buf := itr.buf + if buf != nil { itr.buf = nil - return buf + return buf, nil } return itr.itr.Next() } // NextInWindow returns the next value if it is between [startTime, endTime). // If the next value is outside the range then it is moved to the buffer. 
-func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) *{{$k.Name}}Point { - v := itr.Next() - if v == nil { - return nil - } else if v.Time < startTime || v.Time >= endTime { +func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) (*{{$k.Name}}Point, error) { + v, err := itr.Next() + if v == nil || err != nil { + return nil, err + } else if t := v.Time; t >= endTime || t < startTime { itr.unread(v) - return nil + return nil, nil } - return v + return v, nil } // unread sets v to the buffer. It is read on the next call to Next(). @@ -163,7 +165,7 @@ func (itr *{{$k.name}}MergeIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *{{$k.name}}MergeIterator) Next() *{{$k.Name}}Point { +func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { // Initialize the heap. This needs to be done lazily on the first call to this iterator // so that iterator initialization done through the Select() call returns quickly. // Queries can only be interrupted after the Select() call completes so any operations @@ -173,7 +175,9 @@ func (itr *{{$k.name}}MergeIterator) Next() *{{$k.Name}}Point { items := itr.heap.items itr.heap.items = make([]*{{$k.name}}MergeHeapItem, 0, len(items)) for _, item := range items { - if item.itr.peek() == nil { + if p, err := item.itr.peek(); err != nil { + return nil, err + } else if p == nil { continue } itr.heap.items = append(itr.heap.items, item) @@ -186,19 +190,25 @@ func (itr *{{$k.name}}MergeIterator) Next() *{{$k.Name}}Point { // Retrieve the next iterator if we don't have one. if itr.curr == nil { if len(itr.heap.items) == 0 { - return nil + return nil, nil } itr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem) // Read point and set current window. 
- p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } itr.window.name, itr.window.tags = p.Name, p.Tags.ID() itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p + return p, nil } // Read the next point from the current iterator. - p := itr.curr.itr.Next() + p, err := itr.curr.itr.Next() + if err != nil { + return nil, err + } // If there are no more points then remove iterator from heap and find next. if p == nil { @@ -208,13 +218,13 @@ func (itr *{{$k.name}}MergeIterator) Next() *{{$k.Name}}Point { // Check if the point is inside of our current window. inWindow := true - if itr.window.name != p.Name { + if window := itr.window; window.name != p.Name { inWindow = false - } else if itr.window.tags != p.Tags.ID() { + } else if window.tags != p.Tags.ID() { inWindow = false - } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { inWindow = false - } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + } else if !opt.Ascending && p.Time < window.startTime { inWindow = false } @@ -226,7 +236,7 @@ func (itr *{{$k.name}}MergeIterator) Next() *{{$k.Name}}Point { continue } - return p + return p, nil } } @@ -240,7 +250,14 @@ type {{$k.name}}MergeHeap struct { func (h {{$k.name}}MergeHeap) Len() int { return len(h.items) } func (h {{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } func (h {{$k.name}}MergeHeap) Less(i, j int) bool { - x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + x, err := h.items[i].itr.peek() + if err != nil { + return true + } + y, err := h.items[j].itr.peek() + if err != nil { + return false + } if h.opt.Ascending { if x.Name != y.Name { @@ -280,6 +297,7 @@ func (h *{{$k.name}}MergeHeap) Pop() interface{} { type {{$k.name}}MergeHeapItem struct { itr *buf{{$k.Name}}Iterator + err error } @@ -288,6 +306,7 @@ type 
{{$k.name}}SortedMergeIterator struct { inputs []{{$k.Name}}Iterator opt IteratorOptions heap {{$k.name}}SortedMergeHeap + init bool } // new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator. @@ -298,18 +317,11 @@ func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt Iterato opt: opt, } - // Initialize heap. + // Initialize heap items. for _, input := range inputs { - // Read next point. - p := input.Next() - if p == nil { - continue - } - // Append to the heap. - itr.heap = append(itr.heap, &{{$k.name}}SortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + itr.heap = append(itr.heap, &{{$k.name}}SortedMergeHeapItem{itr: input, ascending: opt.Ascending}) } - heap.Init(&itr.heap) return itr } @@ -332,27 +344,47 @@ func (itr *{{$k.name}}SortedMergeIterator) Close() error { } // Next returns the next points from the iterator. -func (itr *{{$k.name}}SortedMergeIterator) Next() *{{$k.Name}}Point { return itr.pop() } +func (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { return itr.pop() } // pop returns the next point from the heap. // Reads the next point from item's cursor and puts it back on the heap. -func (itr *{{$k.name}}SortedMergeIterator) pop() *{{$k.Name}}Point { +func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { + // Initialize the heap. See the MergeIterator to see why this has to be done lazily. + if !itr.init { + items := itr.heap + itr.heap = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) + for _, item := range items { + var err error + if item.point, err = item.itr.Next(); err != nil { + return nil, err + } else if item.point == nil { + continue + } + itr.heap = append(itr.heap, item) + } + heap.Init(&itr.heap) + itr.init = true + } + if len(itr.heap) == 0 { - return nil + return nil, nil } // Read the next item from the heap. 
item := heap.Pop(&itr.heap).(*{{$k.name}}SortedMergeHeapItem) + if item.err != nil { + return nil, item.err + } // Copy the point for return. p := item.point.Clone() // Read the next item from the cursor. Push back to heap if one exists. - if item.point = item.itr.Next(); item.point != nil { + if item.point, item.err = item.itr.Next(); item.point != nil { heap.Push(&itr.heap, item) } - return p + return p, nil } // {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. @@ -394,6 +426,7 @@ func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { type {{$k.name}}SortedMergeHeapItem struct { point *{{$k.Name}}Point + err error itr {{$k.Name}}Iterator ascending bool } @@ -425,11 +458,11 @@ func (itr *{{$k.name}}LimitIterator) Stats() IteratorStats { return itr.input.St func (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() } // Next returns the next point from the iterator. -func (itr *{{$k.name}}LimitIterator) Next() *{{$k.Name}}Point { +func (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) { for { - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Reset window and counter if a new window is encountered. @@ -451,12 +484,12 @@ func (itr *{{$k.name}}LimitIterator) Next() *{{$k.Name}}Point { if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { // If there's no interval, no groups, and a single source then simply exit. 
if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { - return nil + return nil, nil } continue } - return p + return p, nil } } @@ -466,6 +499,7 @@ type {{$k.name}}FillIterator struct { startTime int64 endTime int64 auxFields []interface{} + init bool opt IteratorOptions window struct { @@ -497,33 +531,33 @@ func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt Iterat auxFields = make([]interface{}, len(opt.Aux)) } - itr := &{{$k.name}}FillIterator{ + return &{{$k.name}}FillIterator{ input: newBuf{{$k.Name}}Iterator(input), startTime: startTime, endTime: endTime, auxFields: auxFields, opt: opt, } - - p := itr.input.peek() - if p != nil { - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - } else { - if opt.Ascending { - itr.window.time = itr.endTime + 1 - } else { - itr.window.time = itr.endTime - 1 - } - } - return itr } func (itr *{{$k.name}}FillIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() } -func (itr *{{$k.name}}FillIterator) Next() *{{$k.Name}}Point { - p := itr.input.Next() +func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { + if !itr.init { + p, err := itr.input.peek() + if p == nil || err != nil { + return nil, err + } + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.init = true + } + + p, err := itr.input.Next() + if err != nil { + return nil, err + } // Check if the next point is outside of our window or is nil. for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { @@ -546,7 +580,7 @@ func (itr *{{$k.name}}FillIterator) Next() *{{$k.Name}}Point { // We are *not* in a current interval. If there is no next point, // we are at the end of all intervals. if p == nil { - return nil + return nil, nil } // Set the new interval. 
@@ -594,7 +628,7 @@ func (itr *{{$k.name}}FillIterator) Next() *{{$k.Name}}Point { } else { itr.window.time = p.Time - int64(itr.opt.Interval.Duration) } - return p + return p, nil } // {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator. @@ -610,13 +644,13 @@ func new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptio func (itr *{{$k.name}}IntervalIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() } -func (itr *{{$k.name}}IntervalIterator) Next() *{{$k.Name}}Point { - p := itr.input.Next() - if p == nil { - return p +func (itr *{{$k.name}}IntervalIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } p.Time, _ = itr.opt.Window(p.Time) - return p + return p, nil } // {{$k.name}}InterruptIterator represents a {{$k.name}} implementation of InterruptIterator. @@ -633,15 +667,15 @@ func new{{$k.Name}}InterruptIterator(input {{$k.Name}}Iterator, closing <-chan s func (itr *{{$k.name}}InterruptIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *{{$k.name}}InterruptIterator) Close() error { return itr.input.Close() } -func (itr *{{$k.name}}InterruptIterator) Next() *{{$k.Name}}Point { - // Only check if the channel is closed every 256 points. This - // intentionally checks on both 0 and 256 so that if the iterator +func (itr *{{$k.name}}InterruptIterator) Next() (*{{$k.Name}}Point, error) { + // Only check if the channel is closed every N points. This + // intentionally checks on both 0 and N so that if the iterator // has been interrupted before the first point is emitted it will // not emit any points. - if itr.count&0x100 == 0 { + if itr.count & 0xFF == 0xFF { select { case <-itr.closing: - return nil + return nil, nil default: // Reset iterator count to zero and fall through to emit the next point. 
itr.count = 0 @@ -653,10 +687,16 @@ func (itr *{{$k.name}}InterruptIterator) Next() *{{$k.Name}}Point { return itr.input.Next() } +// aux{{$k.Name}}Point represents a combination of a point and an error for the AuxIterator. +type aux{{$k.Name}}Point struct { + point *{{$k.Name}}Point + err error +} + // {{$k.name}}AuxIterator represents a {{$k.name}} implementation of AuxIterator. type {{$k.name}}AuxIterator struct { input *buf{{$k.Name}}Iterator - output chan *{{$k.Name}}Point + output chan aux{{$k.Name}}Point fields auxIteratorFields background bool } @@ -664,7 +704,7 @@ type {{$k.name}}AuxIterator struct { func new{{$k.Name}}AuxIterator(input {{$k.Name}}Iterator, seriesKeys SeriesList, opt IteratorOptions) *{{$k.name}}AuxIterator { return &{{$k.name}}AuxIterator{ input: newBuf{{$k.Name}}Iterator(input), - output: make(chan *{{$k.Name}}Point, 1), + output: make(chan aux{{$k.Name}}Point, 1), fields: newAuxIteratorFields(seriesKeys, opt), } } @@ -675,11 +715,14 @@ func (itr *{{$k.name}}AuxIterator) Background() { go DrainIterator(itr) } -func (itr *{{$k.name}}AuxIterator) Start() { go itr.stream() } -func (itr *{{$k.name}}AuxIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *{{$k.name}}AuxIterator) Close() error { return itr.input.Close() } -func (itr *{{$k.name}}AuxIterator) Next() *{{$k.Name}}Point { return <-itr.output } -func (itr *{{$k.name}}AuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } +func (itr *{{$k.name}}AuxIterator) Start() { go itr.stream() } +func (itr *{{$k.name}}AuxIterator) Stats() IteratorStats { return itr.input.Stats() } +func (itr *{{$k.name}}AuxIterator) Close() error { return itr.input.Close() } +func (itr *{{$k.name}}AuxIterator) Next() (*{{$k.Name}}Point, error) { + p := <-itr.output + return p.point, p.err +} +func (itr *{{$k.name}}AuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } func (itr *{{$k.name}}AuxIterator) CreateIterator(opt IteratorOptions) 
(Iterator, error) { expr := opt.Expr @@ -710,13 +753,17 @@ func (itr *{{$k.name}}AuxIterator) ExpandSources(sources Sources) (Sources, erro func (itr *{{.name}}AuxIterator) stream() { for { // Read next point. - p := itr.input.Next() - if p == nil { + p, err := itr.input.Next() + if err != nil { + itr.output <- aux{{$k.Name}}Point{err: err} + itr.fields.sendError(err) + break + } else if p == nil { break } // Send point to output and to each field iterator. - itr.output <- p + itr.output <- aux{{$k.Name}}Point{point: p} if ok := itr.fields.send(p); !ok && itr.background { break } @@ -733,6 +780,7 @@ type {{$k.name}}ChanIterator struct { filled bool points [2]{{$k.Name}}Point } + err error cond *sync.Cond done bool } @@ -783,8 +831,23 @@ func (itr *{{$k.name}}ChanIterator) setBuf(name string, tags Tags, time int64, v return true } -func (itr *{{$k.name}}ChanIterator) Next() *{{$k.Name}}Point { +func (itr *{{$k.name}}ChanIterator) setErr(err error) { + itr.cond.L.Lock() + defer itr.cond.L.Unlock() + itr.err = err + + // Signal to all waiting goroutines that a new value is ready to read. + itr.cond.Signal() +} + +func (itr *{{$k.name}}ChanIterator) Next() (*{{$k.Name}}Point, error) { itr.cond.L.Lock() + defer itr.cond.L.Unlock() + + // Check for an error and return one if there. + if itr.err != nil { + return nil, itr.err + } // Wait until either a value is available in the buffer or // the iterator is closed. @@ -794,8 +857,7 @@ func (itr *{{$k.name}}ChanIterator) Next() *{{$k.Name}}Point { // Return nil once the channel is done and the buffer is empty. if itr.done && !itr.buf.filled { - itr.cond.L.Unlock() - return nil + return nil, nil } // Always read from the buffer if it exists, even if the iterator @@ -805,10 +867,7 @@ func (itr *{{$k.name}}ChanIterator) Next() *{{$k.Name}}Point { itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) itr.buf.filled = false itr.cond.Signal() - - // Do not defer the unlock so we don't create an unnecessary allocation. 
- itr.cond.L.Unlock() - return p + return p, nil } {{range $v := $types}} @@ -828,19 +887,20 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() *{{$v.Name}}Point { +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination. @@ -853,16 +913,22 @@ type {{$k.name}}Reduce{{$v.Name}}Point struct { // reduce executes fn once for every point in the next window. // The previous value for the dimension is passed to fn. -func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() []{{$v.Name}}Point { +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { // Calculate next window. - startTime, endTime := itr.opt.Window(itr.input.peekTime()) + t, err := itr.input.peekTime() + if err != nil { + return nil, err + } + startTime, endTime := itr.opt.Window(t) // Create points by tags. m := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point) for { // Read next point. 
- curr := itr.input.NextInWindow(startTime, endTime) - if curr == nil { + curr, err := itr.input.NextInWindow(startTime, endTime) + if err != nil { + return nil, err + } else if curr == nil { break } else if curr.Nil { continue @@ -913,7 +979,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() []{{$v.Name}}Point { } } - return a + return a, nil } // {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually. @@ -942,29 +1008,30 @@ func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Stats() IteratorStats { return func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Close() error { return itr.input.Close() } // Next returns the next value for the stream iterator. -func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() *{{$v.Name}}Point { +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { // Calculate next window if we have no more points. if len(itr.points) == 0 { - itr.points = itr.reduce() + var err error + itr.points, err = itr.reduce() if len(itr.points) == 0 { - return nil + return nil, err } } // Pop next point off the stack. p := &itr.points[len(itr.points)-1] itr.points = itr.points[:len(itr.points)-1] - return p + return p, nil } // reduce creates and manages aggregators for every point from the input. // After aggregating a point, it always tries to emit a value using the emitter. -func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() []{{$v.Name}}Point { +func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { for { // Read next point. 
- curr := itr.input.Next() - if curr == nil { - return nil + curr, err := itr.input.Next() + if curr == nil || err != nil { + return nil, err } else if curr.Nil { continue } @@ -999,7 +1066,7 @@ func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() []{{$v.Name}}Point { points[i].Name = rp.Name points[i].Tags = rp.Tags } - return points + return points, nil } } @@ -1023,13 +1090,18 @@ func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) C return nil } -func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Next() *{{$v.Name}}Point { - a := itr.left.Next() - b := itr.right.Next() - if a == nil && b == nil { - return nil +func (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Next() (*{{$v.Name}}Point, error) { + a, err := itr.left.Next() + if err != nil { + return nil, err } - return itr.fn(a, b) + b, err := itr.right.Next() + if err != nil { + return nil, err + } else if a == nil && b == nil { + return nil, nil + } + return itr.fn(a, b), nil } // {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc creates or modifies a point by combining two @@ -1053,12 +1125,14 @@ func (itr *{{$k.name}}TransformIterator) Stats() IteratorStats { return itr.inpu func (itr *{{$k.name}}TransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *{{$k.name}}TransformIterator) Next() *{{$k.Name}}Point { - p := itr.input.Next() - if p != nil { +func (itr *{{$k.name}}TransformIterator) Next() (*{{$k.Name}}Point, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { p = itr.fn(p) } - return p + return p, nil } // {{$k.name}}TransformFunc creates or modifies a point. @@ -1080,12 +1154,14 @@ func (itr *{{$k.name}}BoolTransformIterator) Stats() IteratorStats { return itr. 
func (itr *{{$k.name}}BoolTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *{{$k.name}}BoolTransformIterator) Next() *BooleanPoint { - p := itr.input.Next() - if p != nil { - return itr.fn(p) +func (itr *{{$k.name}}BoolTransformIterator) Next() (*BooleanPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil } - return nil + return nil, nil } // {{$k.name}}BoolTransformFunc creates or modifies a point. @@ -1117,19 +1193,18 @@ func (itr *{{$k.name}}DedupeIterator) Stats() IteratorStats { return itr.input.S func (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() } // Next returns the next unique point from the input iterator. -func (itr *{{$k.name}}DedupeIterator) Next() *{{$k.Name}}Point { +func (itr *{{$k.name}}DedupeIterator) Next() (*{{$k.Name}}Point, error) { for { // Read next point. - p := itr.input.Next() - if p == nil { - return nil + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } // Serialize to bytes to store in lookup. buf, err := proto.Marshal(encode{{$k.Name}}Point(p)) if err != nil { - log.Println("error marshaling dedupe point:", err) - continue + return nil, err } // If the point has already been output then move to the next point. @@ -1139,7 +1214,7 @@ func (itr *{{$k.name}}DedupeIterator) Next() *{{$k.Name}}Point { // Otherwise mark it as emitted and return point. itr.m[string(buf)] = struct{}{} - return p + return p, nil } } @@ -1172,18 +1247,17 @@ func (itr *{{$k.name}}ReaderIterator) Close() error { } // Next returns the next point from the iterator. -func (itr *{{$k.name}}ReaderIterator) Next() *{{$k.Name}}Point { +func (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) { // OPTIMIZE(benbjohnson): Reuse point on iterator. // Unmarshal next point. 
p := &{{$k.Name}}Point{} if err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF { - return nil + return nil, nil } else if err != nil { - log.Printf("error reading iterator point: %s", err) - return nil + return nil, err } - return p + return p, nil } {{end}} @@ -1245,8 +1319,10 @@ func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error } // Retrieve the next point from the iterator. - p := itr.Next() - if p == nil { + p, err := itr.Next() + if err != nil { + return err + } else if p == nil { break } diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.go index a6ff2549f..d58bafe7e 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/iterator.go +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.go @@ -395,20 +395,39 @@ func (a auxIteratorFields) send(p Point) (ok bool) { return ok } +func (a auxIteratorFields) sendError(err error) { + for _, f := range a { + for _, itr := range f.itrs { + switch itr := itr.(type) { + case *floatChanIterator: + itr.setErr(err) + case *integerChanIterator: + itr.setErr(err) + case *stringChanIterator: + itr.setErr(err) + case *booleanChanIterator: + itr.setErr(err) + default: + panic(fmt.Sprintf("invalid aux itr type: %T", itr)) + } + } + } +} + // DrainIterator reads all points from an iterator. 
func DrainIterator(itr Iterator) { switch itr := itr.(type) { case FloatIterator: - for p := itr.Next(); p != nil; p = itr.Next() { + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { } case IntegerIterator: - for p := itr.Next(); p != nil; p = itr.Next() { + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { } case StringIterator: - for p := itr.Next(); p != nil; p = itr.Next() { + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { } case BooleanIterator: - for p := itr.Next(); p != nil; p = itr.Next() { + for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { } default: panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) @@ -423,19 +442,19 @@ func DrainIterators(itrs []Iterator) { for _, itr := range itrs { switch itr := itr.(type) { case FloatIterator: - if p := itr.Next(); p != nil { + if p, _ := itr.Next(); p != nil { hasData = true } case IntegerIterator: - if p := itr.Next(); p != nil { + if p, _ := itr.Next(); p != nil { hasData = true } case StringIterator: - if p := itr.Next(); p != nil { + if p, _ := itr.Next(); p != nil { hasData = true } case BooleanIterator: - if p := itr.Next(); p != nil { + if p, _ := itr.Next(); p != nil { hasData = true } default: @@ -702,6 +721,11 @@ func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt Ite // Set duration to zero if a negative interval has been used. 
if interval < 0 { interval = 0 + } else if interval > 0 { + opt.Interval.Offset, err = stmt.GroupByOffset(&opt) + if err != nil { + return opt, err + } } opt.Interval.Duration = interval @@ -1064,9 +1088,9 @@ func decodeInterval(pb *internal.Interval) Interval { type nilFloatIterator struct{} -func (*nilFloatIterator) Stats() IteratorStats { return IteratorStats{} } -func (*nilFloatIterator) Close() error { return nil } -func (*nilFloatIterator) Next() *FloatPoint { return nil } +func (*nilFloatIterator) Stats() IteratorStats { return IteratorStats{} } +func (*nilFloatIterator) Close() error { return nil } +func (*nilFloatIterator) Next() (*FloatPoint, error) { return nil, nil } // integerFloatTransformIterator executes a function to modify an existing point for every // output of the input iterator. @@ -1082,12 +1106,14 @@ func (itr *integerFloatTransformIterator) Stats() IteratorStats { return itr.inp func (itr *integerFloatTransformIterator) Close() error { return itr.input.Close() } // Next returns the minimum value for the next available interval. -func (itr *integerFloatTransformIterator) Next() *FloatPoint { - p := itr.input.Next() - if p != nil { - return itr.fn(p) +func (itr *integerFloatTransformIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if err != nil { + return nil, err + } else if p != nil { + return itr.fn(p), nil } - return nil + return nil, nil } // integerFloatTransformFunc creates or modifies a point. 
@@ -1101,10 +1127,10 @@ type integerFloatCastIterator struct { func (itr *integerFloatCastIterator) Stats() IteratorStats { return itr.input.Stats() } func (itr *integerFloatCastIterator) Close() error { return itr.input.Close() } -func (itr *integerFloatCastIterator) Next() *FloatPoint { - p := itr.input.Next() - if p == nil { - return nil +func (itr *integerFloatCastIterator) Next() (*FloatPoint, error) { + p, err := itr.input.Next() + if p == nil || err != nil { + return nil, err } return &FloatPoint{ @@ -1114,7 +1140,7 @@ func (itr *integerFloatCastIterator) Next() *FloatPoint { Nil: p.Nil, Value: float64(p.Value), Aux: p.Aux, - } + }, nil } // IteratorStats represents statistics about an iterator. diff --git a/vendor/github.com/influxdata/influxdb/influxql/parser.go b/vendor/github.com/influxdata/influxdb/influxql/parser.go index 72d20458b..00b772d1f 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/parser.go +++ b/vendor/github.com/influxdata/influxdb/influxql/parser.go @@ -126,8 +126,6 @@ func (p *Parser) parseShowStatement() (Statement, error) { return p.parseGrantsForUserStatement() case DATABASES: return p.parseShowDatabasesStatement() - case SERVERS: - return p.parseShowServersStatement() case FIELD: tok, pos, lit := p.scanIgnoreWhitespace() if tok == KEYS { @@ -181,7 +179,6 @@ func (p *Parser) parseShowStatement() (Statement, error) { "QUERIES", "RETENTION", "SERIES", - "SERVERS", "TAG", "USERS", "STATS", @@ -225,8 +222,6 @@ func (p *Parser) parseDropStatement() (Statement, error) { switch tok { case CONTINUOUS: return p.parseDropContinuousQueryStatement() - case DATA, META: - return p.parseDropServerStatement(tok) case DATABASE: return p.parseDropDatabaseStatement() case MEASUREMENT: @@ -245,7 +240,7 @@ func (p *Parser) parseDropStatement() (Statement, error) { case USER: return p.parseDropUserStatement() default: - return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATA", "MEASUREMENT", "META", "RETENTION", 
"SERIES", "SHARD", "SUBSCRIPTION", "USER"}, pos) + return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "MEASUREMENT", "RETENTION", "SERIES", "SHARD", "SUBSCRIPTION", "USER"}, pos) } } @@ -998,33 +993,34 @@ func (p *Parser) parseTarget(tr targetRequirement) (*Target, error) { return t, nil } -// parseDeleteStatement parses a delete string and returns a DeleteStatement. +// parseDeleteStatement parses a string and returns a delete statement. // This function assumes the DELETE token has already been consumed. -func (p *Parser) parseDeleteStatement() (*DeleteStatement, error) { - // TODO remove and do not skip test once we wire up DELETE FROM. - // See issues https://github.com/influxdata/influxdb/issues/1647 - // and https://github.com/influxdata/influxdb/issues/4404 - return nil, errors.New("DELETE FROM is currently not supported. Use DROP SERIES or DROP MEASUREMENT instead") - //stmt := &DeleteStatement{} - - //// Parse source - //if tok, pos, lit := p.scanIgnoreWhitespace(); tok != FROM { - // return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) - //} - //source, err := p.parseSource() - //if err != nil { - // return nil, err - //} - //stmt.Source = source - - //// Parse condition: "WHERE EXPR". - //condition, err := p.parseCondition() - //if err != nil { - // return nil, err - //} - //stmt.Condition = condition - - //return stmt, nil +func (p *Parser) parseDeleteStatement() (Statement, error) { + stmt := &DeleteSeriesStatement{} + var err error + + tok, pos, lit := p.scanIgnoreWhitespace() + + if tok == FROM { + // Parse source. + if stmt.Sources, err = p.parseSources(); err != nil { + return nil, err + } + } else { + p.unscan() + } + + // Parse condition: "WHERE EXPR". 
+ if stmt.Condition, err = p.parseCondition(); err != nil { + return nil, err + } + + // If they didn't provide a FROM or a WHERE, this query is invalid + if stmt.Condition == nil && stmt.Sources == nil { + return nil, newParseError(tokstr(tok, lit), []string{"FROM", "WHERE"}, pos) + } + + return stmt, nil } // parseShowSeriesStatement parses a string and returns a ShowSeriesStatement. @@ -1380,29 +1376,6 @@ func (p *Parser) parseDropShardStatement() (*DropShardStatement, error) { return stmt, nil } -// parseDropServerStatement parses a string and returns a DropServerStatement. -// This function assumes the "DROP " tokens have already been consumed. -func (p *Parser) parseDropServerStatement(tok Token) (*DropServerStatement, error) { - // Parse the SERVER token - if tok, pos, lit := p.scanIgnoreWhitespace(); tok != SERVER { - return nil, newParseError(tokstr(tok, lit), []string{"SERVER"}, pos) - } - - s := &DropServerStatement{} - var err error - - if tok == META { - s.Meta = true - } - - // Parse the server's ID. - if s.NodeID, err = p.parseUInt64(); err != nil { - return nil, err - } - - return s, nil -} - // parseShowContinuousQueriesStatement parses a string and returns a ShowContinuousQueriesStatement. // This function assumes the "SHOW CONTINUOUS" tokens have already been consumed. func (p *Parser) parseShowContinuousQueriesStatement() (*ShowContinuousQueriesStatement, error) { @@ -1416,13 +1389,6 @@ func (p *Parser) parseShowContinuousQueriesStatement() (*ShowContinuousQueriesSt return stmt, nil } -// parseShowServersStatement parses a string and returns a ShowServersStatement. -// This function assumes the "SHOW SERVERS" tokens have already been consumed. -func (p *Parser) parseShowServersStatement() (*ShowServersStatement, error) { - stmt := &ShowServersStatement{} - return stmt, nil -} - // parseGrantsForUserStatement parses a string and returns a ShowGrantsForUserStatement. // This function assumes the "SHOW GRANTS" tokens have already been consumed. 
func (p *Parser) parseGrantsForUserStatement() (*ShowGrantsForUserStatement, error) { diff --git a/vendor/github.com/influxdata/influxdb/influxql/query_executor.go b/vendor/github.com/influxdata/influxdb/influxql/query_executor.go index 5336785d7..e029068cd 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/query_executor.go +++ b/vendor/github.com/influxdata/influxdb/influxql/query_executor.go @@ -79,6 +79,9 @@ type ExecutionContext struct { // The requested maximum number of points to return in each result. ChunkSize int + // If this query is being executed in a read-only context. + ReadOnly bool + // Hold the query executor's logger. Log *log.Logger @@ -105,12 +108,16 @@ type QueryExecutor struct { // Query execution timeout. QueryTimeout time.Duration + // Log queries if they are slower than this time. + // If zero, slow queries will never be logged. + LogQueriesAfter time.Duration + // Maximum number of concurrent queries. MaxConcurrentQueries int - // Output of all logging. + // Logger to use for all logging. // Defaults to discarding all log output. - LogOutput io.Writer + Logger *log.Logger // Used for managing and tracking running queries. queries map[uint64]*QueryTask @@ -126,7 +133,7 @@ type QueryExecutor struct { func NewQueryExecutor() *QueryExecutor { return &QueryExecutor{ QueryTimeout: DefaultQueryTimeout, - LogOutput: ioutil.Discard, + Logger: log.New(ioutil.Discard, "[query] ", log.LstdFlags), queries: make(map[uint64]*QueryTask), nextID: 1, statMap: influxdb.NewStatistics("queryExecutor", "queryExecutor", nil), @@ -147,14 +154,20 @@ func (e *QueryExecutor) Close() error { return nil } +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (e *QueryExecutor) SetLogOutput(w io.Writer) { + e.Logger = log.New(w, "[query] ", log.LstdFlags) +} + // ExecuteQuery executes each statement within a query. 
-func (e *QueryExecutor) ExecuteQuery(query *Query, database string, chunkSize int, closing chan struct{}) <-chan *Result { +func (e *QueryExecutor) ExecuteQuery(query *Query, database string, chunkSize int, readonly bool, closing chan struct{}) <-chan *Result { results := make(chan *Result) - go e.executeQuery(query, database, chunkSize, closing, results) + go e.executeQuery(query, database, chunkSize, readonly, closing, results) return results } -func (e *QueryExecutor) executeQuery(query *Query, database string, chunkSize int, closing <-chan struct{}, results chan *Result) { +func (e *QueryExecutor) executeQuery(query *Query, database string, chunkSize int, readonly bool, closing <-chan struct{}, results chan *Result) { defer close(results) defer e.recover(query, results) @@ -171,8 +184,6 @@ func (e *QueryExecutor) executeQuery(query *Query, database string, chunkSize in } defer e.killQuery(qid) - logger := e.logger() - // Setup the execution context that will be used when executing statements. ctx := ExecutionContext{ QueryID: qid, @@ -180,7 +191,8 @@ func (e *QueryExecutor) executeQuery(query *Query, database string, chunkSize in Results: results, Database: database, ChunkSize: chunkSize, - Log: logger, + ReadOnly: readonly, + Log: e.Logger, InterruptCh: task.closing, } @@ -214,7 +226,7 @@ loop: } // Log each normalized statement. - logger.Println(stmt.String()) + e.Logger.Println(stmt.String()) // Handle a query management queries specially so they don't go // to the underlying statement executor. 
@@ -232,9 +244,15 @@ loop: } continue loop case *KillQueryStatement: + var messages []*Message + if ctx.ReadOnly { + messages = append(messages, ReadOnlyWarning(stmt.String())) + } + err := e.executeKillQueryStatement(stmt) results <- &Result{ StatementID: i, + Messages: messages, Err: err, } @@ -314,10 +332,6 @@ func (e *QueryExecutor) executeShowQueriesStatement(q *ShowQueriesStatement) (mo }}, nil } -func (e *QueryExecutor) logger() *log.Logger { - return log.New(e.LogOutput, "[query] ", log.LstdFlags) -} - func (e *QueryExecutor) query(qid uint64) (*QueryTask, bool) { e.mu.RLock() query, ok := e.queries[qid] @@ -355,6 +369,20 @@ func (e *QueryExecutor) attachQuery(q *Query, database string, interrupt <-chan e.queries[qid] = query go e.waitForQuery(qid, query.closing, interrupt, query.monitorCh) + if e.LogQueriesAfter != 0 { + go query.monitor(func(closing <-chan struct{}) error { + t := time.NewTimer(e.LogQueriesAfter) + defer t.Stop() + + select { + case <-t.C: + e.Logger.Printf("Detected slow query: %s (qid: %d, database: %s, threshold: %s)", + query.query, qid, query.database, e.LogQueriesAfter) + case <-closing: + } + return nil + }) + } e.nextID++ return qid, query, nil } diff --git a/vendor/github.com/influxdata/influxdb/influxql/result.go b/vendor/github.com/influxdata/influxdb/influxql/result.go index d1f2085bb..0069208fb 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/result.go +++ b/vendor/github.com/influxdata/influxdb/influxql/result.go @@ -3,6 +3,7 @@ package influxql import ( "encoding/json" "errors" + "fmt" "github.com/influxdata/influxdb/models" ) @@ -40,6 +41,18 @@ type Message struct { Text string `json:"text"` } +// ReadOnlyWarning generates a warning message that tells the user the command +// they are using is being used for writing in a read only context. +// +// This is a temporary method to be used while transitioning to read only +// operations for issue #6290. 
+func ReadOnlyWarning(stmt string) *Message { + return &Message{ + Level: WarningLevel, + Text: fmt.Sprintf("deprecated use of '%s' in a read only context, please use a POST request instead", stmt), + } +} + // Result represents a resultset returned from a single statement. // Rows represents a list of rows that can be sorted consistently by name/tag. type Result struct { diff --git a/vendor/github.com/influxdata/influxdb/influxql/sanitize.go b/vendor/github.com/influxdata/influxdb/influxql/sanitize.go new file mode 100644 index 000000000..d605387d6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/sanitize.go @@ -0,0 +1,47 @@ +package influxql + +import ( + "bytes" + "regexp" +) + +var ( + sanitizeSetPassword = regexp.MustCompile(`(?i)password\s+for[^=]*=\s+(["']?[^\s"]+["']?)`) + + sanitizeCreatePassword = regexp.MustCompile(`(?i)with\s+password\s+(["']?[^\s"]+["']?)`) +) + +// Sanitize attempts to sanitize passwords out of a raw query. +// It looks for patterns that may be related to the SET PASSWORD and CREATE USER +// statements and will redact the password that should be there. It will attempt +// to redact information from common invalid queries too, but it's not guaranteed +// to succeed on improper queries. +// +// This function works on the raw query and attempts to retain the original input +// as much as possible. 
+func Sanitize(query string) string { + if matches := sanitizeSetPassword.FindAllStringSubmatchIndex(query, -1); matches != nil { + var buf bytes.Buffer + i := 0 + for _, match := range matches { + buf.WriteString(query[i:match[2]]) + buf.WriteString("[REDACTED]") + i = match[3] + } + buf.WriteString(query[i:]) + query = buf.String() + } + + if matches := sanitizeCreatePassword.FindAllStringSubmatchIndex(query, -1); matches != nil { + var buf bytes.Buffer + i := 0 + for _, match := range matches { + buf.WriteString(query[i:match[2]]) + buf.WriteString("[REDACTED]") + i = match[3] + } + buf.WriteString(query[i:]) + query = buf.String() + } + return query +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/select.go b/vendor/github.com/influxdata/influxdb/influxql/select.go index 980b1f92d..3a5231af4 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/select.go +++ b/vendor/github.com/influxdata/influxdb/influxql/select.go @@ -78,7 +78,18 @@ func Select(stmt *SelectStatement, ic IteratorCreator, sopt *SelectOptions) ([]I } } - return buildFieldIterators(fields, ic, opt) + // Determine if there is one call and it is a selector. + selector := false + if len(info.calls) == 1 { + for call := range info.calls { + switch call.Name { + case "first", "last", "min", "max", "percentile": + selector = true + } + } + } + + return buildFieldIterators(fields, ic, opt, selector) } // buildAuxIterators creates a set of iterators from a single combined auxilary iterator. 
@@ -117,7 +128,7 @@ func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ( case *VarRef: itrs[i] = aitr.Iterator(expr.Val) case *BinaryExpr: - itr, err := buildExprIterator(expr, aitr, opt) + itr, err := buildExprIterator(expr, aitr, opt, false) if err != nil { return fmt.Errorf("error constructing iterator for field '%s': %s", f.String(), err) } @@ -140,7 +151,7 @@ func buildAuxIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ( } // buildFieldIterators creates an iterator for each field expression. -func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) { +func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions, selector bool) ([]Iterator, error) { // Create iterators from fields against the iterator creator. itrs := make([]Iterator, len(fields)) @@ -158,7 +169,7 @@ func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) } expr := Reduce(f.Expr, nil) - itr, err := buildExprIterator(expr, ic, opt) + itr, err := buildExprIterator(expr, ic, opt, selector) if err != nil { return err } @@ -185,7 +196,7 @@ func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) } expr := Reduce(f.Expr, nil) - itr, err := buildExprIterator(expr, aitr, opt) + itr, err := buildExprIterator(expr, aitr, opt, false) if err != nil { return err } @@ -210,7 +221,7 @@ func buildFieldIterators(fields Fields, ic IteratorCreator, opt IteratorOptions) } // buildExprIterator creates an iterator for an expression. 
-func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iterator, error) { +func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions, selector bool) (Iterator, error) { opt.Expr = expr switch expr := expr.(type) { @@ -221,7 +232,7 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter switch expr.Name { case "distinct": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, selector) if err != nil { return nil, err } @@ -239,7 +250,7 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter } } - input, err := buildExprIterator(expr.Args[0], ic, opt) + input, err := buildExprIterator(expr.Args[0], ic, opt, selector) if err != nil { return nil, err } @@ -273,7 +284,7 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter switch arg := expr.Args[0].(type) { case *Call: if arg.Name == "distinct" { - input, err := buildExprIterator(arg, ic, opt) + input, err := buildExprIterator(arg, ic, opt, selector) if err != nil { return nil, err } @@ -284,20 +295,20 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter case "min", "max", "sum", "first", "last", "mean": return ic.CreateIterator(opt) case "median": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) if err != nil { return nil, err } return newMedianIterator(input, opt) case "stddev": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) if err != nil { return nil, err } return newStddevIterator(input, opt) case "spread": // OPTIMIZE(benbjohnson): convert to map/reduce - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) if err != 
nil { return nil, err } @@ -320,7 +331,7 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter } } - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) if err != nil { return nil, err } @@ -344,14 +355,14 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter } } - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) if err != nil { return nil, err } n := expr.Args[len(expr.Args)-1].(*IntegerLiteral) return newBottomIterator(input, opt, n, tags) case "percentile": - input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt) + input, err := buildExprIterator(expr.Args[0].(*VarRef), ic, opt, false) if err != nil { return nil, err } @@ -372,11 +383,13 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter return nil, err } - if expr.Name != "top" && expr.Name != "bottom" { - itr = NewIntervalIterator(itr, opt) - } - if !opt.Interval.IsZero() && opt.Fill != NoFill { - itr = NewFillIterator(itr, expr, opt) + if !selector || !opt.Interval.IsZero() { + if expr.Name != "top" && expr.Name != "bottom" { + itr = NewIntervalIterator(itr, opt) + } + if !opt.Interval.IsZero() && opt.Fill != NoFill { + itr = NewFillIterator(itr, expr, opt) + } } if opt.InterruptCh != nil { itr = NewInterruptIterator(itr, opt.InterruptCh) @@ -392,31 +405,31 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter return nil, fmt.Errorf("unable to construct an iterator from two literals: LHS: %T, RHS: %T", lhs, rhs) } - lhs, err := buildExprIterator(expr.LHS, ic, opt) + lhs, err := buildExprIterator(expr.LHS, ic, opt, false) if err != nil { return nil, err } return buildRHSTransformIterator(lhs, rhs, expr.Op, ic, opt) } else if lhs, ok := expr.LHS.(Literal); ok { - rhs, err := buildExprIterator(expr.RHS, ic, 
opt) + rhs, err := buildExprIterator(expr.RHS, ic, opt, false) if err != nil { return nil, err } return buildLHSTransformIterator(lhs, rhs, expr.Op, ic, opt) } else { // We have two iterators. Combine them into a single iterator. - lhs, err := buildExprIterator(expr.LHS, ic, opt) + lhs, err := buildExprIterator(expr.LHS, ic, opt, false) if err != nil { return nil, err } - rhs, err := buildExprIterator(expr.RHS, ic, opt) + rhs, err := buildExprIterator(expr.RHS, ic, opt, false) if err != nil { return nil, err } return buildTransformIterator(lhs, rhs, expr.Op, ic, opt) } case *ParenExpr: - return buildExprIterator(expr.Expr, ic, opt) + return buildExprIterator(expr.Expr, ic, opt, selector) default: return nil, fmt.Errorf("invalid expression type: %T", expr) } @@ -840,13 +853,33 @@ func buildTransformIterator(lhs Iterator, rhs Iterator, op Token, ic IteratorCre left: newBufIntegerIterator(left), right: newBufIntegerIterator(right), fn: func(a *IntegerPoint, b *IntegerPoint) *FloatPoint { + if a == nil && b == nil { + return nil + } else if a == nil { + return &FloatPoint{ + Name: b.Name, + Tags: b.Tags, + Time: b.Time, + Aux: b.Aux, + Nil: true, + } + } else if b == nil { + return &FloatPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Aux: a.Aux, + Nil: true, + } + } + p := &FloatPoint{ Name: a.Name, Tags: a.Tags, Time: a.Time, Aux: a.Aux, } - if (a != nil && b != nil) && (!a.Nil && !b.Nil) { + if !a.Nil && !b.Nil { p.Value = fn(a.Value, b.Value) } else { p.Nil = true @@ -911,13 +944,33 @@ func buildTransformIterator(lhs Iterator, rhs Iterator, op Token, ic IteratorCre left: newBufFloatIterator(left), right: newBufFloatIterator(right), fn: func(a *FloatPoint, b *FloatPoint) *BooleanPoint { + if a == nil && b == nil { + return nil + } else if a == nil { + return &BooleanPoint{ + Name: b.Name, + Tags: b.Tags, + Time: b.Time, + Aux: b.Aux, + Nil: true, + } + } else if b == nil { + return &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Aux: a.Aux, 
+ Nil: true, + } + } + p := &BooleanPoint{ Name: a.Name, Tags: a.Tags, Time: a.Time, Aux: a.Aux, } - if (a != nil && b != nil) && (!a.Nil && !b.Nil) { + if !a.Nil && !b.Nil { p.Value = fn(a.Value, b.Value) } else { p.Nil = true @@ -938,13 +991,33 @@ func buildTransformIterator(lhs Iterator, rhs Iterator, op Token, ic IteratorCre left: newBufIntegerIterator(left), right: newBufIntegerIterator(right), fn: func(a *IntegerPoint, b *IntegerPoint) *BooleanPoint { + if a == nil && b == nil { + return nil + } else if a == nil { + return &BooleanPoint{ + Name: b.Name, + Tags: b.Tags, + Time: b.Time, + Aux: b.Aux, + Nil: true, + } + } else if b == nil { + return &BooleanPoint{ + Name: a.Name, + Tags: a.Tags, + Time: a.Time, + Aux: a.Aux, + Nil: true, + } + } + p := &BooleanPoint{ Name: a.Name, Tags: a.Tags, Time: a.Time, Aux: a.Aux, } - if (a != nil && b != nil) && (!a.Nil && !b.Nil) { + if !a.Nil && !b.Nil { p.Value = fn(a.Value, b.Value) } else { p.Nil = true diff --git a/vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go b/vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go index a0f90619b..0d0549d3a 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go +++ b/vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go @@ -25,9 +25,7 @@ func rewriteShowFieldKeysStatement(stmt *ShowFieldKeysStatement) (Statement, err Fields: Fields([]*Field{ {Expr: &VarRef{Val: "fieldKey"}}, }), - Sources: Sources([]Source{ - &Measurement{Name: "_fieldKeys"}, - }), + Sources: rewriteSources(stmt.Sources, "_fieldKeys"), Condition: rewriteSourcesCondition(stmt.Sources, nil), Offset: stmt.Offset, Limit: stmt.Limit, @@ -74,9 +72,7 @@ func rewriteShowSeriesStatement(stmt *ShowSeriesStatement) (Statement, error) { Fields: []*Field{ {Expr: &VarRef{Val: "key"}}, }, - Sources: []Source{ - &Measurement{Name: "_series"}, - }, + Sources: rewriteSources(stmt.Sources, "_series"), Condition: 
rewriteSourcesCondition(stmt.Sources, stmt.Condition), Offset: stmt.Offset, Limit: stmt.Limit, @@ -131,9 +127,7 @@ func rewriteShowTagValuesStatement(stmt *ShowTagValuesStatement) (Statement, err {Expr: &VarRef{Val: "_tagKey"}, Alias: "key"}, {Expr: &VarRef{Val: "value"}}, }, - Sources: []Source{ - &Measurement{Name: "_tags"}, - }, + Sources: rewriteSources(stmt.Sources, "_tags"), Condition: condition, Offset: stmt.Offset, Limit: stmt.Limit, @@ -153,9 +147,7 @@ func rewriteShowTagKeysStatement(stmt *ShowTagKeysStatement) (Statement, error) Fields: []*Field{ {Expr: &VarRef{Val: "tagKey"}}, }, - Sources: []Source{ - &Measurement{Name: "_tagKeys"}, - }, + Sources: rewriteSources(stmt.Sources, "_tagKeys"), Condition: rewriteSourcesCondition(stmt.Sources, stmt.Condition), Offset: stmt.Offset, Limit: stmt.Limit, @@ -163,6 +155,28 @@ func rewriteShowTagKeysStatement(stmt *ShowTagKeysStatement) (Statement, error) OmitTime: true, Dedupe: true, }, nil + +} + +// rewriteSources rewrites sources with previous database and retention policy +func rewriteSources(sources Sources, measurementName string) Sources { + newSources := Sources{} + for _, src := range sources { + if src == nil { + continue + } + mm := src.(*Measurement) + newSources = append(newSources, + &Measurement{ + Database: mm.Database, + RetentionPolicy: mm.RetentionPolicy, + Name: measurementName, + }) + } + if len(newSources) <= 0 { + return append(newSources, &Measurement{Name: measurementName}) + } + return newSources } // rewriteSourcesCondition rewrites sources into `name` expressions. 
diff --git a/vendor/github.com/influxdata/influxdb/influxql/token.go b/vendor/github.com/influxdata/influxdb/influxql/token.go index 762874046..e31c4dabd 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/token.go +++ b/vendor/github.com/influxdata/influxdb/influxql/token.go @@ -67,7 +67,6 @@ const ( BY CREATE CONTINUOUS - DATA DATABASE DATABASES DEFAULT @@ -100,7 +99,6 @@ const ( KEYS KILL LIMIT - META MEASUREMENT MEASUREMENTS NAME @@ -121,8 +119,6 @@ const ( REVOKE SELECT SERIES - SERVER - SERVERS SET SHOW SHARD @@ -191,7 +187,6 @@ var tokens = [...]string{ BY: "BY", CREATE: "CREATE", CONTINUOUS: "CONTINUOUS", - DATA: "DATA", DATABASE: "DATABASE", DATABASES: "DATABASES", DEFAULT: "DEFAULT", @@ -226,7 +221,6 @@ var tokens = [...]string{ LIMIT: "LIMIT", MEASUREMENT: "MEASUREMENT", MEASUREMENTS: "MEASUREMENTS", - META: "META", NAME: "NAME", NOT: "NOT", OFFSET: "OFFSET", @@ -245,8 +239,6 @@ var tokens = [...]string{ REVOKE: "REVOKE", SELECT: "SELECT", SERIES: "SERIES", - SERVER: "SERVER", - SERVERS: "SERVERS", SET: "SET", SHOW: "SHOW", SHARD: "SHARD", diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go index 8f1b2091c..055f7e2c0 100644 --- a/vendor/github.com/influxdata/influxdb/models/points.go +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -139,9 +139,17 @@ func ParsePointsString(buf string) ([]Point, error) { // ParseKey returns the measurement name and tags from a point. 
func ParseKey(buf string) (string, Tags, error) { - _, keyBuf, err := scanKey([]byte(buf), 0) - tags := parseTags([]byte(buf)) - return string(keyBuf), tags, err + // Ignore the error because scanMeasurement returns "missing fields" which we ignore + // when just parsing a key + state, i, _ := scanMeasurement([]byte(buf), 0) + + var tags Tags + if state == tagKeyState { + tags = parseTags([]byte(buf)) + // scanMeasurement returns the location of the comma if there are tags, strip that off + return string(buf[:i-1]), tags, nil + } + return string(buf[:i]), tags, nil } // ParsePointsWithPrecision is similar to ParsePoints, but allows the @@ -1062,6 +1070,10 @@ func escapeStringField(in string) string { // unescapeStringField returns a copy of in with any escaped double-quotes // or backslashes unescaped func unescapeStringField(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + var out []byte i := 0 for { @@ -1421,17 +1433,14 @@ func parseNumber(val []byte) (interface{}, error) { } func newFieldsFromBinary(buf []byte) Fields { - fields := Fields{} + fields := make(Fields, 8) var ( i int name, valueBuf []byte value interface{} err error ) - for { - if i >= len(buf) { - break - } + for i < len(buf) { i, name = scanTo(buf, i, '=') name = escape.Unescape(name) diff --git a/vendor/github.com/influxdata/influxdb/monitor/service.go b/vendor/github.com/influxdata/influxdb/monitor/service.go index 4fe730867..42f3a44c6 100644 --- a/vendor/github.com/influxdata/influxdb/monitor/service.go +++ b/vendor/github.com/influxdata/influxdb/monitor/service.go @@ -3,6 +3,7 @@ package monitor // import "github.com/influxdata/influxdb/monitor" import ( "expvar" "fmt" + "io" "log" "os" "runtime" @@ -85,6 +86,7 @@ func New(c Config) *Monitor { // for identification purpose. func (m *Monitor) Open() error { m.Logger.Printf("Starting monitor system") + m.done = make(chan struct{}) // Self-register various stats and diagnostics. 
m.RegisterDiagnosticsClient("build", &build{ @@ -99,7 +101,6 @@ func (m *Monitor) Open() error { // If enabled, record stats in a InfluxDB system. if m.storeEnabled { - // Start periodic writes to system. m.wg.Add(1) go m.storeStatistics() @@ -112,13 +113,19 @@ func (m *Monitor) Open() error { func (m *Monitor) Close() { m.Logger.Println("shutting down monitor system") close(m.done) + m.wg.Wait() m.done = nil + m.DeregisterDiagnosticsClient("build") + m.DeregisterDiagnosticsClient("runtime") + m.DeregisterDiagnosticsClient("network") + m.DeregisterDiagnosticsClient("system") } -// SetLogger sets the internal logger to the logger passed in. -func (m *Monitor) SetLogger(l *log.Logger) { - m.Logger = l +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (m *Monitor) SetLogOutput(w io.Writer) { + m.Logger = log.New(w, "[monitor] ", log.LstdFlags) } // RegisterDiagnosticsClient registers a diagnostics client with the given name and tags. 
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go index bd0262b21..5998bb7b2 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -10,6 +10,14 @@ func Bytes(in []byte) []byte { } func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + i := 0 inLen := len(in) var out []byte diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/service.go b/vendor/github.com/influxdata/influxdb/services/collectd/service.go index 5cfd28d74..c5c9d3f1f 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/service.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/service.go @@ -3,6 +3,7 @@ package collectd // import "github.com/influxdata/influxdb/services/collectd" import ( "expvar" "fmt" + "io" "log" "net" "os" @@ -167,9 +168,10 @@ func (s *Service) Close() error { return nil } -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.Logger = l +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (s *Service) SetLogOutput(w io.Writer) { + s.Logger = log.New(w, "[collectd] ", log.LstdFlags) } // SetTypes sets collectd types db. 
diff --git a/vendor/github.com/influxdata/influxdb/services/graphite/service.go b/vendor/github.com/influxdata/influxdb/services/graphite/service.go index 4bc113188..6628c7456 100644 --- a/vendor/github.com/influxdata/influxdb/services/graphite/service.go +++ b/vendor/github.com/influxdata/influxdb/services/graphite/service.go @@ -4,6 +4,7 @@ import ( "bufio" "expvar" "fmt" + "io" "log" "math" "net" @@ -200,9 +201,10 @@ func (s *Service) Close() error { return nil } -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.logger = l +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (s *Service) SetLogOutput(w io.Writer) { + s.logger = log.New(w, "[graphite] ", log.LstdFlags) } // Addr returns the address the Service binds to. diff --git a/vendor/github.com/influxdata/influxdb/services/meta/client.go b/vendor/github.com/influxdata/influxdb/services/meta/client.go index 52c84db4c..0ddf966e5 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/client.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/client.go @@ -151,31 +151,31 @@ func (c *Client) ClusterID() uint64 { } // Database returns info for the requested database. -func (c *Client) Database(name string) (*DatabaseInfo, error) { +func (c *Client) Database(name string) *DatabaseInfo { c.mu.RLock() data := c.cacheData.Clone() c.mu.RUnlock() for _, d := range data.Databases { if d.Name == name { - return &d, nil + return &d } } - return nil, influxdb.ErrDatabaseNotFound(name) + return nil } // Databases returns a list of all database infos. 
-func (c *Client) Databases() ([]DatabaseInfo, error) { +func (c *Client) Databases() []DatabaseInfo { c.mu.RLock() data := c.cacheData.Clone() c.mu.RUnlock() dbs := data.Databases if dbs == nil { - return []DatabaseInfo{}, nil + return []DatabaseInfo{} } - return dbs, nil + return dbs } // CreateDatabase creates a database or returns it if it already exists @@ -957,10 +957,12 @@ func (c *Client) MarshalBinary() ([]byte, error) { return c.cacheData.MarshalBinary() } -func (c *Client) SetLogger(l *log.Logger) { +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (c *Client) SetLogOutput(w io.Writer) { c.mu.Lock() defer c.mu.Unlock() - c.logger = l + c.logger = log.New(w, "[metaclient] ", log.LstdFlags) } func (c *Client) updateAuthCache() { diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go index 32b205f1e..9e887ab43 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go @@ -177,8 +177,11 @@ func (s *Service) Close() error { return nil } -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { s.Logger = l } +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (s *Service) SetLogOutput(w io.Writer) { + s.Logger = log.New(w, "[opentsdb] ", log.LstdFlags) +} // Err returns a channel for fatal errors that occur on the listener. 
func (s *Service) Err() <-chan error { return s.err } diff --git a/vendor/github.com/influxdata/influxdb/services/udp/service.go b/vendor/github.com/influxdata/influxdb/services/udp/service.go index fc07c01d0..2e349b23e 100644 --- a/vendor/github.com/influxdata/influxdb/services/udp/service.go +++ b/vendor/github.com/influxdata/influxdb/services/udp/service.go @@ -3,6 +3,7 @@ package udp // import "github.com/influxdata/influxdb/services/udp" import ( "errors" "expvar" + "io" "log" "net" "os" @@ -214,9 +215,10 @@ func (s *Service) Close() error { return nil } -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.Logger = l +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (s *Service) SetLogOutput(w io.Writer) { + s.Logger = log.New(w, "[udp] ", log.LstdFlags) } // Addr returns the listener's address diff --git a/vendor/github.com/influxdata/influxdb/test.sh b/vendor/github.com/influxdata/influxdb/test.sh index 3b2de3453..ca2bb8ae7 100755 --- a/vendor/github.com/influxdata/influxdb/test.sh +++ b/vendor/github.com/influxdata/influxdb/test.sh @@ -76,6 +76,8 @@ function run_test_docker { -e "INFLUXDB_DATA_ENGINE=$INFLUXDB_DATA_ENGINE" \ -e "GORACE=$GORACE" \ -e "GO_CHECKOUT=$GO_CHECKOUT" \ + -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" \ + -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" \ "$imagename" \ "--parallel=$PARALLELISM" \ "--timeout=$TIMEOUT" \ @@ -131,30 +133,31 @@ fi case $ENVIRONMENT_INDEX in 0) # 64 bit tests - run_test_docker Dockerfile_build_ubuntu64 test_64bit --debug --generate --test + # Static builds will be uploaded to S3 upon test completion + run_test_docker Dockerfile_build_ubuntu64 test_64bit --generate --test --upload --package rc=$? 
;; 1) # 64 bit tsm tests INFLUXDB_DATA_ENGINE="tsm1" - run_test_docker Dockerfile_build_ubuntu64 test_64bit_tsm --debug --generate --test + run_test_docker Dockerfile_build_ubuntu64 test_64bit_tsm --generate --test rc=$? ;; 2) # 64 bit race tests GORACE="halt_on_error=1" - run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --debug --generate --test --race + run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --generate --test --race rc=$? ;; 3) # 32 bit tests - run_test_docker Dockerfile_build_ubuntu32 test_32bit --debug --generate --test + run_test_docker Dockerfile_build_ubuntu32 test_32bit --generate --test --arch=i386 rc=$? ;; 4) # 64 bit tests on golang go1.6 GO_CHECKOUT=go1.6 - run_test_docker Dockerfile_build_ubuntu64_git test_64bit_go1.6 --debug --generate --test --no-vet + run_test_docker Dockerfile_build_ubuntu64_git test_64bit_go1.6 --generate --test --no-vet rc=$? ;; "save") @@ -216,4 +219,3 @@ case $ENVIRONMENT_INDEX in esac exit $rc - diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine.go b/vendor/github.com/influxdata/influxdb/tsdb/engine.go index e1f711d3a..3c2df38a7 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine.go @@ -29,12 +29,14 @@ type Engine interface { Close() error SetLogOutput(io.Writer) - LoadMetadataIndex(shard *Shard, index *DatabaseIndex) error + LoadMetadataIndex(shardID uint64, index *DatabaseIndex) error CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) SeriesKeys(opt influxql.IteratorOptions) (influxql.SeriesList, error) WritePoints(points []models.Point) error + ContainsSeries(keys []string) (map[string]bool, error) DeleteSeries(keys []string) error + DeleteSeriesRange(keys []string, min, max int64) error DeleteMeasurement(name string, seriesKeys []string) error SeriesCount() (n int, err error) MeasurementFields(measurement string) *MeasurementFields diff --git 
a/vendor/github.com/influxdata/influxdb/tsdb/meta.go b/vendor/github.com/influxdata/influxdb/tsdb/meta.go index 904822bef..9cd509fa3 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/meta.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/meta.go @@ -5,12 +5,12 @@ import ( "fmt" "regexp" "sort" - "strings" "sync" "time" "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/escape" internal "github.com/influxdata/influxdb/tsdb/internal" @@ -58,6 +58,16 @@ func (d *DatabaseIndex) Series(key string) *Series { return s } +func (d *DatabaseIndex) SeriesKeys() []string { + d.mu.RLock() + s := make([]string, 0, len(d.series)) + for k := range d.series { + s = append(s, k) + } + d.mu.RUnlock() + return s +} + // SeriesN returns the number of series. func (d *DatabaseIndex) SeriesN() int { d.mu.RLock() @@ -163,6 +173,47 @@ func (d *DatabaseIndex) AssignShard(k string, shardID uint64) { } } +// UnassignShard updates the index to indicate that series k does not exist in +// the given shardID +func (d *DatabaseIndex) UnassignShard(k string, shardID uint64) { + ss := d.Series(k) + if ss != nil { + if ss.Assigned(shardID) { + // Remove the shard from any series + ss.UnassignShard(shardID) + + // If this series no longer has shards assigned, remove the series + if ss.ShardN() == 0 { + + // Remove the series from the measurements + ss.measurement.DropSeries(ss) + + // If the measurement no longer has any series, remove it as well + if !ss.measurement.HasSeries() { + d.mu.Lock() + d.dropMeasurement(ss.measurement.Name) + d.mu.Unlock() + } + + // Remove the series key from the series index + d.mu.Lock() + delete(d.series, k) + d.statMap.Add(statDatabaseSeries, int64(-1)) + d.mu.Unlock() + } + } + } +} + +// RemoveShard removes all references to shardID from any series or measurements +// in the index.
If the shard was the only owner of data for the series, the series +// is removed from the index. +func (d *DatabaseIndex) RemoveShard(shardID uint64) { + for _, k := range d.SeriesKeys() { + d.UnassignShard(k, shardID) + } +} + // TagsForSeries returns the tag map for the passed in series func (d *DatabaseIndex) TagsForSeries(key string) map[string]string { d.mu.RLock() @@ -364,11 +415,15 @@ func (d *DatabaseIndex) Measurements() Measurements { return measurements } -// DropMeasurement removes the measurement and all of its underlying series from the database index +// DropMeasurement removes the measurement and all of its underlying +// series from the database index func (d *DatabaseIndex) DropMeasurement(name string) { d.mu.Lock() defer d.mu.Unlock() + d.dropMeasurement(name) +} +func (d *DatabaseIndex) dropMeasurement(name string) { m := d.measurements[name] if m == nil { return @@ -388,17 +443,30 @@ func (d *DatabaseIndex) DropSeries(keys []string) { d.mu.Lock() defer d.mu.Unlock() - var nDeleted int64 + var ( + mToDelete = map[string]struct{}{} + nDeleted int64 + ) + for _, k := range keys { series := d.series[k] if series == nil { continue } - series.measurement.DropSeries(series.id) + series.measurement.DropSeries(series) delete(d.series, k) nDeleted++ + + // If there are no more series in the measurement then we'll + // remove it. + if len(series.measurement.seriesByID) == 0 { + mToDelete[series.measurement.Name] = struct{}{} + } } + for mname := range mToDelete { + d.dropMeasurement(mname) + } d.statMap.Add(statDatabaseSeries, -nDeleted) } @@ -531,7 +599,8 @@ func (m *Measurement) AddSeries(s *Series) bool { } // DropSeries will remove a series from the measurementIndex. 
-func (m *Measurement) DropSeries(seriesID uint64) { +func (m *Measurement) DropSeries(series *Series) { + seriesID := series.id m.mu.Lock() defer m.mu.Unlock() @@ -540,37 +609,24 @@ func (m *Measurement) DropSeries(seriesID uint64) { } delete(m.seriesByID, seriesID) - var ids []uint64 - for _, id := range m.seriesIDs { - if id != seriesID { - ids = append(ids, id) - } - } + ids := filter(m.seriesIDs, seriesID) m.seriesIDs = ids - // remove this series id to the tag index on the measurement + // remove this series id from the tag index on the measurement // s.seriesByTagKeyValue is defined as map[string]map[string]SeriesIDs - for k, v := range m.seriesByTagKeyValue { - values := v - for kk, vv := range values { - var ids []uint64 - for _, id := range vv { - if id != seriesID { - ids = append(ids, id) - } - } - // Check to see if we have any ids, if not, remove the key - if len(ids) == 0 { - delete(values, kk) - } else { - values[kk] = ids - } + for k, v := range series.Tags { + values := m.seriesByTagKeyValue[k][v] + ids := filter(values, seriesID) + // Check to see if we have any ids, if not, remove the key + if len(ids) == 0 { + delete(m.seriesByTagKeyValue[k], v) + } else { + m.seriesByTagKeyValue[k][v] = ids } + // If we have no values, then we delete the key - if len(values) == 0 { + if len(m.seriesByTagKeyValue[k]) == 0 { delete(m.seriesByTagKeyValue, k) - } else { - m.seriesByTagKeyValue[k] = values } } @@ -747,6 +803,15 @@ func mergeSeriesFilters(op influxql.Token, ids SeriesIDs, lfilters, rfilters Fil // idsForExpr will return a collection of series ids and a filter expression that should // be used to filter points from those series. func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Expr, error) { + // If this binary expression has another binary expression, then this + // is some expression math and we should just pass it to the underlying query. 
+ if _, ok := n.LHS.(*influxql.BinaryExpr); ok { + return m.seriesIDs, n, nil + } else if _, ok := n.RHS.(*influxql.BinaryExpr); ok { + return m.seriesIDs, n, nil + } + + // Retrieve the variable reference from the correct side of the expression. name, ok := n.LHS.(*influxql.VarRef) value := n.RHS if !ok { @@ -1377,6 +1442,12 @@ func (s *Series) AssignShard(shardID uint64) { s.mu.Unlock() } +func (s *Series) UnassignShard(shardID uint64) { + s.mu.Lock() + delete(s.shardIDs, shardID) + s.mu.Unlock() +} + func (s *Series) Assigned(shardID uint64) bool { s.mu.RLock() b := s.shardIDs[shardID] @@ -1384,6 +1455,13 @@ func (s *Series) Assigned(shardID uint64) bool { return b } +func (s *Series) ShardN() int { + s.mu.RLock() + n := len(s.shardIDs) + s.mu.RUnlock() + return n +} + // MarshalBinary encodes the object to a binary format. func (s *Series) MarshalBinary() ([]byte, error) { s.mu.RLock() @@ -1724,12 +1802,24 @@ func (s stringSet) intersect(o stringSet) stringSet { return ns } +// filter removes v from a if it exists. a must be sorted in ascending +// order. +func filter(a []uint64, v uint64) []uint64 { + // binary search for v + i := sort.Search(len(a), func(i int) bool { return a[i] >= v }) + if i >= len(a) || a[i] != v { + return a + } + + // we found it, so shift the right half down one, overwriting v's position. + copy(a[i:], a[i+1:]) + return a[:len(a)-1] +} + // MeasurementFromSeriesKey returns the name of the measurement from a key that // contains a measurement name. 
func MeasurementFromSeriesKey(key string) string { - idx := strings.Index(key, ",") - if idx == -1 { - return key - } - return key[:idx] + // Ignoring the error because the func returns "missing fields" + k, _, _ := models.ParseKey(key) + return escape.UnescapeString(k) } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard.go b/vendor/github.com/influxdata/influxdb/tsdb/shard.go index ca3b61bb8..3915c2cee 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/shard.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/shard.go @@ -7,11 +7,13 @@ import ( "expvar" "fmt" "io" + "log" "math" "os" "sort" "strings" "sync" + "time" "github.com/gogo/protobuf/proto" "github.com/influxdata/influxdb" @@ -88,6 +90,8 @@ type Shard struct { // expvar-based stats. statMap *expvar.Map + logger *log.Logger + // The writer used by the logger. LogOutput io.Writer } @@ -106,7 +110,7 @@ func NewShard(id uint64, index *DatabaseIndex, path string, walPath string, opti } statMap := influxdb.NewStatistics(key, "shard", tags) - return &Shard{ + s := &Shard{ index: index, id: id, path: path, @@ -119,6 +123,18 @@ func NewShard(id uint64, index *DatabaseIndex, path string, walPath string, opti statMap: statMap, LogOutput: os.Stderr, } + s.SetLogOutput(os.Stderr) + return s +} + +// SetLogOutput sets the writer to which log output will be written. It must +// not be called after the Open method has been called. +func (s *Shard) SetLogOutput(w io.Writer) { + s.LogOutput = w + s.logger = log.New(w, "[shard] ", log.LstdFlags) + if !s.closed() { + s.engine.SetLogOutput(w) + } } // Path returns the path set on the shard when it was created. @@ -151,9 +167,11 @@ func (s *Shard) Open() error { } // Load metadata index. 
- if err := s.engine.LoadMetadataIndex(s, s.index); err != nil { + start := time.Now() + if err := s.engine.LoadMetadataIndex(s.id, s.index); err != nil { return err } + s.logger.Printf("%s database index loaded in %s", s.path, time.Now().Sub(start)) return nil }(); err != nil { @@ -176,6 +194,9 @@ func (s *Shard) close() error { return nil } + // Don't leak our shard ID and series keys in the index + s.index.RemoveShard(s.id) + err := s.engine.Close() if err == nil { s.engine = nil @@ -255,12 +276,35 @@ func (s *Shard) WritePoints(points []models.Point) error { return nil } +func (s *Shard) ContainsSeries(seriesKeys []string) (map[string]bool, error) { + if s.closed() { + return nil, ErrEngineClosed + } + + return s.engine.ContainsSeries(seriesKeys) +} + // DeleteSeries deletes a list of series. func (s *Shard) DeleteSeries(seriesKeys []string) error { if s.closed() { return ErrEngineClosed } - return s.engine.DeleteSeries(seriesKeys) + if err := s.engine.DeleteSeries(seriesKeys); err != nil { + return err + } + return nil +} + +// DeleteSeriesRange deletes all values from for seriesKeys between min and max (inclusive) +func (s *Shard) DeleteSeriesRange(seriesKeys []string, min, max int64) error { + if s.closed() { + return ErrEngineClosed + } + if err := s.engine.DeleteSeriesRange(seriesKeys, min, max); err != nil { + return err + } + + return nil } // DeleteMeasurement deletes a measurement and all underlying series. 
@@ -305,14 +349,15 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]*FieldCreate, // get the shard mutex for locally defined fields for _, p := range points { // see if the series should be added to the index - ss := s.index.Series(string(p.Key())) + key := string(p.Key()) + ss := s.index.Series(key) if ss == nil { - ss = NewSeries(string(p.Key()), p.Tags()) + ss = NewSeries(key, p.Tags()) s.statMap.Add(statSeriesCreate, 1) } ss = s.index.CreateSeriesIndexIfNotExists(p.Name(), ss) - s.index.AssignShard(ss.Key, ss.id) + s.index.AssignShard(ss.Key, s.id) // see if the field definitions need to be saved to the shard mf := s.engine.MeasurementFields(p.Name()) @@ -562,6 +607,7 @@ func (m *MeasurementFields) CreateFieldIfNotExists(name string, typ influxql.Dat m.mu.RUnlock() m.mu.Lock() + defer m.mu.Unlock() if f := m.fields[name]; f != nil { return nil } @@ -574,7 +620,6 @@ func (m *MeasurementFields) CreateFieldIfNotExists(name string, typ influxql.Dat } m.fields[name] = f m.Codec = NewFieldCodec(m.fields) - m.mu.Unlock() return nil } @@ -924,16 +969,16 @@ func (itr *MeasurementIterator) Stats() influxql.IteratorStats { return influxql func (itr *MeasurementIterator) Close() error { return nil } // Next emits the next measurement name. -func (itr *MeasurementIterator) Next() *influxql.FloatPoint { +func (itr *MeasurementIterator) Next() (*influxql.FloatPoint, error) { if len(itr.mms) == 0 { - return nil + return nil, nil } mm := itr.mms[0] itr.mms = itr.mms[1:] return &influxql.FloatPoint{ Name: "measurements", Aux: []interface{}{mm.Name}, - } + }, nil } // seriesIterator emits series ids. @@ -991,10 +1036,10 @@ func (itr *seriesIterator) Stats() influxql.IteratorStats { return influxql.Iter func (itr *seriesIterator) Close() error { return nil } // Next emits the next point in the iterator. 
-func (itr *seriesIterator) Next() *influxql.FloatPoint { +func (itr *seriesIterator) Next() (*influxql.FloatPoint, error) { // If there are no more keys then return nil. if len(itr.keys) == 0 { - return nil + return nil, nil } // Prepare auxiliary fields. @@ -1012,7 +1057,7 @@ func (itr *seriesIterator) Next() *influxql.FloatPoint { } itr.keys = itr.keys[1:] - return p + return p, nil } // NewTagKeysIterator returns a new instance of TagKeysIterator. @@ -1099,12 +1144,12 @@ func (itr *tagValuesIterator) Stats() influxql.IteratorStats { return influxql.I func (itr *tagValuesIterator) Close() error { return nil } // Next emits the next point in the iterator. -func (itr *tagValuesIterator) Next() *influxql.FloatPoint { +func (itr *tagValuesIterator) Next() (*influxql.FloatPoint, error) { for { // If there are no more values then move to the next key. if len(itr.buf.keys) == 0 { if len(itr.series) == 0 { - return nil + return nil, nil } itr.buf.s = itr.series[0] @@ -1138,7 +1183,7 @@ func (itr *tagValuesIterator) Next() *influxql.FloatPoint { } itr.buf.keys = itr.buf.keys[1:] - return p + return p, nil } } @@ -1182,12 +1227,12 @@ func (itr *measurementKeysIterator) Stats() influxql.IteratorStats { return infl func (itr *measurementKeysIterator) Close() error { return nil } // Next emits the next tag key name. -func (itr *measurementKeysIterator) Next() *influxql.FloatPoint { +func (itr *measurementKeysIterator) Next() (*influxql.FloatPoint, error) { for { // If there are no more keys then move to the next measurements. 
if len(itr.buf.keys) == 0 { if len(itr.mms) == 0 { - return nil + return nil, nil } itr.buf.mm = itr.mms[0] @@ -1203,7 +1248,7 @@ func (itr *measurementKeysIterator) Next() *influxql.FloatPoint { } itr.buf.keys = itr.buf.keys[1:] - return p + return p, nil } } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/store.go b/vendor/github.com/influxdata/influxdb/tsdb/store.go index dab21ba7e..30c42edc7 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/store.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/store.go @@ -42,6 +42,9 @@ type Store struct { EngineOptions EngineOptions Logger *log.Logger + // logOutput is where output from the underlying databases will go. + logOutput io.Writer + closing chan struct{} wg sync.WaitGroup opened bool @@ -57,6 +60,17 @@ func NewStore(path string) *Store { path: path, EngineOptions: opts, Logger: log.New(os.Stderr, "[store] ", log.LstdFlags), + logOutput: os.Stderr, + } +} + +// SetLogOutput sets the writer to which all logs are written. It must not be +// called after Open is called. +func (s *Store) SetLogOutput(w io.Writer) { + s.Logger = log.New(w, "[store] ", log.LstdFlags) + s.logOutput = w + for _, s := range s.shards { + s.SetLogOutput(w) } } @@ -158,6 +172,7 @@ func (s *Store) loadShards() error { } shard := NewShard(shardID, s.databaseIndexes[db], path, walPath, s.EngineOptions) + shard.SetLogOutput(s.logOutput) err = shard.Open() if err != nil { @@ -283,6 +298,7 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64) er path := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10)) shard := NewShard(shardID, db, path, walPath, s.EngineOptions) + shard.SetLogOutput(s.logOutput) if err := shard.Open(); err != nil { return err } @@ -525,6 +541,12 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi } sources = a + // Determine deletion time range. 
+ min, max, err := influxql.TimeRangeAsEpochNano(condition) + if err != nil { + return err + } + s.mu.RLock() defer s.mu.RUnlock() @@ -557,7 +579,7 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi // Check for unsupported field filters. // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`). if filters.Len() > 0 { - return errors.New("DROP SERIES doesn't support fields in WHERE clause") + return errors.New("fields not supported in WHERE clause during deletion") } } else { // No WHERE clause so get all series IDs for this measurement. @@ -570,18 +592,16 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi } // delete the raw series data - if err := s.deleteSeries(database, seriesKeys); err != nil { + if err := s.deleteSeries(database, seriesKeys, min, max); err != nil { return err } - // remove them from the index - db.DropSeries(seriesKeys) - return nil } -func (s *Store) deleteSeries(database string, seriesKeys []string) error { - if _, ok := s.databaseIndexes[database]; !ok { +func (s *Store) deleteSeries(database string, seriesKeys []string, min, max int64) error { + db := s.databaseIndexes[database] + if db == nil { return influxql.ErrDatabaseNotFound(database) } @@ -589,10 +609,24 @@ func (s *Store) deleteSeries(database string, seriesKeys []string) error { if sh.database != database { continue } - if err := sh.DeleteSeries(seriesKeys); err != nil { + if err := sh.DeleteSeriesRange(seriesKeys, min, max); err != nil { + return err + } + + // The keys we passed in may be fully deleted from the shard, if so, + // we need to remove the shard from all the meta data indexes + existing, err := sh.ContainsSeries(seriesKeys) + if err != nil { return err } + + for k, exists := range existing { + if !exists { + db.UnassignShard(k, sh.id) + } + } } + return nil } diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 
000000000..fafcaafdc --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 000000000..a9f7d88ef --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors implements functions for manipulating errors. + +The traditional error handling idiom in Go is roughly akin to +``` +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +``` +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +In addition, `errors.Wrap` records the file and line where it was called, allowing the programmer to retrieve the path to the original error. + +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to recurse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +``` +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. 
For example: +``` +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +Would you like to know more? Read the [blog post](http://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +MIT diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 000000000..7ec1c5dd5 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,248 @@ +// Package errors implements functions for manipulating errors. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// In addition, errors.Wrap records the file and line where it was called, +// allowing the programmer to retrieve the path to the original error. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. 
Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does nor implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +package errors + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" +) + +// location represents a program counter that +// implements the Location() method. +type location uintptr + +func (l location) Location() (string, int) { + pc := uintptr(l) - 1 + fn := runtime.FuncForPC(pc) + if fn == nil { + return "unknown", 0 + } + + file, line := fn.FileLine(pc) + + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. 
+ const sep = "/" + goal := strings.Count(fn.Name(), sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + + return file, line +} + +// New returns an error that formats as the given text. +func New(text string) error { + pc, _, _, _ := runtime.Caller(1) + return struct { + error + location + }{ + errors.New(text), + location(pc), + } +} + +type cause struct { + cause error + message string +} + +func (c cause) Error() string { return c.Message() + ": " + c.Cause().Error() } +func (c cause) Cause() error { return c.cause } +func (c cause) Message() string { return c.message } + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +func Errorf(format string, args ...interface{}) error { + pc, _, _, _ := runtime.Caller(1) + return struct { + error + location + }{ + fmt.Errorf(format, args...), + location(pc), + } +} + +// Wrap returns an error annotating the cause with message. +// If cause is nil, Wrap returns nil. +func Wrap(cause error, message string) error { + if cause == nil { + return nil + } + pc, _, _, _ := runtime.Caller(1) + return wrap(cause, message, pc) +} + +// Wrapf returns an error annotating the cause with the format specifier. +// If cause is nil, Wrapf returns nil. 
+func Wrapf(cause error, format string, args ...interface{}) error { + if cause == nil { + return nil + } + pc, _, _, _ := runtime.Caller(1) + return wrap(cause, fmt.Sprintf(format, args...), pc) +} + +func wrap(err error, msg string, pc uintptr) error { + return struct { + cause + location + }{ + cause{ + cause: err, + message: msg, + }, + location(pc), + } +} + +type causer interface { + Cause() error +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type Causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} + +// Print prints the error to Stderr. +// If the error implements the Causer interface described in Cause +// Print will recurse into the error's cause. +// If the error implements the inteface: +// +// type Location interface { +// Location() (file string, line int) +// } +// +// Print will also print the file and line of the error. +func Print(err error) { + Fprint(os.Stderr, err) +} + +// Fprint prints the error to the supplied writer. +// The format of the output is the same as Print. +// If err is nil, nothing is printed. 
+func Fprint(w io.Writer, err error) { + type location interface { + Location() (string, int) + } + type message interface { + Message() string + } + + for err != nil { + if err, ok := err.(location); ok { + file, line := err.Location() + fmt.Fprintf(w, "%s:%d: ", file, line) + } + switch err := err.(type) { + case message: + fmt.Fprintln(w, err.Message()) + default: + fmt.Fprintln(w, err.Error()) + } + + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } +} diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md index 7650ce44e..1879ce71b 100644 --- a/vendor/github.com/russross/blackfriday/README.md +++ b/vendor/github.com/russross/blackfriday/README.md @@ -1,4 +1,4 @@ -Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) +Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) [![GoDoc](https://godoc.org/github.com/russross/blackfriday?status.svg)](https://godoc.org/github.com/russross/blackfriday) =========== Blackfriday is a [Markdown][1] processor implemented in [Go][2]. 
It diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go index 740ad4626..f9ce9e348 100644 --- a/vendor/github.com/russross/blackfriday/block.go +++ b/vendor/github.com/russross/blackfriday/block.go @@ -1174,6 +1174,13 @@ gatherlines: p.dliPrefix(chunk) > 0: if containsBlankLine { + // end the list if the type changed after a blank line + if (*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } *flags |= LIST_ITEM_CONTAINS_BLOCK } diff --git a/vendor/github.com/russross/blackfriday/markdown.go b/vendor/github.com/russross/blackfriday/markdown.go index ed438de7c..aea997a0e 100644 --- a/vendor/github.com/russross/blackfriday/markdown.go +++ b/vendor/github.com/russross/blackfriday/markdown.go @@ -635,6 +635,9 @@ func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffse i++ } linkOffset = i + if i == len(data) { + return + } for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { i++ } diff --git a/vendor/github.com/shurcooL/markdownfmt/main.go b/vendor/github.com/shurcooL/markdownfmt/main.go index eac873a16..bd6916c97 100644 --- a/vendor/github.com/shurcooL/markdownfmt/main.go +++ b/vendor/github.com/shurcooL/markdownfmt/main.go @@ -59,7 +59,9 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error } stdout := int(os.Stdout.Fd()) - res, err := markdown.Process(filename, src, &markdown.Options{Terminal: terminal.IsTerminal(stdout)}) + res, err := markdown.Process(filename, src, &markdown.Options{ + Terminal: !*write && terminal.IsTerminal(stdout), + }) if err != nil { return err } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 70af5a728..f1493a3e6 100644 --- 
a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -62,7 +62,7 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) { func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) { h := (*Cmsghdr)(unsafe.Pointer(&b[0])) - if h.Len < SizeofCmsghdr || int(h.Len) > len(b) { + if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) { return nil, nil, EINVAL } return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index b156d5242..acd2e1c78 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -18,6 +18,7 @@ package unix //sysnb Getgid() (gid int) //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_UGETRLIMIT //sysnb Getuid() (uid int) +//sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) //sys Lchown(path string, uid int, gid int) (err error) @@ -97,3 +98,29 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } + +//sysnb pipe(p *[2]_C_int) (err error) + +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) + +func Pipe2(p []int, flags int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe2(&pp, flags) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go index 974d28c38..143e767a6 100644 --- a/vendor/golang.org/x/sys/unix/types_linux.go +++ b/vendor/golang.org/x/sys/unix/types_linux.go @@ -113,6 +113,9 @@ struct my_epoll_event { // padding is 
not specified in linux/eventpoll.h but added to conform to the // alignment requirements of EABI int32_t padFd; +#endif +#ifdef __powerpc64__ + int32_t _padFd; #endif int32_t fd; int32_t pad; diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 844ae592f..4bd18dcee 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1298,6 +1298,17 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InotifyInit() (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -1810,3 +1821,23 @@ func Utime(path string, buf *Utimbuf) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 0e86c9d9e..fbb43516c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1298,6 +1298,17 @@ func Getuid() (uid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func InotifyInit() (fd int, err error) { + r0, _, e1 := 
RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ioperm(from int, num int, on int) (err error) { _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on)) if e1 != 0 { @@ -1810,3 +1821,23 @@ func Utime(path string, buf *Utimbuf) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 66d99ea7b..d1105402e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -589,9 +589,10 @@ type Ustat_t struct { } type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 + Events uint32 + X_padFd int32 + Fd int32 + Pad int32 } const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index f7c845133..8e25c9fff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -589,9 +589,10 @@ type Ustat_t struct { } type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 + Events uint32 + X_padFd int32 + Fd int32 + Pad int32 } const (