From bd5aa6a70231d4497cd77ae2e9bd538d6ab8b101 Mon Sep 17 00:00:00 2001 From: Nathaniel Cook Date: Fri, 26 Feb 2016 09:21:29 -0700 Subject: [PATCH] New Query Engine is passing all tests --- build.py | 1 + functions.go | 12 +- influxql.gen.go | 499 + influxql.gen.go.tmpl | 231 + influxql.go | 181 + ...egations.srpl => TestStream_InfluxQL.srpl} | 0 integrations/streamer_test.go | 115 +- pipeline/influxql.gen.go | 35 + pipeline/influxql.gen.go.tmpl | 21 + pipeline/influxql.go | 301 + task.go | 2 + tick/eval.go | 2 +- tick/node.go | 2 +- tmpldata | 17 + .../influxdata/influxdb/.dockerignore | 1 + .../influxdb/.github/PULL_REQUEST_TEMPLATE.md | 4 + .../github.com/influxdata/influxdb/.gitignore | 79 + .../influxdata/influxdb/.hooks/pre-commit | 24 + .../influxdata/influxdb/CHANGELOG.md | 1963 ++ .../influxdata/influxdb/CODING_GUIDELINES.md | 82 + .../influxdata/influxdb/CONTRIBUTING.md | 239 + .../github.com/influxdata/influxdb/DOCKER.md | 44 + .../github.com/influxdata/influxdb/Dockerfile | 24 + .../influxdb/Dockerfile_build_ubuntu32 | 35 + .../influxdb/Dockerfile_build_ubuntu64 | 35 + .../influxdb/Dockerfile_build_ubuntu64_git | 43 + .../influxdb/Dockerfile_test_ubuntu32 | 12 + vendor/github.com/influxdata/influxdb/Godeps | 22 + vendor/github.com/influxdata/influxdb/LICENSE | 20 + .../influxdb/LICENSE_OF_DEPENDENCIES.md | 19 + .../github.com/influxdata/influxdb/Makefile | 39 + .../github.com/influxdata/influxdb/QUERIES.md | 180 + .../github.com/influxdata/influxdb/README.md | 72 + .../influxdata/influxdb/build-docker.sh | 9 + .../github.com/influxdata/influxdb/build.py | 819 + .../github.com/influxdata/influxdb/build.sh | 22 + .../influxdata/influxdb/circle-test.sh | 35 + .../github.com/influxdata/influxdb/circle.yml | 16 + .../influxdata/influxdb/client/README.md | 257 + .../influxdb/client/example_test.go | 113 + .../influxdata/influxdb/client/influxdb.go | 715 + .../influxdb/client/influxdb_test.go | 743 + .../influxdata/influxdb/client/v2/client.go | 562 + .../influxdb/client/v2/client_test.go | 369 + .../influxdb/client/v2/example_test.go | 265 + .../influxdata/influxdb/cluster/balancer.go | 69 + .../influxdb/cluster/balancer_test.go | 115 + .../influxdb/cluster/client_pool.go | 57 + .../influxdata/influxdb/cluster/cluster.go | 1 + .../influxdata/influxdb/cluster/config.go | 41 + .../influxdb/cluster/config_test.go | 27 + .../influxdb/cluster/internal/data.pb.go | 301 + .../influxdb/cluster/internal/data.proto | 54 + .../influxdb/cluster/meta_client.go | 40 + .../influxdb/cluster/meta_client_test.go | 160 + .../influxdb/cluster/meta_executor.go | 171 + .../influxdb/cluster/meta_executor_test.go | 121 + .../influxdb/cluster/points_writer.go | 399 + .../influxdb/cluster/points_writer_test.go | 497 + .../influxdata/influxdb/cluster/pool.go | 188 + .../influxdb/cluster/query_executor.go | 1184 ++ .../influxdb/cluster/query_executor_test.go | 314 + .../influxdata/influxdb/cluster/rpc.go | 413 + .../influxdata/influxdb/cluster/rpc_test.go | 139 + .../influxdata/influxdb/cluster/service.go | 566 + .../influxdb/cluster/service_test.go | 174 + .../influxdb/cluster/shard_writer.go | 188 + .../influxdb/cluster/shard_writer_test.go | 224 + .../influxdata/influxdb/cmd/influx/cli/cli.go | 838 + .../influxdb/cmd/influx/cli/cli_test.go | 514 + .../influxdata/influxdb/cmd/influx/main.go | 109 + .../influxdb/cmd/influx_inspect/info.go | 97 + .../influxdb/cmd/influx_inspect/main.go | 120 + .../influxdb/cmd/influx_inspect/tsm.go | 653 + .../influxdb/cmd/influx_stress/README.md 
| 38 + .../cmd/influx_stress/examples/template.toml | 92 + .../cmd/influx_stress/influx_stress.go | 60 + .../influxdb/cmd/influx_tsm/README.md | 89 + .../influxdb/cmd/influx_tsm/b1/reader.go | 275 + .../influxdb/cmd/influx_tsm/bz1/reader.go | 377 + .../influxdb/cmd/influx_tsm/converter.go | 118 + .../influxdb/cmd/influx_tsm/main.go | 413 + .../influxdb/cmd/influx_tsm/stats/stats.go | 47 + .../influxdb/cmd/influx_tsm/tracker.go | 130 + .../influxdb/cmd/influx_tsm/tsdb/codec.go | 105 + .../influxdb/cmd/influx_tsm/tsdb/database.go | 239 + .../cmd/influx_tsm/tsdb/internal/meta.pb.go | 122 + .../influxdb/cmd/influx_tsm/tsdb/types.go | 60 + .../influxdb/cmd/influx_tsm/tsdb/values.go | 39 + .../influxdb/cmd/influxd/backup/backup.go | 367 + .../influxdb/cmd/influxd/help/help.go | 46 + .../influxdata/influxdb/cmd/influxd/main.go | 203 + .../influxdb/cmd/influxd/restore/restore.go | 403 + .../cmd/influxd/run/backup_restore_test.go | 106 + .../influxdb/cmd/influxd/run/command.go | 258 + .../influxdb/cmd/influxd/run/config.go | 281 + .../cmd/influxd/run/config_command.go | 78 + .../influxdb/cmd/influxd/run/config_test.go | 188 + .../influxdb/cmd/influxd/run/server.go | 763 + .../cmd/influxd/run/server_cluster_test.go | 357 + .../cmd/influxd/run/server_helpers_test.go | 705 + .../cmd/influxd/run/server_suite_test.go | 410 + .../influxdb/cmd/influxd/run/server_test.go | 5718 ++++++ .../influxdb/cmd/influxd/run/server_test.md | 150 + .../github.com/influxdata/influxdb/errors.go | 45 + .../influxdata/influxdb/etc/burn-in/.rvmrc | 1 + .../influxdata/influxdb/etc/burn-in/Gemfile | 4 + .../influxdb/etc/burn-in/Gemfile.lock | 14 + .../influxdb/etc/burn-in/burn-in.rb | 79 + .../influxdata/influxdb/etc/burn-in/log.rb | 23 + .../influxdb/etc/burn-in/random_gaussian.rb | 31 + .../influxdb/etc/burn-in/random_points.rb | 29 + .../influxdb/etc/config.sample.toml | 359 + .../github.com/influxdata/influxdb/gobuild.sh | 18 + .../influxdata/influxdb/importer/README.md | 193 + .../influxdb/importer/v8/importer.go | 248 + .../influxdata/influxdb/influxdb.go | 1 + .../influxdata/influxdb/influxql/README.md | 944 + .../influxdata/influxdb/influxql/ast.go | 3921 ++++ .../influxdata/influxdb/influxql/ast_test.go | 1298 ++ .../influxdb/influxql/call_iterator.go | 1093 ++ .../influxdb/influxql/call_iterator_test.go | 599 + .../influxdata/influxdb/influxql/cast.go | 41 + .../influxdata/influxdb/influxql/doc.go | 64 + .../influxdata/influxdb/influxql/emitter.go | 199 + .../influxdb/influxql/emitter_test.go | 69 + .../influxdb/influxql/functions.gen.go | 1003 + .../influxdb/influxql/functions.gen.go.tmpl | 90 + .../influxdata/influxdb/influxql/functions.go | 55 + .../influxdata/influxdb/influxql/influxql.go | 7 + .../influxdb/influxql/internal/internal.pb.go | 449 + .../influxdb/influxql/internal/internal.proto | 69 + .../influxdb/influxql/iterator.gen.go | 4668 +++++ .../influxdb/influxql/iterator.gen.go.tmpl | 928 + .../influxdata/influxdb/influxql/iterator.go | 1007 + .../influxdb/influxql/iterator_test.go | 1145 ++ .../influxdata/influxdb/influxql/parser.go | 2659 +++ .../influxdb/influxql/parser_test.go | 2342 +++ .../influxdata/influxdb/influxql/point.gen.go | 767 + .../influxdb/influxql/point.gen.go.tmpl | 212 + .../influxdata/influxdb/influxql/point.go | 294 + .../influxdb/influxql/point_test.go | 187 + .../influxdb/influxql/query_executor.go | 26 + .../influxdata/influxdb/influxql/result.go | 189 + .../influxdata/influxdb/influxql/scanner.go | 567 + .../influxdb/influxql/scanner_test.go | 297 + 
.../influxdata/influxdb/influxql/select.go | 780 + .../influxdb/influxql/select_test.go | 1688 ++ .../influxdb/influxql/statement_rewriter.go | 211 + .../influxql/statement_rewriter_test.go | 93 + .../influxdata/influxdb/influxql/tmpldata | 30 + .../influxdata/influxdb/influxql/token.go | 329 + .../influxdata/influxdb/influxvar.go | 45 + .../influxdata/influxdb/models/points.go | 1537 ++ .../influxdata/influxdb/models/points_test.go | 1788 ++ .../influxdata/influxdb/models/rows.go | 60 + .../influxdata/influxdb/models/time.go | 51 + .../influxdata/influxdb/monitor/README.md | 73 + .../influxdata/influxdb/monitor/build_info.go | 22 + .../influxdata/influxdb/monitor/config.go | 35 + .../influxdb/monitor/config_test.go | 30 + .../monitor/diagnostics/diagnostics.go | 41 + .../influxdata/influxdb/monitor/go_runtime.go | 21 + .../influxdata/influxdb/monitor/network.go | 23 + .../influxdata/influxdb/monitor/service.go | 398 + .../influxdata/influxdb/monitor/system.go | 28 + .../github.com/influxdata/influxdb/nightly.sh | 57 + vendor/github.com/influxdata/influxdb/node.go | 116 + .../github.com/influxdata/influxdb/package.sh | 571 + .../influxdata/influxdb/pkg/README.md | 5 + .../influxdata/influxdb/pkg/deep/equal.go | 158 + .../influxdata/influxdb/pkg/escape/bytes.go | 45 + .../influxdb/pkg/escape/bytes_test.go | 45 + .../influxdata/influxdb/pkg/escape/strings.go | 34 + .../influxdata/influxdb/pkg/slices/strings.go | 40 + .../influxdb/scripts/influxdb.service | 21 + .../influxdata/influxdb/scripts/init.sh | 219 + .../influxdata/influxdb/scripts/logrotate | 8 + .../influxdb/scripts/post-install.sh | 73 + .../influxdb/scripts/post-uninstall.sh | 56 + .../influxdb/scripts/pre-install.sh | 16 + .../influxdb/services/admin/README.md | 23 + .../influxdb/services/admin/admin.go | 4 + .../influxdb/services/admin/assets/README.md | 4 + .../services/admin/assets/css/admin.css | 87 + .../services/admin/assets/css/bootstrap.css | 6584 +++++++ .../assets/css/dropdowns-enhancement.css | 294 + .../fonts/glyphicons-halflings-regular.eot | Bin 0 -> 20127 bytes .../fonts/glyphicons-halflings-regular.svg | 288 + .../fonts/glyphicons-halflings-regular.ttf | Bin 0 -> 45404 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../admin/assets/img/influxdb-light400.png | Bin 0 -> 19775 bytes .../influxdb/services/admin/assets/index.html | 203 + .../services/admin/assets/js/admin.js | 478 + .../assets/js/vendor/bootstrap-3.3.5.min.js | 7 + .../assets/js/vendor/jquery-2.1.4.min.js | 4 + .../assets/js/vendor/react-0.13.3.min.js | 16 + .../influxdb/services/admin/config.go | 23 + .../influxdb/services/admin/config_test.go | 32 + .../influxdb/services/admin/service.go | 111 + .../influxdb/services/admin/service_test.go | 33 + .../influxdb/services/admin/statik/README.md | 3 + .../influxdb/services/admin/statik/statik.go | 10 + .../influxdb/services/collectd/README.md | 35 + .../services/collectd/collectd_test.conf | 209 + .../influxdb/services/collectd/config.go | 70 + .../influxdb/services/collectd/config_test.go | 32 + .../influxdb/services/collectd/service.go | 317 + .../services/collectd/service_test.go | 497 + .../services/collectd/test_client/README.md | 3 + .../services/collectd/test_client/client.go | 71 + .../services/continuous_querier/config.go | 36 + .../continuous_querier/config_test.go | 27 + .../continuous_querier/continuous_queries.md | 235 + .../services/continuous_querier/service.go | 455 + 
.../continuous_querier/service_test.go | 613 + .../services/copier/internal/internal.pb.go | 56 + .../services/copier/internal/internal.proto | 9 + .../influxdb/services/copier/service.go | 261 + .../influxdb/services/copier/service_test.go | 185 + .../influxdb/services/graphite/README.md | 196 + .../influxdb/services/graphite/config.go | 254 + .../influxdb/services/graphite/config_test.go | 167 + .../influxdb/services/graphite/errors.go | 14 + .../influxdb/services/graphite/parser.go | 419 + .../influxdb/services/graphite/parser_test.go | 709 + .../influxdb/services/graphite/service.go | 395 + .../services/graphite/service_test.go | 186 + .../influxdata/influxdb/services/hh/config.go | 68 + .../influxdb/services/hh/config_test.go | 73 + .../influxdata/influxdb/services/hh/doc.go | 5 + .../influxdb/services/hh/limiter.go | 61 + .../influxdb/services/hh/limiter_test.go | 47 + .../influxdb/services/hh/node_processor.go | 295 + .../services/hh/node_processor_test.go | 155 + .../influxdata/influxdb/services/hh/queue.go | 710 + .../influxdb/services/hh/queue_test.go | 327 + .../influxdb/services/hh/service.go | 275 + .../influxdb/services/httpd/config.go | 26 + .../influxdb/services/httpd/config_test.go | 52 + .../influxdb/services/httpd/handler.go | 972 + .../influxdb/services/httpd/handler_test.go | 549 + .../services/httpd/response_logger.go | 161 + .../influxdb/services/httpd/service.go | 157 + .../influxdb/services/meta/client.go | 1292 ++ .../influxdb/services/meta/config.go | 121 + .../influxdb/services/meta/config_test.go | 45 + .../influxdata/influxdb/services/meta/data.go | 1527 ++ .../influxdb/services/meta/data_test.go | 31 + .../influxdb/services/meta/errors.go | 118 + .../influxdb/services/meta/handler.go | 525 + .../services/meta/internal/meta.pb.go | 1731 ++ .../services/meta/internal/meta.proto | 377 + .../influxdb/services/meta/meta_test.go | 7 + .../services/meta/query_authorizer.go | 107 + .../influxdb/services/meta/raft_state.go | 352 + .../influxdb/services/meta/response_logger.go | 161 + .../influxdb/services/meta/service.go | 210 + .../influxdb/services/meta/service_test.go | 1462 ++ .../influxdb/services/meta/store.go | 450 + .../influxdb/services/meta/store_fsm.go | 654 + .../influxdb/services/opentsdb/README.md | 10 + .../influxdb/services/opentsdb/config.go | 61 + .../influxdb/services/opentsdb/config_test.go | 41 + .../influxdb/services/opentsdb/handler.go | 191 + .../influxdb/services/opentsdb/service.go | 393 + .../services/opentsdb/service_test.go | 172 + .../influxdb/services/precreator/README.md | 13 + .../influxdb/services/precreator/config.go | 32 + .../services/precreator/config_test.go | 31 + .../influxdb/services/precreator/service.go | 94 + .../services/precreator/service_test.go | 55 + .../influxdb/services/retention/config.go | 18 + .../services/retention/config_test.go | 27 + .../influxdb/services/retention/service.go | 141 + .../influxdb/services/snapshotter/service.go | 290 + .../services/snapshotter/service_test.go | 1 + .../influxdb/services/subscriber/config.go | 12 + .../services/subscriber/config_test.go | 23 + .../influxdb/services/subscriber/service.go | 288 + .../services/subscriber/service_test.go | 443 + .../influxdb/services/subscriber/udp.go | 42 + .../influxdb/services/udp/README.md | 125 + .../influxdb/services/udp/config.go | 116 + .../influxdb/services/udp/config_test.go | 45 + .../influxdb/services/udp/service.go | 226 + .../influxdata/influxdb/stress/README.md | 47 + .../influxdata/influxdb/stress/basic.go | 682 + 
.../influxdata/influxdb/stress/config.go | 141 + .../influxdata/influxdb/stress/run.go | 335 + .../influxdata/influxdb/stress/stress.toml | 54 + .../influxdata/influxdb/stress/stress_test.go | 597 + .../stress/stress_test_server/server.go | 73 + .../influxdata/influxdb/stress/template.go | 64 + .../influxdata/influxdb/stress/util.go | 132 + .../github.com/influxdata/influxdb/tcp/mux.go | 173 + .../influxdata/influxdb/tcp/mux_test.go | 137 + vendor/github.com/influxdata/influxdb/test.sh | 219 + .../influxdata/influxdb/tests/README.md | 4 + .../influxdb/tests/create_future_writes.sh | 22 + .../tests/create_write_multiple_query.sh | 14 + .../tests/create_write_single_query.sh | 19 + ..._with_multiple_measurements_values_tags.sh | 23 + ...e_write_single_with_multiple_tags_query.sh | 11 + .../influxdb/tests/distinct-data-scenarios.sh | 35 + .../influxdb/tests/read_write_gzip.sh | 15 + .../influxdb/tests/siege/.gitignore | 1 + .../influxdata/influxdb/tests/siege/README.md | 66 + .../influxdata/influxdb/tests/siege/urlgen | 107 + .../influxdata/influxdb/tests/tmux/3_shards | 28 + .../influxdata/influxdb/tests/tmux/README.md | 31 + .../influxdb/tests/tmux/sample.json | 16000 ++++++++++++++++ .../influxdata/influxdb/tests/tmux/seed.sh | 13 + .../influxdb/tests/tmux/server_8086.toml | 7 + .../influxdb/tests/tmux/server_8087.toml | 7 + .../influxdb/tests/tmux/server_8088.toml | 7 + .../influxdb/tests/urlgen/urlgen.go | 58 + .../influxdata/influxdb/toml/toml.go | 72 + .../influxdata/influxdb/toml/toml_test.go | 45 + .../influxdata/influxdb/tsdb/README.md | 91 + .../influxdata/influxdb/tsdb/batcher.go | 149 + .../influxdata/influxdb/tsdb/batcher_test.go | 146 + .../influxdata/influxdb/tsdb/config.go | 153 + .../influxdata/influxdb/tsdb/config_test.go | 24 + .../influxdata/influxdb/tsdb/cursor.go | 319 + .../influxdata/influxdb/tsdb/cursor_test.go | 514 + .../influxdata/influxdb/tsdb/doc.go | 5 + .../influxdata/influxdb/tsdb/engine.go | 152 + .../influxdata/influxdb/tsdb/engine/engine.go | 6 + .../influxdb/tsdb/engine/tsm1/DESIGN.md | 451 + .../influxdb/tsdb/engine/tsm1/bool.go | 141 + .../influxdb/tsdb/engine/tsm1/bool_test.go | 111 + .../influxdb/tsdb/engine/tsm1/cache.go | 500 + .../tsdb/engine/tsm1/cache_race_test.go | 165 + .../influxdb/tsdb/engine/tsm1/cache_test.go | 428 + .../influxdb/tsdb/engine/tsm1/compact.go | 963 + .../influxdb/tsdb/engine/tsm1/compact_test.go | 1194 ++ .../influxdb/tsdb/engine/tsm1/cursor.go | 77 + .../influxdb/tsdb/engine/tsm1/encoding.go | 737 + .../tsdb/engine/tsm1/encoding_test.go | 557 + .../influxdb/tsdb/engine/tsm1/engine.go | 965 + .../influxdb/tsdb/engine/tsm1/engine_test.go | 590 + .../influxdb/tsdb/engine/tsm1/file_store.go | 934 + .../tsdb/engine/tsm1/file_store_test.go | 719 + .../influxdb/tsdb/engine/tsm1/file_unix.go | 15 + .../influxdb/tsdb/engine/tsm1/file_windows.go | 5 + .../influxdb/tsdb/engine/tsm1/float.go | 241 + .../influxdb/tsdb/engine/tsm1/float_test.go | 275 + .../influxdb/tsdb/engine/tsm1/int.go | 293 + .../influxdb/tsdb/engine/tsm1/int_test.go | 527 + .../influxdb/tsdb/engine/tsm1/iterator.gen.go | 1428 ++ .../tsdb/engine/tsm1/iterator.gen.go.tmpl | 431 + .../tsdb/engine/tsm1/iterator.gen.go.tmpldata | 26 + .../influxdb/tsdb/engine/tsm1/mmap_solaris.go | 32 + .../influxdb/tsdb/engine/tsm1/mmap_unix.go | 31 + .../influxdb/tsdb/engine/tsm1/mmap_windows.go | 117 + .../influxdb/tsdb/engine/tsm1/pools.go | 147 + .../influxdb/tsdb/engine/tsm1/reader.go | 1208 ++ .../influxdb/tsdb/engine/tsm1/reader_test.go | 997 + 
.../influxdb/tsdb/engine/tsm1/string.go | 96 + .../influxdb/tsdb/engine/tsm1/string_test.go | 127 + .../influxdb/tsdb/engine/tsm1/timestamp.go | 343 + .../tsdb/engine/tsm1/timestamp_test.go | 548 + .../influxdb/tsdb/engine/tsm1/tombstone.go | 142 + .../tsdb/engine/tsm1/tombstone_test.go | 95 + .../influxdb/tsdb/engine/tsm1/wal.go | 836 + .../influxdb/tsdb/engine/tsm1/wal_test.go | 552 + .../influxdb/tsdb/engine/tsm1/writer.go | 676 + .../influxdb/tsdb/engine/tsm1/writer_test.go | 512 + .../influxdata/influxdb/tsdb/executor.go | 8 + .../influxdb/tsdb/internal/meta.pb.go | 122 + .../influxdb/tsdb/internal/meta.proto | 27 + .../influxdata/influxdb/tsdb/meta.go | 1618 ++ .../influxdata/influxdb/tsdb/meta_test.go | 302 + .../influxdata/influxdb/tsdb/shard.go | 1099 ++ .../influxdata/influxdb/tsdb/shard_test.go | 393 + .../influxdata/influxdb/tsdb/store.go | 1013 + .../influxdata/influxdb/tsdb/store_test.go | 422 + .../influxdata/influxdb/uuid/uuid.go | 95 + .../influxdb/cluster/internal/data.pb.go | 7 + .../services/copier/internal/internal.pb.go | 5 + .../services/meta/internal/meta.pb.go | 29 +- .../influxdb/tsdb/internal/meta.pb.go | 7 + 384 files changed, 137033 insertions(+), 65 deletions(-) create mode 100644 influxql.gen.go create mode 100644 influxql.gen.go.tmpl create mode 100644 influxql.go rename integrations/data/{TestStream_Aggregations.srpl => TestStream_InfluxQL.srpl} (100%) create mode 100644 pipeline/influxql.gen.go create mode 100644 pipeline/influxql.gen.go.tmpl create mode 100644 pipeline/influxql.go create mode 100644 tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/.dockerignore create mode 100644 vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/influxdata/influxdb/.gitignore create mode 100755 vendor/github.com/influxdata/influxdb/.hooks/pre-commit create mode 100644 vendor/github.com/influxdata/influxdb/CHANGELOG.md create mode 100644 vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md create mode 100644 vendor/github.com/influxdata/influxdb/CONTRIBUTING.md create mode 100644 vendor/github.com/influxdata/influxdb/DOCKER.md create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 create mode 100644 vendor/github.com/influxdata/influxdb/Godeps create mode 100644 vendor/github.com/influxdata/influxdb/LICENSE create mode 100644 vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md create mode 100644 vendor/github.com/influxdata/influxdb/Makefile create mode 100644 vendor/github.com/influxdata/influxdb/QUERIES.md create mode 100644 vendor/github.com/influxdata/influxdb/README.md create mode 100755 vendor/github.com/influxdata/influxdb/build-docker.sh create mode 100755 vendor/github.com/influxdata/influxdb/build.py create mode 100755 vendor/github.com/influxdata/influxdb/build.sh create mode 100755 vendor/github.com/influxdata/influxdb/circle-test.sh create mode 100644 vendor/github.com/influxdata/influxdb/circle.yml create mode 100644 vendor/github.com/influxdata/influxdb/client/README.md create mode 100644 vendor/github.com/influxdata/influxdb/client/example_test.go 
create mode 100644 vendor/github.com/influxdata/influxdb/client/influxdb.go create mode 100644 vendor/github.com/influxdata/influxdb/client/influxdb_test.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/client.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/example_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/balancer.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/balancer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/client_pool.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/cluster.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/config.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/internal/data.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/internal/data.proto create mode 100644 vendor/github.com/influxdata/influxdb/cluster/meta_client.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/meta_client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/meta_executor.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/meta_executor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/points_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/points_writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/pool.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/query_executor.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/query_executor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/rpc.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/rpc_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/service.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/shard_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/cluster/shard_writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/info.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_inspect/tsm.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go create mode 100644 
vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/values.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/main.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/backup_restore_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_cluster_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_helpers_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_suite_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.md create mode 100644 vendor/github.com/influxdata/influxdb/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb create mode 100644 vendor/github.com/influxdata/influxdb/etc/config.sample.toml create mode 100755 vendor/github.com/influxdata/influxdb/gobuild.sh create mode 100644 vendor/github.com/influxdata/influxdb/importer/README.md create mode 100644 vendor/github.com/influxdata/influxdb/importer/v8/importer.go create mode 100644 vendor/github.com/influxdata/influxdb/influxdb.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/README.md create mode 100644 vendor/github.com/influxdata/influxdb/influxql/ast.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/ast_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/call_iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go create mode 100644 
vendor/github.com/influxdata/influxdb/influxql/cast.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/emitter.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/emitter_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/functions.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/influxql/functions.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/influxql.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto create mode 100644 vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/influxql/iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/iterator_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/parser.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/parser_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/point.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/point.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/influxql/point.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/point_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/query_executor.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/result.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/scanner.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/scanner_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/select.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/select_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/statement_rewriter_test.go create mode 100644 vendor/github.com/influxdata/influxdb/influxql/tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/influxql/token.go create mode 100644 vendor/github.com/influxdata/influxdb/influxvar.go create mode 100644 vendor/github.com/influxdata/influxdb/models/points.go create mode 100644 vendor/github.com/influxdata/influxdb/models/points_test.go create mode 100644 vendor/github.com/influxdata/influxdb/models/rows.go create mode 100644 vendor/github.com/influxdata/influxdb/models/time.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/README.md create mode 100644 vendor/github.com/influxdata/influxdb/monitor/build_info.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/config.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/diagnostics/diagnostics.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/go_runtime.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/network.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/service.go create mode 100644 
vendor/github.com/influxdata/influxdb/monitor/system.go create mode 100755 vendor/github.com/influxdata/influxdb/nightly.sh create mode 100644 vendor/github.com/influxdata/influxdb/node.go create mode 100755 vendor/github.com/influxdata/influxdb/package.sh create mode 100644 vendor/github.com/influxdata/influxdb/pkg/README.md create mode 100644 vendor/github.com/influxdata/influxdb/pkg/deep/equal.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/strings.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/strings.go create mode 100644 vendor/github.com/influxdata/influxdb/scripts/influxdb.service create mode 100755 vendor/github.com/influxdata/influxdb/scripts/init.sh create mode 100644 vendor/github.com/influxdata/influxdb/scripts/logrotate create mode 100644 vendor/github.com/influxdata/influxdb/scripts/post-install.sh create mode 100644 vendor/github.com/influxdata/influxdb/scripts/post-uninstall.sh create mode 100755 vendor/github.com/influxdata/influxdb/scripts/pre-install.sh create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/admin.go create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/css/admin.css create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/css/bootstrap.css create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/css/dropdowns-enhancement.css create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.eot create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.svg create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.ttf create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.woff create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.woff2 create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/img/influxdb-light400.png create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/index.html create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/js/admin.js create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/js/vendor/bootstrap-3.3.5.min.js create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/js/vendor/jquery-2.1.4.min.js create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/assets/js/vendor/react-0.13.3.min.js create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/admin/statik/README.md create mode 100644 
vendor/github.com/influxdata/influxdb/services/admin/statik/statik.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/collectd_test.conf create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/test_client/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/continuous_queries.md create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/copier/internal/internal.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/services/copier/internal/internal.proto create mode 100644 vendor/github.com/influxdata/influxdb/services/copier/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/copier/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/parser.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/parser_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/graphite/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/limiter.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/limiter_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/node_processor.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/node_processor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/queue.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/queue_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/hh/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/handler.go 
create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/response_logger.go create mode 100644 vendor/github.com/influxdata/influxdb/services/httpd/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/client.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/data.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/data_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/handler.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/internal/meta.proto create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/meta_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/raft_state.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/response_logger.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/store.go create mode 100644 vendor/github.com/influxdata/influxdb/services/meta/store_fsm.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/precreator/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/retention/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/snapshotter/service.go create mode 100644 vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/service.go create mode 100644 
vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/subscriber/udp.go create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/README.md create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/config.go create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/udp/service.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/README.md create mode 100644 vendor/github.com/influxdata/influxdb/stress/basic.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/config.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/run.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/stress.toml create mode 100644 vendor/github.com/influxdata/influxdb/stress/stress_test.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/stress_test_server/server.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/template.go create mode 100644 vendor/github.com/influxdata/influxdb/stress/util.go create mode 100644 vendor/github.com/influxdata/influxdb/tcp/mux.go create mode 100644 vendor/github.com/influxdata/influxdb/tcp/mux_test.go create mode 100755 vendor/github.com/influxdata/influxdb/test.sh create mode 100644 vendor/github.com/influxdata/influxdb/tests/README.md create mode 100755 vendor/github.com/influxdata/influxdb/tests/create_future_writes.sh create mode 100755 vendor/github.com/influxdata/influxdb/tests/create_write_multiple_query.sh create mode 100755 vendor/github.com/influxdata/influxdb/tests/create_write_single_query.sh create mode 100755 vendor/github.com/influxdata/influxdb/tests/create_write_single_with_multiple_measurements_values_tags.sh create mode 100755 vendor/github.com/influxdata/influxdb/tests/create_write_single_with_multiple_tags_query.sh create mode 100755 vendor/github.com/influxdata/influxdb/tests/distinct-data-scenarios.sh create mode 100755 vendor/github.com/influxdata/influxdb/tests/read_write_gzip.sh create mode 100644 vendor/github.com/influxdata/influxdb/tests/siege/.gitignore create mode 100644 vendor/github.com/influxdata/influxdb/tests/siege/README.md create mode 100755 vendor/github.com/influxdata/influxdb/tests/siege/urlgen create mode 100755 vendor/github.com/influxdata/influxdb/tests/tmux/3_shards create mode 100644 vendor/github.com/influxdata/influxdb/tests/tmux/README.md create mode 100644 vendor/github.com/influxdata/influxdb/tests/tmux/sample.json create mode 100755 vendor/github.com/influxdata/influxdb/tests/tmux/seed.sh create mode 100644 vendor/github.com/influxdata/influxdb/tests/tmux/server_8086.toml create mode 100644 vendor/github.com/influxdata/influxdb/tests/tmux/server_8087.toml create mode 100644 vendor/github.com/influxdata/influxdb/tests/tmux/server_8088.toml create mode 100644 vendor/github.com/influxdata/influxdb/tests/urlgen/urlgen.go create mode 100644 vendor/github.com/influxdata/influxdb/toml/toml.go create mode 100644 vendor/github.com/influxdata/influxdb/toml/toml_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/README.md create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/batcher.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/batcher_test.go create mode 100644 
vendor/github.com/influxdata/influxdb/tsdb/config.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/config_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/cursor_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/doc.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/engine.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/DESIGN.md create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_race_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cursor.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/int.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/int_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_solaris.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/pools.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/timestamp.go create mode 100644 
vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/timestamp_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/executor.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/meta.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/meta_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/shard.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/shard_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/store.go
 create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/store_test.go
 create mode 100644 vendor/github.com/influxdata/influxdb/uuid/uuid.go

diff --git a/build.py b/build.py
index fb1bd26465..9b2f264b18 100755
--- a/build.py
+++ b/build.py
@@ -119,6 +119,7 @@ def package_scripts(build_root):
 def run_generate():
     print "Running generate..."
     run("go get github.com/gogo/protobuf/protoc-gen-gogo")
+    run("go get github.com/benbjohnson/tmpl")
     command = "go generate ./..."
     code = os.system(command)
     if code != 0:
diff --git a/functions.go b/functions.go
index e0dee41e51..4806da03ac 100644
--- a/functions.go
+++ b/functions.go
@@ -78,22 +78,22 @@ func (influxqlMapReducers) Percentile(field string, p float64) pipeline.MapReduc
 	return mr(field, "percentile", pipeline.StreamEdge, tsdb.MapEcho, r)
 }
 
-func (influxqlMapReducers) Top(limit int64, field string, fieldsOrTags ...string) pipeline.MapReduceInfo {
+func (influxqlMapReducers) Top(limit int64, field string, fieldsAndTags ...string) pipeline.MapReduceInfo {
 	m := func(in *tsdb.MapInput) interface{} {
-		return tsdb.MapTopBottom(in, int(limit), fieldsOrTags, len(fieldsOrTags)+2, "top")
+		return tsdb.MapTopBottom(in, int(limit), fieldsAndTags, len(fieldsAndTags)+2, "top")
 	}
 	r := func(values []interface{}) interface{} {
-		return tsdb.ReduceTopBottom(values, int(limit), fieldsOrTags, "top")
+		return tsdb.ReduceTopBottom(values, int(limit), fieldsAndTags, "top")
 	}
 	return mr(field, "top", pipeline.BatchEdge, m, r)
 }
 
-func (influxqlMapReducers) Bottom(limit int64, field string, fieldsOrTags ...string) pipeline.MapReduceInfo {
+func (influxqlMapReducers) Bottom(limit int64, field string, fieldsAndTags ...string) pipeline.MapReduceInfo {
 	m := func(in *tsdb.MapInput) interface{} {
-		return tsdb.MapTopBottom(in, int(limit), fieldsOrTags, len(fieldsOrTags)+2, "bottom")
+		return tsdb.MapTopBottom(in, int(limit), fieldsAndTags, len(fieldsAndTags)+2, "bottom")
 	}
 	r := func(values []interface{}) interface{} {
-		return tsdb.ReduceTopBottom(values, int(limit), fieldsOrTags, "bottom")
+		return tsdb.ReduceTopBottom(values, int(limit), fieldsAndTags, "bottom")
 	}
 	return mr(field, "bottom", pipeline.BatchEdge, m, r)
 }
diff --git a/influxql.gen.go b/influxql.gen.go
new file mode 100644
index 0000000000..d0a23b2bee
--- /dev/null
+++ b/influxql.gen.go
b/influxql.gen.go @@ -0,0 +1,499 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: influxql.gen.go.tmpl + +package kapacitor + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/pipeline" +) + +type floatPointAggregator struct { + field string + topBottomInfo *pipeline.TopBottomCallInfo + aggregator influxql.FloatPointAggregator +} + +func floatPopulateAuxFieldsAndTags(ap *influxql.FloatPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] + } + } +} + +func (a *floatPointAggregator) AggregateBatch(b *models.Batch) { + for _, p := range b.Points { + ap := &influxql.FloatPoint{ + Name: b.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(float64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + floatPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.AggregateFloat(ap) + } +} + +func (a *floatPointAggregator) AggregatePoint(p *models.Point) { + ap := &influxql.FloatPoint{ + Name: p.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(float64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + floatPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.AggregateFloat(ap) +} + +type floatPointBulkAggregator struct { + field string + topBottomInfo *pipeline.TopBottomCallInfo + aggregator pipeline.FloatBulkPointAggregator +} + +func (a *floatPointBulkAggregator) AggregateBatch(b *models.Batch) { + slice := make([]influxql.FloatPoint, len(b.Points)) + for i, p := range b.Points { + slice[i] = influxql.FloatPoint{ + Name: b.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(float64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + floatPopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + } + a.aggregator.AggregateFloatBulk(slice) +} + +func (a *floatPointBulkAggregator) AggregatePoint(p *models.Point) { + ap := &influxql.FloatPoint{ + Name: p.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(float64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + floatPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.AggregateFloat(ap) +} + +type floatPointEmitter struct { + baseReduceContext + emitter influxql.FloatPointEmitter +} + +func (e *floatPointEmitter) EmitPoint() (models.Point, error) { + slice := e.emitter.Emit() + if len(slice) != 1 { + return models.Point{}, fmt.Errorf("unexpected result from InfluxQL function, got %d points expected 1", len(slice)) + } + ap := slice[0] + var t time.Time + if e.pointTimes { + if ap.Time == influxql.ZeroTime { + t = e.time + } else { + t = time.Unix(0, ap.Time).UTC() + } + } else { + t = e.time + } + return models.Point{ + Name: e.name, + Time: t, + Group: e.group, + Dimensions: e.dimensions, + Tags: e.tags, + Fields: map[string]interface{}{e.as: ap.Value}, + }, nil +} + +func (e *floatPointEmitter) EmitBatch() models.Batch { + slice := e.emitter.Emit() + b := 
models.Batch{ + Name: e.name, + TMax: e.time, + Group: e.group, + Tags: e.tags, + Points: make([]models.BatchPoint, len(slice)), + } + var t time.Time + for i, ap := range slice { + if e.pointTimes { + if ap.Time == influxql.ZeroTime { + t = e.time + } else { + t = time.Unix(0, ap.Time).UTC() + } + } else { + t = e.time + } + b.Points[i] = models.BatchPoint{ + Time: t, + Tags: ap.Tags.KeyValues(), + Fields: map[string]interface{}{e.as: ap.Value}, + } + } + return b +} + +type integerPointAggregator struct { + field string + topBottomInfo *pipeline.TopBottomCallInfo + aggregator influxql.IntegerPointAggregator +} + +func integerPopulateAuxFieldsAndTags(ap *influxql.IntegerPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] + } + } +} + +func (a *integerPointAggregator) AggregateBatch(b *models.Batch) { + for _, p := range b.Points { + ap := &influxql.IntegerPoint{ + Name: b.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(int64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + integerPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.AggregateInteger(ap) + } +} + +func (a *integerPointAggregator) AggregatePoint(p *models.Point) { + ap := &influxql.IntegerPoint{ + Name: p.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(int64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + integerPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.AggregateInteger(ap) +} + +type integerPointBulkAggregator struct { + field string + topBottomInfo *pipeline.TopBottomCallInfo + aggregator pipeline.IntegerBulkPointAggregator +} + +func (a *integerPointBulkAggregator) AggregateBatch(b *models.Batch) { + slice := make([]influxql.IntegerPoint, len(b.Points)) + for i, p := range b.Points { + slice[i] = influxql.IntegerPoint{ + Name: b.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(int64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + integerPopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + } + a.aggregator.AggregateIntegerBulk(slice) +} + +func (a *integerPointBulkAggregator) AggregatePoint(p *models.Point) { + ap := &influxql.IntegerPoint{ + Name: p.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].(int64), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + integerPopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.AggregateInteger(ap) +} + +type integerPointEmitter struct { + baseReduceContext + emitter influxql.IntegerPointEmitter +} + +func (e *integerPointEmitter) EmitPoint() (models.Point, error) { + slice := e.emitter.Emit() + if len(slice) != 1 { + return models.Point{}, fmt.Errorf("unexpected result from InfluxQL function, got %d points expected 1", len(slice)) + } + ap := slice[0] + var t time.Time + if e.pointTimes { + if ap.Time == influxql.ZeroTime { + t = e.time + } else { + t = time.Unix(0, ap.Time).UTC() + } + } else { + t = e.time + } + return models.Point{ + Name: e.name, + Time: t, + Group: e.group, + Dimensions: e.dimensions, + Tags: 
e.tags, + Fields: map[string]interface{}{e.as: ap.Value}, + }, nil +} + +func (e *integerPointEmitter) EmitBatch() models.Batch { + slice := e.emitter.Emit() + b := models.Batch{ + Name: e.name, + TMax: e.time, + Group: e.group, + Tags: e.tags, + Points: make([]models.BatchPoint, len(slice)), + } + var t time.Time + for i, ap := range slice { + if e.pointTimes { + if ap.Time == influxql.ZeroTime { + t = e.time + } else { + t = time.Unix(0, ap.Time).UTC() + } + } else { + t = e.time + } + b.Points[i] = models.BatchPoint{ + Time: t, + Tags: ap.Tags.KeyValues(), + Fields: map[string]interface{}{e.as: ap.Value}, + } + } + return b +} + +// floatReduceContext uses composition to implement the reduceContext interface +type floatReduceContext struct { + floatPointAggregator + floatPointEmitter +} + +// floatBulkReduceContext uses composition to implement the reduceContext interface +type floatBulkReduceContext struct { + floatPointBulkAggregator + floatPointEmitter +} + +// floatIntegerReduceContext uses composition to implement the reduceContext interface +type floatIntegerReduceContext struct { + floatPointAggregator + integerPointEmitter +} + +// floatBulkIntegerReduceContext uses composition to implement the reduceContext interface +type floatBulkIntegerReduceContext struct { + floatPointBulkAggregator + integerPointEmitter +} + +// integerFloatReduceContext uses composition to implement the reduceContext interface +type integerFloatReduceContext struct { + integerPointAggregator + floatPointEmitter +} + +// integerBulkFloatReduceContext uses composition to implement the reduceContext interface +type integerBulkFloatReduceContext struct { + integerPointBulkAggregator + floatPointEmitter +} + +// integerReduceContext uses composition to implement the reduceContext interface +type integerReduceContext struct { + integerPointAggregator + integerPointEmitter +} + +// integerBulkReduceContext uses composition to implement the reduceContext interface +type integerBulkReduceContext struct { + integerPointBulkAggregator + integerPointEmitter +} + +func determineReduceContextCreateFn(method string, value interface{}, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) { + switch value.(type) { + + case float64: + switch { + + case rc.CreateFloatReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateFloatReducer() + return &floatReduceContext{ + floatPointAggregator: floatPointAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + floatPointEmitter: floatPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + case rc.CreateFloatBulkReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateFloatBulkReducer() + return &floatBulkReduceContext{ + floatPointBulkAggregator: floatPointBulkAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + floatPointEmitter: floatPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + + case rc.CreateFloatIntegerReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateFloatIntegerReducer() + return &floatIntegerReduceContext{ + floatPointAggregator: floatPointAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + integerPointEmitter: integerPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + case rc.CreateFloatBulkIntegerReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateFloatBulkIntegerReducer() 
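+ // Note: the Create*Reducer funcs added in pipeline/influxql.go return the same underlying value as both the aggregator (a) and the emitter (e), e.g. `return fn, fn`.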
+ return &floatBulkIntegerReduceContext{ + floatPointBulkAggregator: floatPointBulkAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + integerPointEmitter: integerPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + + default: + err = fmt.Errorf("cannot apply %s to float64 field", method) + } + + case int64: + switch { + + case rc.CreateIntegerFloatReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateIntegerFloatReducer() + return &integerFloatReduceContext{ + integerPointAggregator: integerPointAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + floatPointEmitter: floatPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + case rc.CreateIntegerBulkFloatReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateIntegerBulkFloatReducer() + return &integerBulkFloatReduceContext{ + integerPointBulkAggregator: integerPointBulkAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + floatPointEmitter: floatPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + + case rc.CreateIntegerReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateIntegerReducer() + return &integerReduceContext{ + integerPointAggregator: integerPointAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + integerPointEmitter: integerPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + case rc.CreateIntegerBulkReducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.CreateIntegerBulkReducer() + return &integerBulkReduceContext{ + integerPointBulkAggregator: integerPointBulkAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + integerPointEmitter: integerPointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + + default: + err = fmt.Errorf("cannot apply %s to int64 field", method) + } + + default: + err = fmt.Errorf("invalid field type: %T", value) + } + return +} diff --git a/influxql.gen.go.tmpl b/influxql.gen.go.tmpl new file mode 100644 index 0000000000..8979aec40a --- /dev/null +++ b/influxql.gen.go.tmpl @@ -0,0 +1,231 @@ +package kapacitor + + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/pipeline" +) + +{{/* Define typed Aggregate/Emit types */}} +{{range .}} + +type {{.name}}PointAggregator struct { + field string + topBottomInfo *pipeline.TopBottomCallInfo + aggregator influxql.{{.Name}}PointAggregator +} + +func {{.name}}PopulateAuxFieldsAndTags(ap *influxql.{{.Name}}Point, fieldsAndTags []string, fields models.Fields, tags models.Tags) { + ap.Aux = make([]interface{}, len(fieldsAndTags)) + for i, name := range fieldsAndTags { + if f, ok := fields[name]; ok { + ap.Aux[i] = f + } else { + ap.Aux[i] = tags[name] + } + } +} + +func (a *{{.name}}PointAggregator) AggregateBatch(b *models.Batch) { + for _, p := range b.Points { + ap := &influxql.{{.Name}}Point{ + Name: b.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].({{.Type}}), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + {{.name}}PopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.Aggregate{{.Name}}(ap) + } +} + +func (a *{{.name}}PointAggregator) AggregatePoint(p *models.Point) { 
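+ // AggregatePoint wraps a single stream point as an influxql.{{.Name}}Point and hands it to the wrapped aggregator; Aux values are only populated for top/bottom calls.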
+ ap := &influxql.{{.Name}}Point{ + Name: p.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].({{.Type}}), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + {{.name}}PopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.Aggregate{{.Name}}(ap) +} + + + +type {{.name}}PointBulkAggregator struct { + field string + topBottomInfo *pipeline.TopBottomCallInfo + aggregator pipeline.{{.Name}}BulkPointAggregator +} + +func (a *{{.name}}PointBulkAggregator) AggregateBatch(b *models.Batch) { + slice := make([]influxql.{{.Name}}Point, len(b.Points)) + for i, p := range b.Points { + slice[i] = influxql.{{.Name}}Point{ + Name: b.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].({{.Type}}), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + {{.name}}PopulateAuxFieldsAndTags(&slice[i], a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + } + a.aggregator.Aggregate{{.Name}}Bulk(slice) +} + +func (a *{{.name}}PointBulkAggregator) AggregatePoint(p *models.Point) { + ap := &influxql.{{.Name}}Point{ + Name: p.Name, + Tags: influxql.NewTags(p.Tags), + Time: p.Time.UnixNano(), + Value: p.Fields[a.field].({{.Type}}), + } + if a.topBottomInfo != nil { + // We need to populate the Aux fields + {{.name}}PopulateAuxFieldsAndTags(ap, a.topBottomInfo.FieldsAndTags, p.Fields, p.Tags) + } + a.aggregator.Aggregate{{.Name}}(ap) +} + +type {{.name}}PointEmitter struct { + baseReduceContext + emitter influxql.{{.Name}}PointEmitter +} + +func (e *{{.name}}PointEmitter) EmitPoint() (models.Point, error) { + slice := e.emitter.Emit() + if len(slice) != 1 { + return models.Point{}, fmt.Errorf("unexpected result from InfluxQL function, got %d points expected 1", len(slice)) + } + ap := slice[0] + var t time.Time + if e.pointTimes { + if ap.Time == influxql.ZeroTime { + t = e.time + } else { + t = time.Unix(0, ap.Time).UTC() + } + } else { + t = e.time + } + return models.Point{ + Name: e.name, + Time: t, + Group: e.group, + Dimensions: e.dimensions, + Tags: e.tags, + Fields: map[string]interface{}{e.as: ap.Value}, + }, nil +} + +func (e *{{.name}}PointEmitter) EmitBatch() models.Batch { + slice := e.emitter.Emit() + b := models.Batch{ + Name: e.name, + TMax: e.time, + Group: e.group, + Tags: e.tags, + Points: make([]models.BatchPoint, len(slice)), + } + var t time.Time + for i, ap := range slice { + if e.pointTimes { + if ap.Time == influxql.ZeroTime { + t = e.time + } else { + t = time.Unix(0, ap.Time).UTC() + } + } else { + t = e.time + } + b.Points[i] = models.BatchPoint{ + Time: t, + Tags: ap.Tags.KeyValues(), + Fields: map[string]interface{}{e.as: ap.Value}, + } + } + return b +} + +{{end}} + +{{/* Define composite types for reduceContext */}} +{{with $types := .}} +{{range $a := $types}} +{{range $e := $types}} + +// {{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext uses composition to implement the reduceContext interface +type {{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext struct { + {{$a.name}}PointAggregator + {{$e.name}}PointEmitter +} + +// {{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext uses composition to implement the reduceContext interface +type {{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext struct { + {{$a.name}}PointBulkAggregator + {{$e.name}}PointEmitter +} +{{end}}{{end}} + + +{{/* Define switch cases for reduceContext contruction */}} + 
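(Aside, before the switch cases below: the committed influxql.gen.go earlier in this patch is the output of running this template against the tmpldata array added further down, which supplies one entry per point type, Float/float/float64 and Integer/integer/int64. The go:generate directives invoke benbjohnson/tmpl for this; the sketch below is only a rough, hypothetical stand-in using the standard text/template package to show how the {{.Name}}/{{.name}}/{{.Type}} fields drive the two generated variants, not the actual generator.)

package main

import (
	"os"
	"text/template"
)

func main() {
	// Rough stand-in for `tmpl -data=@tmpldata influxql.gen.go.tmpl`; it only
	// illustrates the expansion of one declaration per tmpldata entry.
	const src = `{{range .}}type {{.name}}PointAggregator struct {
	field      string
	aggregator influxql.{{.Name}}PointAggregator // field values are asserted as {{.Type}}
}
{{end}}`
	data := []map[string]string{
		{"Name": "Float", "name": "float", "Type": "float64"},
		{"Name": "Integer", "name": "integer", "Type": "int64"},
	}
	t := template.Must(template.New("influxql.gen").Parse(src))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}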
+func determineReduceContextCreateFn(method string, value interface{}, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) { + switch value.(type) { +{{range $a := $types}} + case {{.Type}}: + switch { +{{range $e := $types}} + case rc.Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer() + return &{{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext{ + {{$a.name}}PointAggregator: {{$a.name}}PointAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + {{$e.name}}PointEmitter: {{$e.name}}PointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } + case rc.Create{{$a.Name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer != nil: + fn = func(c baseReduceContext) reduceContext { + a, e := rc.Create{{$a.Name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer() + return &{{$a.name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext{ + {{$a.name}}PointBulkAggregator: {{$a.name}}PointBulkAggregator{ + field: c.field, + topBottomInfo: rc.TopBottomCallInfo, + aggregator: a, + }, + {{$e.name}}PointEmitter: {{$e.name}}PointEmitter{ + baseReduceContext: c, + emitter: e, + }, + } + } +{{end}} + default: + err = fmt.Errorf("cannot apply %s to {{$a.Type}} field", method) + } +{{end}} + default: + err = fmt.Errorf("invalid field type: %T", value) + } + return +} +{{end}} diff --git a/influxql.go b/influxql.go new file mode 100644 index 0000000000..b1268f9219 --- /dev/null +++ b/influxql.go @@ -0,0 +1,181 @@ +package kapacitor + +import ( + "fmt" + "log" + "time" + + "github.com/influxdata/kapacitor/models" + "github.com/influxdata/kapacitor/pipeline" +) + +// tmpl -- go get github.com/benbjohnson/tmpl +//go:generate tmpl -data=@tmpldata influxql.gen.go.tmpl + +type createReduceContextFunc func(c baseReduceContext) reduceContext + +type InfluxQLNode struct { + node + n *pipeline.InfluxQLNode + createFn createReduceContextFunc +} + +func newInfluxQLNode(et *ExecutingTask, n *pipeline.InfluxQLNode, l *log.Logger) (*InfluxQLNode, error) { + m := &InfluxQLNode{ + node: node{Node: n, et: et, logger: l}, + n: n, + } + m.node.runF = m.runInfluxQLs + return m, nil +} + +func (n *InfluxQLNode) runInfluxQLs([]byte) error { + switch n.n.Wants() { + case pipeline.StreamEdge: + return n.runStreamInfluxQL() + case pipeline.BatchEdge: + return n.runBatchInfluxQL() + default: + return fmt.Errorf("cannot map %v edge", n.n.Wants()) + } +} + +type reduceContext interface { + AggregatePoint(p *models.Point) + AggregateBatch(b *models.Batch) + EmitPoint() (models.Point, error) + EmitBatch() models.Batch + Time() time.Time +} + +type baseReduceContext struct { + as string + field string + name string + group models.GroupID + dimensions models.Dimensions + tags models.Tags + time time.Time + pointTimes bool + topBottomInfo *pipeline.TopBottomCallInfo +} + +func (c *baseReduceContext) Time() time.Time { + return c.time +} + +func (n *InfluxQLNode) runStreamInfluxQL() error { + contexts := make(map[models.GroupID]reduceContext) + for p, ok := n.ins[0].NextPoint(); ok; { + context := contexts[p.Group] + // Fisrt point in window + if context == nil { + // Create new context + c := baseReduceContext{ + as: n.n.As, + field: n.n.Field, + name: p.Name, + group: p.Group, + dimensions: p.Dimensions, + tags: p.PointTags(), + time: p.Time, + pointTimes: n.n.PointTimes, + } + + createFn, 
err := n.getCreateFn(p.Fields[c.field]) + if err != nil { + return err + } + + context = createFn(c) + contexts[p.Group] = context + context.AggregatePoint(&p) + + } else if p.Time.Equal(context.Time()) { + context.AggregatePoint(&p) + // advance to next point + p, ok = n.ins[0].NextPoint() + } else { + err := n.emit(context) + if err != nil { + return err + } + + // Nil out reduced point + contexts[p.Group] = nil + // do not advance, + // go through loop again to initialize new iterator. + } + } + return nil +} + +func (n *InfluxQLNode) runBatchInfluxQL() error { + for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() { + // Skip empty batches + if len(b.Points) == 0 { + continue + } + + // Create new base context + c := baseReduceContext{ + as: n.n.As, + field: n.n.Field, + name: b.Name, + group: b.Group, + dimensions: b.PointDimensions(), + tags: b.Tags, + time: b.TMax, + pointTimes: n.n.PointTimes, + } + createFn, err := n.getCreateFn(b.Points[0].Fields[c.field]) + if err != nil { + return err + } + + context := createFn(c) + context.AggregateBatch(&b) + err = n.emit(context) + if err != nil { + return err + } + } + return nil +} + +func (n *InfluxQLNode) getCreateFn(value interface{}) (createReduceContextFunc, error) { + if n.createFn != nil { + return n.createFn, nil + } + createFn, err := determineReduceContextCreateFn(n.n.Method, value, n.n.ReduceCreater) + if err != nil { + return nil, err + } + n.createFn = createFn + return n.createFn, nil +} + +func (n *InfluxQLNode) emit(context reduceContext) error { + switch n.Provides() { + case pipeline.StreamEdge: + p, err := context.EmitPoint() + if err != nil { + return err + } + for _, out := range n.outs { + err := out.CollectPoint(p) + if err != nil { + return err + } + } + case pipeline.BatchEdge: + b := context.EmitBatch() + for _, out := range n.outs { + err := out.CollectBatch(b) + if err != nil { + return err + } + } + } + return nil +} diff --git a/integrations/data/TestStream_Aggregations.srpl b/integrations/data/TestStream_InfluxQL.srpl similarity index 100% rename from integrations/data/TestStream_Aggregations.srpl rename to integrations/data/TestStream_InfluxQL.srpl diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go index fd1812a807..8ef4ce18e1 100644 --- a/integrations/streamer_test.go +++ b/integrations/streamer_test.go @@ -1333,7 +1333,7 @@ cpu.union(mem, disk) testStreamerWithOutput(t, "TestStream_Union", script, 15*time.Second, er, nil, false) } -func TestStream_Aggregations(t *testing.T) { +func TestStream_InfluxQL(t *testing.T) { type testCase struct { Method string @@ -1349,14 +1349,26 @@ stream .window() .period(10s) .every(10s) - .mapReduce({{ .Method }}({{ .Args }})) + .mapReduce(influxql.{{ .Method }}({{ .Args }})) {{ if .UsePointTimes }}.usePointTimes(){{ end }} - .httpOut('TestStream_Aggregations') + .httpOut('TestStream_InfluxQL') +` + + var newScriptTmpl = ` +stream + .from().measurement('cpu') + .where(lambda: "host" == 'serverA') + .window() + .period(10s) + .every(10s) + .{{ .Method }}({{ .Args }}) + {{ if .UsePointTimes }}.usePointTimes(){{ end }} + .httpOut('TestStream_InfluxQL') ` endTime := time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC) testCases := []testCase{ testCase{ - Method: "influxql.sum", + Method: "sum", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1372,7 +1384,7 @@ stream }, }, testCase{ - Method: "influxql.count", + Method: "count", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1388,7 +1400,7 @@ stream }, }, testCase{ - Method: 
"influxql.distinct", + Method: "distinct", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1398,27 +1410,27 @@ stream Values: [][]interface{}{ { endTime, - 91.0, + 98.0, }, { endTime, - 92.0, + 91.0, }, { endTime, - 93.0, + 95.0, }, { endTime, - 95.0, + 93.0, }, { endTime, - 96.0, + 92.0, }, { endTime, - 98.0, + 96.0, }, }, }, @@ -1426,7 +1438,7 @@ stream }, }, testCase{ - Method: "influxql.mean", + Method: "mean", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1442,7 +1454,7 @@ stream }, }, testCase{ - Method: "influxql.median", + Method: "median", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1458,7 +1470,7 @@ stream }, }, testCase{ - Method: "influxql.min", + Method: "min", UsePointTimes: true, ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1475,7 +1487,7 @@ stream }, }, testCase{ - Method: "influxql.min", + Method: "min", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1491,7 +1503,7 @@ stream }, }, testCase{ - Method: "influxql.max", + Method: "max", UsePointTimes: true, ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1508,7 +1520,7 @@ stream }, }, testCase{ - Method: "influxql.max", + Method: "max", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1524,7 +1536,7 @@ stream }, }, testCase{ - Method: "influxql.spread", + Method: "spread", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1540,7 +1552,7 @@ stream }, }, testCase{ - Method: "influxql.stddev", + Method: "stddev", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1556,7 +1568,7 @@ stream }, }, testCase{ - Method: "influxql.first", + Method: "first", UsePointTimes: true, ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1573,7 +1585,7 @@ stream }, }, testCase{ - Method: "influxql.first", + Method: "first", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1589,7 +1601,7 @@ stream }, }, testCase{ - Method: "influxql.last", + Method: "last", UsePointTimes: true, ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1606,7 +1618,7 @@ stream }, }, testCase{ - Method: "influxql.last", + Method: "last", ER: kapacitor.Result{ Series: imodels.Rows{ { @@ -1622,7 +1634,7 @@ stream }, }, testCase{ - Method: "influxql.percentile", + Method: "percentile", Args: "'value', 50.0", ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1639,7 +1651,7 @@ stream }, }, testCase{ - Method: "influxql.top", + Method: "top", UsePointTimes: true, Args: "2, 'value'", ER: kapacitor.Result{ @@ -1667,7 +1679,7 @@ stream }, }, testCase{ - Method: "influxql.top", + Method: "top", Args: "2, 'value'", ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1694,7 +1706,7 @@ stream }, }, testCase{ - Method: "influxql.bottom", + Method: "bottom", UsePointTimes: true, Args: "3, 'value'", ER: kapacitor.Result{ @@ -1728,7 +1740,7 @@ stream }, }, testCase{ - Method: "influxql.bottom", + Method: "bottom", Args: "3, 'value'", ER: kapacitor.Result{ Series: imodels.Rows{ @@ -1767,22 +1779,35 @@ stream t.Fatal(err) } + newTmpl, err := template.New("script").Parse(newScriptTmpl) + if err != nil { + t.Fatal(err) + } + + tmpls := []*template.Template{tmpl, newTmpl} + for _, tc := range testCases { - t.Log("Method:", tc.Method) - var script bytes.Buffer - if tc.Args == "" { - tc.Args = "'value'" - } - tmpl.Execute(&script, tc) - testStreamerWithOutput( - t, - "TestStream_Aggregations", - string(script.Bytes()), - 13*time.Second, - tc.ER, - nil, - false, - ) + for i, tmpl := range tmpls { + if tc.Method == "distinct" && i == 0 { + // Skip legacy test for new behavior + continue + } + t.Log("Method:", tc.Method, i) + var script bytes.Buffer + if tc.Args == "" { + tc.Args = "'value'" + 
} + tmpl.Execute(&script, tc) + testStreamerWithOutput( + t, + "TestStream_InfluxQL", + string(script.Bytes()), + 13*time.Second, + tc.ER, + nil, + false, + ) + } } } @@ -3019,7 +3044,7 @@ var topScores = stream .mapReduce(influxql.last('value')) // Calculate the top 5 scores per game .groupBy('game') - .mapReduce(influxql.top(5, 'last', 'player')) + .top(5, 'last', 'player') topScores .httpOut('top_scores') diff --git a/pipeline/influxql.gen.go b/pipeline/influxql.gen.go new file mode 100644 index 0000000000..0e1e1f4a33 --- /dev/null +++ b/pipeline/influxql.gen.go @@ -0,0 +1,35 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: influxql.gen.go.tmpl + +package pipeline + +import "github.com/influxdata/influxdb/influxql" + +type ReduceCreater struct { + CreateFloatReducer func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) + CreateFloatBulkReducer func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) + + CreateFloatIntegerReducer func() (influxql.FloatPointAggregator, influxql.IntegerPointEmitter) + CreateFloatBulkIntegerReducer func() (FloatBulkPointAggregator, influxql.IntegerPointEmitter) + + CreateIntegerFloatReducer func() (influxql.IntegerPointAggregator, influxql.FloatPointEmitter) + CreateIntegerBulkFloatReducer func() (IntegerBulkPointAggregator, influxql.FloatPointEmitter) + + CreateIntegerReducer func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) + CreateIntegerBulkReducer func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) + + TopBottomCallInfo *TopBottomCallInfo +} + +type FloatBulkPointAggregator interface { + influxql.FloatPointAggregator + influxql.FloatBulkPointAggregator +} + +type IntegerBulkPointAggregator interface { + influxql.IntegerPointAggregator + influxql.IntegerBulkPointAggregator +} diff --git a/pipeline/influxql.gen.go.tmpl b/pipeline/influxql.gen.go.tmpl new file mode 100644 index 0000000000..6b0ee5b9df --- /dev/null +++ b/pipeline/influxql.gen.go.tmpl @@ -0,0 +1,21 @@ +package pipeline + +import "github.com/influxdata/influxdb/influxql" + +type ReduceCreater struct { +{{with $types := .}} +{{range $a := $types}} +{{range $e := $types}} + Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer func() (influxql.{{$a.Name}}PointAggregator, influxql.{{$e.Name}}PointEmitter) + Create{{$a.Name}}Bulk{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer func() ({{$a.Name}}BulkPointAggregator, influxql.{{$e.Name}}PointEmitter) +{{end}}{{end}}{{end}} + + TopBottomCallInfo *TopBottomCallInfo +} + +{{range .}} +type {{.Name}}BulkPointAggregator interface { + influxql.{{.Name}}PointAggregator + influxql.{{.Name}}BulkPointAggregator +} +{{end}} diff --git a/pipeline/influxql.go b/pipeline/influxql.go new file mode 100644 index 0000000000..296d268e45 --- /dev/null +++ b/pipeline/influxql.go @@ -0,0 +1,301 @@ +package pipeline + +import "github.com/influxdata/influxdb/influxql" + +// tmpl -- go get github.com/benbjohnson/tmpl +//go:generate tmpl -data=@../tmpldata influxql.gen.go.tmpl + +type InfluxQLNode struct { + chainnode + + // tick:ignore + Method string + // tick:ignore + Field string + + // The name of the field, defaults to the name of + // function used (i.e. 
.mean -> 'mean') + As string + + // tick:ignore + ReduceCreater ReduceCreater + + // tick:ignore + PointTimes bool +} + +func newInfluxQLNode(method, field string, wants, provides EdgeType, reducer ReduceCreater) *InfluxQLNode { + return &InfluxQLNode{ + chainnode: newBasicChainNode(method, wants, provides), + Method: method, + Field: field, + As: method, + ReduceCreater: reducer, + } +} + +// Use the time of the selected point instead of the time of the batch. +// +// Only applies to selector MR functions like first, last, top, bottom, etc. +// Aggregation functions always use the batch time. +// tick:property +func (n *InfluxQLNode) UsePointTimes() *InfluxQLNode { + n.PointTimes = true + return n +} + +//------------------------------------ +// Aggregation Functions +// + +func (n *chainnode) Count(field string) *InfluxQLNode { + i := newInfluxQLNode("count", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatIntegerReducer: func() (influxql.FloatPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewFloatFuncIntegerReducer(influxql.FloatCountReduce) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerFuncReducer(influxql.IntegerCountReduce) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Distinct(field string) *InfluxQLNode { + i := newInfluxQLNode("distinct", field, n.Provides(), BatchEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.FloatDistinctReduceSlice) + return fn, fn + }, + CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerSliceFuncReducer(influxql.IntegerDistinctReduceSlice) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Mean(field string) *InfluxQLNode { + i := newInfluxQLNode("mean", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatMeanReducer() + return fn, fn + }, + CreateIntegerFloatReducer: func() (influxql.IntegerPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewIntegerMeanReducer() + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Median(field string) *InfluxQLNode { + i := newInfluxQLNode("median", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.FloatMedianReduceSlice) + return fn, fn + }, + CreateIntegerBulkFloatReducer: func() (IntegerBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewIntegerSliceFuncFloatReducer(influxql.IntegerMedianReduceSlice) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Spread(field string) *InfluxQLNode { + i := newInfluxQLNode("spread", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.FloatSpreadReduceSlice) + return fn, fn + }, + CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerSliceFuncReducer(influxql.IntegerSpreadReduceSlice) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Sum(field string) 
*InfluxQLNode { + i := newInfluxQLNode("sum", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatFuncReducer(influxql.FloatSumReduce) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerFuncReducer(influxql.IntegerSumReduce) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +//------------------------------------ +// Selection Functions +// + +func (n *chainnode) First(field string) *InfluxQLNode { + i := newInfluxQLNode("first", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatFuncReducer(influxql.FloatFirstReduce) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerFuncReducer(influxql.IntegerFirstReduce) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Last(field string) *InfluxQLNode { + i := newInfluxQLNode("last", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatFuncReducer(influxql.FloatLastReduce) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerFuncReducer(influxql.IntegerLastReduce) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Min(field string) *InfluxQLNode { + i := newInfluxQLNode("min", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatFuncReducer(influxql.FloatMinReduce) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerFuncReducer(influxql.IntegerMinReduce) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Max(field string) *InfluxQLNode { + i := newInfluxQLNode("max", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatFuncReducer(influxql.FloatMaxReduce) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerFuncReducer(influxql.IntegerMaxReduce) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Percentile(field string, percentile float64) *InfluxQLNode { + i := newInfluxQLNode("percentile", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.NewFloatPercentileReduceSliceFunc(percentile)) + return fn, fn + }, + CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerSliceFuncReducer(influxql.NewIntegerPercentileReduceSliceFunc(percentile)) + return fn, fn + }, + }) + n.linkChild(i) + return i +} + +type TopBottomCallInfo struct { + FieldsAndTags []string +} + +func (n *chainnode) Top(num int64, field string, fieldsAndTags ...string) *InfluxQLNode { + tags := make([]int, len(fieldsAndTags)) + for i := range fieldsAndTags { + 
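+ // Positional indexes into the point's Aux values, which are filled from fieldsAndTags in the same order by the populate*AuxFieldsAndTags helpers in influxql.gen.go.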
tags[i] = i + } + i := newInfluxQLNode("top", field, n.Provides(), BatchEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.NewFloatTopReduceSliceFunc( + int(num), + tags, + influxql.Interval{}, + )) + return fn, fn + }, + CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerSliceFuncReducer(influxql.NewIntegerTopReduceSliceFunc( + int(num), + tags, + influxql.Interval{}, + )) + return fn, fn + }, + TopBottomCallInfo: &TopBottomCallInfo{ + FieldsAndTags: fieldsAndTags, + }, + }) + n.linkChild(i) + return i +} + +func (n *chainnode) Bottom(num int64, field string, fieldsAndTags ...string) *InfluxQLNode { + tags := make([]int, len(fieldsAndTags)) + for i := range fieldsAndTags { + tags[i] = i + } + i := newInfluxQLNode("bottom", field, n.Provides(), BatchEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.NewFloatBottomReduceSliceFunc( + int(num), + tags, + influxql.Interval{}, + )) + return fn, fn + }, + CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerSliceFuncReducer(influxql.NewIntegerBottomReduceSliceFunc( + int(num), + tags, + influxql.Interval{}, + )) + return fn, fn + }, + TopBottomCallInfo: &TopBottomCallInfo{ + FieldsAndTags: fieldsAndTags, + }, + }) + n.linkChild(i) + return i +} + +//------------------------------------ +// Transformation Functions +// + +func (n *chainnode) Stddev(field string) *InfluxQLNode { + i := newInfluxQLNode("stddev", field, n.Provides(), StreamEdge, ReduceCreater{ + CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatSliceFuncReducer(influxql.FloatStddevReduceSlice) + return fn, fn + }, + CreateIntegerBulkFloatReducer: func() (IntegerBulkPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewIntegerSliceFuncFloatReducer(influxql.IntegerStddevReduceSlice) + return fn, fn + }, + }) + n.linkChild(i) + return i +} diff --git a/task.go b/task.go index 2dae9ab56f..5caed3e751 100644 --- a/task.go +++ b/task.go @@ -404,6 +404,8 @@ func (et *ExecutingTask) createNode(p pipeline.Node, l *log.Logger) (Node, error return newShiftNode(et, t, l) case *pipeline.NoOpNode: return newNoOpNode(et, t, l) + case *pipeline.InfluxQLNode: + return newInfluxQLNode(et, t, l) default: return nil, fmt.Errorf("unknown pipeline node type %T", p) } diff --git a/tick/eval.go b/tick/eval.go index ce49f6419d..b51d0875e6 100644 --- a/tick/eval.go +++ b/tick/eval.go @@ -303,7 +303,7 @@ func NewReflectionDescriber(obj interface{}) *ReflectionDescriber { } func (r *ReflectionDescriber) Desc() string { - return reflect.TypeOf(r.obj).Name() + return fmt.Sprintf("%T", r.obj) } // Using reflection check if the object has the method or field. 
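(One small change worth calling out in the tick/eval.go hunk above: ReflectionDescriber.Desc now formats the wrapped object with %T instead of reflect.TypeOf(obj).Name(). Name() returns an empty string for unnamed types such as pointers, which is presumably how the described pipeline nodes are passed around, while %T always produces a printable type like *pipeline.InfluxQLNode. A minimal sketch of the difference, using a hypothetical local type:)

package main

import (
	"fmt"
	"reflect"
)

type InfluxQLNode struct{}

func main() {
	obj := &InfluxQLNode{}
	// Name() is only defined for named types; a pointer type is unnamed, so this prints "".
	fmt.Printf("reflect.TypeOf(obj).Name() = %q\n", reflect.TypeOf(obj).Name())
	// %T prints the full dynamic type, e.g. "*main.InfluxQLNode", so the description stays useful.
	fmt.Printf("fmt.Sprintf(%%T, obj) = %q\n", fmt.Sprintf("%T", obj))
}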
diff --git a/tick/node.go b/tick/node.go index 706b863c97..3d1e3384c6 100644 --- a/tick/node.go +++ b/tick/node.go @@ -7,7 +7,7 @@ import ( "strconv" "time" - "github.com/influxdb/influxdb/influxql" + "github.com/influxdata/influxdb/influxql" ) type unboundFunc func(obj interface{}) (interface{}, error) diff --git a/tmpldata b/tmpldata new file mode 100644 index 0000000000..971e96428b --- /dev/null +++ b/tmpldata @@ -0,0 +1,17 @@ +[ + { + "Name":"Float", + "name":"float", + "Type":"float64", + "Nil":"0", + "Zero":"float64(0)" + }, + { + "Name":"Integer", + "name":"integer", + "Type":"int64", + "Nil":"0", + "Zero":"int64(0)" + } +] + diff --git a/vendor/github.com/influxdata/influxdb/.dockerignore b/vendor/github.com/influxdata/influxdb/.dockerignore new file mode 100644 index 0000000000..378eac25d3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.dockerignore @@ -0,0 +1 @@ +build diff --git a/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..b62c7f5b19 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,4 @@ +- [ ] CHANGELOG.md updated +- [ ] Rebased/mergable +- [ ] Tests pass +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) diff --git a/vendor/github.com/influxdata/influxdb/.gitignore b/vendor/github.com/influxdata/influxdb/.gitignore new file mode 100644 index 0000000000..8c55003972 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.gitignore @@ -0,0 +1,79 @@ +*~ +src/ + +config.json +/bin/ + +TAGS + +# vim temp files +*.swp + +*.test +/query/a.out* +.DS_Store + +# ignore generated files. +cmd/influxd/version.go + +# executables + +influx_tsm +**/influx_tsm +!**/influx_tsm/ + +influx_stress +**/influx_stress +!**/influx_stress/ + +influxd +**/influxd +!**/influxd/ + +influx +**/influx +!**/influx/ + +influxdb +**/influxdb +!**/influxdb/ + +influx_inspect +**/influx_inspect +!**/influx_inspect/ + +/benchmark-tool +/main +/benchmark-storage +godef +gosym +gocode +inspect-raft + +# dependencies +out_rpm/ +packages/ + +# autconf +autom4te.cache/ +config.log +config.status + +# log file +influxdb.log +benchmark.log + +# config file +config.toml + +# test data files +integration/migration_data/ + +# goide project files +.idea + +# goconvey config files +*.goconvey + +// Ingnore SourceGraph directory +.srclib-store/ diff --git a/vendor/github.com/influxdata/influxdb/.hooks/pre-commit b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit new file mode 100755 index 0000000000..d17788f984 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +fmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l` +if [ $fmtcount -gt 0 ]; then + echo "Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing" + exit 1 +fi + +# Due to the way composites work, vet will fail for some of our tests so we ignore it +vetcount=`go tool vet -composites=true ./ 2>&1 | wc -l` +if [ $vetcount -gt 0 ]; then + echo "Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing" + exit 1 +fi +exit 0 + +# Ensure FIXME lines are removed before commit. 
+fixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\s*__g') +if [ "$todo_lines" != "" ]; then + echo "Please remove the following lines:" + echo -e "$todo_lines" + exit 1 +fi + diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md new file mode 100644 index 0000000000..dfcd19d7e7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -0,0 +1,1963 @@ +## v0.11.0 [unreleased] + +### Features + +- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags. +- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client. +- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw +- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev +- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce +- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon +- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri! +- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor +- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment +- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty) +- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm +- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default. +- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev +- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup +- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped. +- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour +- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy +- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output. + +### Bugfixes + +- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky +- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. Thanks @runner-mei +- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter +- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. 
@nuss-justin +- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd +- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy. +- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin +- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon +- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup +- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets +- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query +- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster +- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664 +- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields. +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered +- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. 
@chris-ramon +- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster +- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases +- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64 +- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling +- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master +- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value + +## v0.10.2 [2016-03-03] +### Bugfixes +- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points +- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour +- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour +- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate +- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy. +- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6) +- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value + +## v0.10.1 [2016-02-18] + +### Bugfixes +- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy +- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly +- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently +- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion +- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor + +## v0.10.0 [2016-02-04] + +### Release Notes + +This release now uses the TSM storage engine. Old bz1 and b1 shards can still be read, but in a future release you will be required to migrate old shards to TSM. For new shards getting created, or new installations, the TSM storage engine will be used. + +This release also changes how clusters are setup. The config file has changed so have a look at the new example. Also, upgrading a single node works, but for upgrading clusters, you'll need help from us. Sent us a note at contact@influxdb.com if you need assistance upgrading a cluster. + +### Features +- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. 
Thanks @pires +- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91 +- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node. +- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM). +- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool. +- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks. +- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk! +- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time. +- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale + +### Bugfixes +- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard +- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals. +- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results. +- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj +- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz +- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query query error. Thanks @li-ang +- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input. +- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parenthesis in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei +- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches. +- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases. +- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74 +- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires +- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol. +- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range +- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires +- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later. +- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory +- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty. +- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values. 
+- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup' +- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described +- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points +- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment +- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64 +- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI. +- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic +- [#5504](https://github.com/influxdata/influxdb/issues/5475): create retention policy on unexistant DB crash InfluxDB +- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes. + +## v0.9.6 [2015-12-09] + +### Release Notes +This release has an updated design and implementation of the TSM storage engine. If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds) you will have to start with a fresh database. + +If you had TSM configuration options set, those have been updated. See the the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125 + +### Features +- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled +- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski +- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models pacakge. Thanks @e-dard! +- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors +- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires! +- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing. +- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service. +- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris + +### Bugfixes +- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile. +- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei +- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values. +- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries. +- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru +- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. 
Thanks @pires +- [#4974](https://github.com/influxdata/influxdb/issues/4974) Fix Data Race in TSDB when setting measurement field name +- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard! +- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASURMENTS for clusters. Thanks @li-ang! +- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang, Fix [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj +- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei! +- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses +- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires +- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas +- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236 +- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm +- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang +- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node. Thanks @mengjinglei +- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types +- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock +- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires! +- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse. + +## v0.9.5 [2015-11-20] + +### Release Notes + +- Field names for the internal stats have been changed to be more inline with Go style. +- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1. + +There are breaking changes in this release: +- The filesystem hierarchy for packages has been changed, namely: + - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`) + - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`) + - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`) + - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`) + +### Features +- [#4098](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI +- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage +- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged +- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. 
Thanks @sbouchex +- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable +- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function +- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS +- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS +- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator +- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats +- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy +- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff +- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures +- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou +- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser. +- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert! +- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries. +- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input. +- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. +- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. +- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics +- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex. +- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). +- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader +- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client) +- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires +- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements +- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau +- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues +- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners +- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. +- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg! 
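+To illustrate the new `IF EXISTS` support for `DROP DATABASE` listed above, here is a minimal, hypothetical Go sketch (not part of the release; the host, port, and database name are placeholder assumptions) that issues the statement through the HTTP `/query` endpoint:
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// IF EXISTS (added by #4659) should make the drop a no-op rather than an
+	// error when the database is absent. "stress_test" is a placeholder name.
+	params := url.Values{}
+	params.Set("q", "DROP DATABASE IF EXISTS stress_test")
+
+	resp, err := http.Get("http://localhost:8086/query?" + params.Encode())
+	if err != nil {
+		fmt.Println("query failed:", err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println("status:", resp.Status)
+}
+```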
+ +### Bugfixes +- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause +- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order +- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701). +- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0. +- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj +- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj +- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn +- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. +- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW +- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name +- [#4704](https://github.com/influxdata/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires +- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order +- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites +- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier +- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information. +- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service +- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested. +- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database. +- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected +- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions +- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170) +- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown +- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor +- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point +- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values +- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster +- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri! 
+- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap. +- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS. +- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions +- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing +- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson +- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272) +- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader. +- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources +- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database. +- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh. +- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block. +- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib +- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters. +- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski! +- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given. +- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/' +- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests +- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641) +- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes +- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock +- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause +- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442) +- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh +- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing +- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values +- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. 
Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433) +- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck +- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes +- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac. +- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI. +- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint +- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp +- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error +- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message. +- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package +- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto +- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol. +- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates. +- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics +- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj +- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range +- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9 +- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored +- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol @Thanks @ch33hau +- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style. +- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj. +- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines +- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch +- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. +- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. +- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. +- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda + +## v0.9.4 [2015-09-14] + +### Release Notes +With this release InfluxDB is moving to Go 1.5. + +### Features +- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd +- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections +- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. 
Thanks @fg2it +- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5 +- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE +- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented. +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki +- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service +- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input +- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT +- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service +- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc +- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821) +- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer +- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package +- [#4003](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration. +- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher +- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards +- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL. + +### Bugfixes +- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed. +- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic +- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803. +- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last() +- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin +- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement. +- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup +- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset. +- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL +- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic +- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects +- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. 
Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636) +- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention +- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote +- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki! +- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI +- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing +- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past +- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster +- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548. +- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM. +- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error +- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic +- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference +- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038) +- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy. +- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input. +- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause +- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression +- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time" +- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse + +## v0.9.3 [2015-08-26] + +### Release Notes + +There are breaking changes in this release. + - To store data points as integers you must now append `i` to the number if using the line protocol. + - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. + - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details. + - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query. + +Please see the *Features* section below for full details. 
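+To make the first breaking change above concrete, here is a minimal, hypothetical Go sketch (the host, database, measurement, and tag values are placeholder assumptions) that writes one point with an integer field over the HTTP `/write` endpoint using the trailing `i` required as of 0.9.3:
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// "value=42i" is stored as an integer; a bare "value=42" is parsed as a float.
+	// The trailing timestamp is in nanoseconds since the epoch.
+	line := "cpu,host=server01 value=42i 1434055562000000000\n"
+
+	// localhost:8086 and db=mydb are placeholders for this sketch.
+	resp, err := http.Post("http://localhost:8086/write?db=mydb", "text/plain", bytes.NewBufferString(line))
+	if err != nil {
+		fmt.Println("write failed:", err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println("status:", resp.Status) // a successful write responds with 204 No Content
+}
+```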
+ +### Features +- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping +- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster +- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2 +- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins +- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers +- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc +- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes +- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI +- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order +- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields +- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables +- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale +- [#3636](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 +- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename +- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output. +- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. +- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries +- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch +- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction +- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB +- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import +- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation + +### Bugfixes +- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2 +- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write +- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc. +- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2 +- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing +- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time. 
+- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tags causes panic +- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input +- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines +- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function +- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements +- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works +- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor +- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db +- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic +- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers +- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error +- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger +- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement +- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite. +- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field +- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received. +- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless +- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing +- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped. +- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim. +- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim. +- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic. +- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30% +- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting. +- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys. +- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests. +- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff +- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242. 
+- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication +- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash +- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail +- [#3681](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes +- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks +- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme +- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always +- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values +- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time. +- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types +- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point +- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1 +- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! + +## v0.9.2 [2015-07-24] + +### Features +- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham +- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support. +- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho + +### Bugfixes + +- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. +- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable. +- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! +- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface. +- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore. +- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value +- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters +- [#3223](https://github.com/influxdata/influxdb/issues/323): default graphite template cannot have extra tags +- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible. +- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal +- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 +- [#3152](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes +- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False +- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. 
Thanks @jhorwit2 +- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. +- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report +- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. +- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. +- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel +- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke. +- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries. +- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. +- [#3380](https://github.com/influxdata/influxdb/issue/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. +- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces +- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI. +- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. + +## v0.9.1 [2015-07-02] + +### Features + +- [#2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement (example below). Thanks @n1tr0g. +- [#3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing +- [#2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface +- [#3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL) +- [#3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots + +### Bugfixes + +- [#3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas +- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative +- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated +- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader. +- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated +- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name +- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats +- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up. +- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors. +- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart. +- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses. +- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch. +- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified. 
+- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT. +- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes. +- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour. +- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation. +- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes. +- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client. +- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. +- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. +- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value. +- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT. +- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing +- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing +- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @ miguelcnf. +- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing +- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd +- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling +- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result. +- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value +- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database +- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic +- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas +- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd +- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache +- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr +- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable. 
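+As a small, hypothetical sketch of the privilege statements referenced above (GRANT/REVOKE wired up in v0.9.2 and the new SHOW GRANTS FOR USER statement in v0.9.1), the following sends both through the HTTP `/query` endpoint; the host, user name, database, and credentials are placeholder assumptions:
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+)
+
+// runQuery sends a single InfluxQL statement to the /query endpoint. The u and p
+// parameters carry credentials and are only needed when authentication is enabled;
+// every name here is a placeholder for this sketch.
+func runQuery(stmt string) {
+	params := url.Values{}
+	params.Set("q", stmt)
+	params.Set("u", "admin")
+	params.Set("p", "admin-password")
+
+	resp, err := http.Get("http://localhost:8086/query?" + params.Encode())
+	if err != nil {
+		fmt.Println(stmt, "failed:", err)
+		return
+	}
+	defer resp.Body.Close()
+	body, _ := ioutil.ReadAll(resp.Body)
+	fmt.Println(stmt, "->", resp.Status, string(body))
+}
+
+func main() {
+	runQuery("GRANT READ ON mydb TO jdoe") // grant a database-level privilege
+	runQuery("SHOW GRANTS FOR jdoe")       // then list the user's grants
+}
+```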
+ +## v0.9.0 [2015-06-11] + +### Bugfixes + +- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic +- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors +- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields +- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists +- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists +- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary +- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary +- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore +- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! +- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size +- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers +- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting. +- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values. +- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists + +## v0.9.0-rc33 [2015-06-09] + +### Bugfixes + +- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan- +- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao +- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service. +- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite. +- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component. +- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service. +- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo +- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values. +- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid. +- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions. +- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation. +- [#2865](https://github.com/influxdata/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata. + +### Features +- [#2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency. + +## v0.9.0-rc32 [2015-06-07] + +### Release Notes + +This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released. + +### Features
- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values. 
+- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings. +- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md +- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs. +- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs. +- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. +- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. +- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! + +### Bugfixes +- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement. +- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. +- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM //. +- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart +- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! + +## v0.9.0-rc31 [2015-05-21] + +### Features +- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate +- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function +- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time` +- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate +- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m` + +### Bugfixes +- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. +- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes! +- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster. +- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. +- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation +- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when inserting data points. +- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field. +- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with a tag as a value causes panic. +- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field. +- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause. +- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. 
+ +## PRs +- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions +- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements +- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers. + +## v0.9.0-rc30 [2015-05-12] + +### Release Notes + +This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`. + +### Features +- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate +- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP +- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK` +- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! +- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. +- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT. + +### Bugfixes +- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. +- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated. +- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart +- [#2536](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster. +- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! +- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! +- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging. +- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message. +- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. +- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. +- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! +- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine. +- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution. +- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression. + +## v0.9.0-rc29 [2015-05-05] + +### Features +- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. +- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. +- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! + +### Bugfixes +- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. 
Thanks @neonstalwart +- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters +- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo(). +- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick +- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging paniced with chunked requests. Thanks @Jackkoz +- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shards groups are required during write. +- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart +- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex + +## v0.9.0-rc28 [2015-04-27] + +### Features +- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers +- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart! + +### Bugfixes +- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile +- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes +- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error +- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop +- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state +- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server. +- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server. +- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null. +- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils +- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during "drop series". +- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes + +## v0.9.0-rc27 [04-23-2015] + +### Features +- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards. + +### Bugfixes +- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint. +- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352 +- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint. +- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. +- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. 
+- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times +- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster. +- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart! +- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled +- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present + +## v0.9.0-rc26 [04-21-2015] + +### Features +- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover +- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes +- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes + +### Bugfixes +- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. +- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries +- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. +- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests +- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ +- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries +- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() +- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes +- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY +- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. +- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been +- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. +- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. +- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 +- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates +- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic +- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a +- [#2350](https://github.com/influxdata/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. +- [#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. + +## v0.9.0-rc25 [2015-04-15] + +### Bugfixes +- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. +- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. 
+- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker. +- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. +- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug. +- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error). + +## Features +- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation. +- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! +- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! +- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! +- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled. + +## v0.9.0-rc24 [2015-04-13] + +### Bugfixes +- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy. +- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. +- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types. +- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill" +- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints +- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI. + +## v0.9.0-rc23 [2015-04-11] + +### Features +- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries +- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. + +### Bugfixes +- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive +- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement +- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof +- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. + +## v0.9.0-rc22 [2015-04-09] + +### Features +- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g + +### Bugfixes +- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote + +## v0.9.0-rc21 [2015-04-09] + +### Features +- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate +- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body +- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes +- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. 
Thanks @n1tr0g +- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs +- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor + +### Bugfixes +- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". +- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status. +- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open(). +- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries. +- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries. +- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater. +- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium + +## v0.9.0-rc20 [2015-04-04] + +### Features +- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers +- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries +- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection +- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging + +### Bugfixes +- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location +- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change. +- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore. +- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""' +- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server. +- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage. +- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags. +- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable. +- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery. +- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type. +- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist. +- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused. + +## v0.9.0-rc19 [2015-04-01] + +### Features +- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging. + +### Bugfixes +- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`. + +## v0.9.0-rc18 [2015-03-31] + +### Bugfixes +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown. +- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index. 
+- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags().
+- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags().
+- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
+- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ for tag values.
+- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache.
+
+## v0.9.0-rc17 [2015-03-29]
+
+### Features
+- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script
+- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint.
+- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database.
+- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed
+
+### Bugfixes
+- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
+- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allow leading underscores in identifiers.
+- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
+- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
+- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters.
+- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
+- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
+- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.
+
+## v0.9.0-rc16 [2015-03-24]
+
+### Features
+- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats.
+- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client.
+- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.
+- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version.
+- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore.
+- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.
+
+### Bugfixes
+- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level.
+- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails.
+- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate.
+- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command.
+- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite.
+- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart.
+- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field.
+- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY.
+
+## v0.9.0-rc15 [2015-03-19]
+
+### Features
+- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst.
+- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats.
+
+### Bugfixes
+- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200.
+- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek.
+- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats.
+- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000).
+- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann
+- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern
+
+
+## v0.9.0-rc14 [2015-03-18]
+
+### Bugfixes
+- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series.
+
+## v0.9.0-rc13 [2015-03-17]
+
+### Features
+- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs.
+
+### Bugfixes
+- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization.
+- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint.
+- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config.
+- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided.
+- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions.
+
+## v0.9.0-rc12 [2015-03-15]
+
+### Bugfixes
+- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names.
+- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64.
+- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio
+- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio
+
+### Features
+- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft.
+- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring
+- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command.
+
+## v0.9.0-rc11 [2015-03-13]
+
+### Bugfixes
+- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed.
+- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test.
+- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created.
+- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified.
+- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. + +### Features +- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. +- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. + +## v0.9.0-rc10 [2015-03-09] + +### Bugfixes +- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdata/influxdb/pull/1864): fix race in startStateLoop +- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs +- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. +- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select + +### Features +- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` + +## v0.9.0-rc9 [2015-03-06] + +### Bugfixes +- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft + +## v0.9.0-rc8 [2015-03-05] + +### Bugfixes +- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. +- [#1789](https://github.com/influxdata/influxdb/pull/1789): add --config-files option to fpm command. Thanks @kylezh +- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist + +### Features +- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path + +## v0.9.0-rc7 [2015-03-02] + +### Features +- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. +- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. 
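+
+For reference, a minimal InfluxQL sketch of the query forms mentioned in the entries above (a `WHERE` clause against a field, and `fill()` with `GROUP BY time()`); the measurement and field names (`cpu`, `value`) are made up for illustration and are not taken from any entry:
+
+```sql
+-- filter on a field value (hypothetical measurement and field)
+SELECT value FROM cpu WHERE value > 90
+-- aggregate into 10-minute buckets and omit empty buckets
+SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(10m) fill(none)
+```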
+
+### Bugfixes
+
+- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh
+- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos
+
+## v0.9.0-rc6 [2015-02-27]
+
+### Bugfixes
+
+- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser
+- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries
+- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.
+
+## v0.9.0-rc5 [2015-02-27]
+
+### Bugfixes
+
+- [#1752](https://github.com/influxdata/influxdb/pull/1752): remove debug log output from collectd.
+- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits.
+- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761.
+- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval
+- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`
+
+### Features
+
+- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT
+
+## v0.9.0-rc4 [2015-02-24]
+
+### Bugfixes
+
+- Fix authentication issue with continuous queries
+- Print version in the log on startup
+
+## v0.9.0-rc3 [2015-02-23]
+
+### Features
+
+- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`
+- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types
+- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE
+- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries
+- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement
+- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE
+- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon
+- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work
+- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function
+- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions
+- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function
+- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API
+- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies
+- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE
+- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query.
+To limit the number of data points use a `WHERE time` clause
+
+### Bugfixes
+
+- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE
+- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes
+- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance
+- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)
+- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field
+- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters.
+
+
+## v0.9.0-rc1,2 [no public release]
+
+### Features
+
+- Support for tags added
+- New queries for showing measurement names, tag keys, and tag values
+- Renamed shard spaces to retention policies
+- Deprecated matching against regex in favor of explicit writing and querying on retention policies
+- Pure Go InfluxQL parser
+- Switch to BoltDB as underlying datastore
+- BoltDB backed metastore to store schema information
+- Updated HTTP API to only have two endpoints `/query` and `/write`
+- Added all administrative functions to the query language
+- Change cluster architecture to have brokers and data nodes
+- Switch to streaming Raft implementation
+- In memory inverted index of the tag data
+- Pure Go implementation!
+
+## v0.8.6 [2014-11-15]
+
+### Features
+
+- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support
+  joining using a regex or list of time series
+- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print
+  the processor chain when the query is started
+
+### Bugfixes
+
+- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't
+  panic if the process died while initializing
+- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make
+  sure all sub services are closed when we are stopping InfluxDB
+- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix
+  the Makefile package target for Mac OSX
+- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use
+  su instead of sudo in the init script. This fixes the startup problem
+  on RHEL 6.
+- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't
+  generate invalid query strings for single point queries
+- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't
+  take two snapshots at the same time
+- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit
+  nicely if the daemon doesn't have permission to write to the log.
+- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using
+  closed connections in the protobuf client.
+- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check
+  for valgrind and mercurial in the configure script
+- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should
+  fill the time range even if no points exist in the given time range
+- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return
+  an appropriate exit status code depending on whether the process exits
+  due to an error or exits gracefully.
+- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting
+  open files limit causes influxdb to create shards in a loop.
+- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix
+  deprecated interface endpoint in Admin UI.
+- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix
+  the timestamps of data points written by the collectd plugin. (Thanks,
+  @renchap for reporting this bug)
+- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078). Make sure
+  we don't resurrect shard directories for shards that have already expired
+- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). Set
+  the connection string of the local raft node
+- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set
+  the connection string of the local node in the raft snapshot.
+- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing
+  a non-existent shard space causes the cluster to panic.
+- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil
+  engine.ProcessorChain causes a panic.
+
+## v0.8.5 [2014-10-27]
+
+### Features
+
+- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow
+  graphite and collectd input plugins to have separate binding address
+
+### Bugfixes
+
+- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use
+  the query language instead of the continuous query endpoints that
+  were removed in 0.8.4
+- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return
+  an +Inf or NaN instead of panicking when we encounter a divide by zero
+- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't
+  scan through points when we hit the limit
+- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix
+  timestamps when collectd is used and low resolution timestamps
+  are set.
+
+## v0.8.4 [2014-10-24]
+
+### Bugfixes
+
+- Remove the continuous query api endpoints since the query language
+  has all the features needed to list and delete continuous queries.
+- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting
+  from a non-existent series should give a better error message indicating
+  that the series doesn't exist
+- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check
+  the arguments of `top()` and `bottom()`
+- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make
+  redirecting to standard output and standard error optional instead of
+  going to `/dev/null`. This can now be configured by setting `$STDOUT`
+  in `/etc/default/influxdb`
+- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make
+  sure we drop a shard only when there's no one using it. Otherwise, the
+  shard can be closed when another goroutine is writing to it which will
+  cause random errors and possibly corruption of the database.
+
+### Features
+
+- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow
+  merge() to take a list of series (as opposed to a regex in #72)
+
+## v0.8.4-rc.1 [2014-10-21]
+
+### Bugfixes
+
+- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). Revert
+  to older raft snapshot if the latest one is corrupted
+- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying
+  for data outside of existing shards returns an empty response instead of
+  throwing a `Couldn't lookup columns` error
+- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change
+  init script exit codes to conform to the lsb standards. (Thanks, @spuder)
+- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix
+  the tarball for homebrew so that rocksdb is included and the directory
+  structure is clean
+- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix
+  the content type when an error occurs and the client requests
+  compression.
+- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set
+  the ulimit in the init script with a way to override the limit
+- [Issue #742](https://github.com/influxdata/influxdb/issues/742). Fix
+  rocksdb for Mac OSX
+- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations
+  with group by time(1w), time(1m) and time(1y) (for week, month and
+  year respectively) will cause the start time and end time of the bucket
+  to fall on the logical boundaries of the week, month or year.
+- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative
+  for queries with group by time() and fill(), will take the difference
+  between the first value in the bucket and the first value of the next
+  bucket.
+- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't
+  assign duplicate server ids
+
+### Features
+
+- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add
+  an install target to the Makefile
+- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include
+  the admin ui static assets in the binary
+- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade
+  to rocksdb 3.5.1
+- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add
+  an input plugin for collectd. (Thanks, @kimor79)
+- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge
+  for multiple series using regex syntax
+
+## v0.8.3 [2014-09-24]
+
+### Bugfixes
+
+- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple
+  queries separated by semicolons work as expected. Queries are processed
+  sequentially
+- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an
+  error if an invalid column is used in the where clause
+- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case
+  insensitive regex matching
+- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move
+  cluster config from raft to API.
+- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't
+  panic on invalid boolean operators.
+- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names
+- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix
+  fill() for all aggregators
+- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose
+  table names in double quotes in the result of GetQueryString()
+- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an
+  error if the storage engine can't be created
+- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically
+  create shards which was causing too many shards to be created when used with
+  grafana
+- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should
+  ignore null values and invalid values, e.g. strings with mean().
+- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse
+  big int in queries properly.
+
+## v0.8.2 [2014-09-05]
+
+### Bugfixes
+
+- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults
+
+- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series
+
+- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return
+  a meaningful error if an invalid column is used in where clause
+  after joining multiple series
+
+## v0.8.2 [2014-09-08]
+
+### Features
+
+- Added API endpoint to update shard space definitions
+
+### Bugfixes
+
+- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB
+
+## v0.8.1 [2014-09-03]
+
+### Features
+
+- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe
+
+### Bugfixes
+
+- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x
+- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled
+- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord)
+- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle)
+
+## v0.8.0 [2014-08-22]
+
+### Features
+
+- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative
+
+### Bugfixes
+
+- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe.
+- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces.
+- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time.
+
+## v0.8.0-rc.5 [2014-08-15]
+
+### Features
+
+- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering
+- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config
+- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0
+
+### Bugfixes
+
+- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested
+- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non existent fields when joining series with different fields
+- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally
+- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire
+- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non existent fields when joining series with different fields
+- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint
+- [Issue #809](https://github.com/influxdata/influxdb/issues/809).
Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order +- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific + +## v0.8.0-rc.4 [2014-07-29] + +### Bugfixes + +- [Issue #774](https://github.com/influxdata/influxdb/issues/774). Don't try to parse "inf" shard retention policy +- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo) +- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series +- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value +- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join + +## v0.8.0-rc.3 [2014-07-21] + +### Bugfixes + +- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt +- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep) +- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo) +- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo) +- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly. + + +## v0.8.0-rc.2 [2014-07-15] + +- This release is to fix a build error in rc1 which caused rocksdb to not be available +- Bump up the `max-open-files` option to 1000 on all storage engines +- Lower the `write-buffer-size` to 1000 + +## v0.8.0-rc.1 [2014-07-15] + +### Features + +- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep) +- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines +- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton) +- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data +- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86) +- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika) +- [Issue #689](https://github.com/influxdata/influxdb/issues/689). 
**REQUIRES DATA MIGRATION** Move metadata into raft
+- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix
+- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous
+- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies
+- Default storage engine changed to RocksDB
+
+### Bugfixes
+
+- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe)
+- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs
+- [Issue #676](https://github.com/influxdata/influxdb/issues/676). Allow storing high precision integer values without losing any information
+- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)
+- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false
+- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled
+- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work with payloads delimited by any whitespace character
+- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes
+- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore
+- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series
+- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error
+- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database
+- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient
+
+## v0.7.3 [2014-06-13]
+
+### Bugfixes
+
+- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly
+- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.
+
+## v0.7.2 [2014-05-30]
+
+### Features
+
+- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)
+
+### Bugfixes
+
+- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things.
+- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist
+- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards
+
+## v0.7.1 [2014-05-29]
+
+### Bugfixes
+
+- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases
+- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data
+
+### Features
+
+- [Issue #476](https://github.com/influxdata/influxdb/issues/476).
Support ARM architecture +- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis +- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster +- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale) +- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day. + +## v0.7.0 [2014-05-23] + +### Bugfixes + +- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works +- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra) +- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM +- [Issue #524](https://github.com/influxdata/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together +- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors +- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp +- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error +- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic +- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly +- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq +- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random +- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a + race condition in assigning id to db+series+field (Thanks @ohurvitz + for reporting this bug and providing a script to repro) + +### Features + +- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri) + +### Deprecated + +- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing +- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root +- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins + +## v0.6.5 [2014-05-19] + +### Features + +- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie) + +### Bugfixes + +- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format + +## v0.6.4 [2014-05-16] + +### Features + +- Make the write batch size configurable (also applies to deletes) +- Optimize writing to multiple series +- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri) + +### Bugfixes + +- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards +- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command + +## v0.6.3 [2014-05-13] + +### Features + +- [Issue #505](https://github.com/influxdata/influxdb/issues/505). 
Return a version header with the http response (Thanks, @majst01)
+- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file
+
+### Bugfixes
+
+- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used
+- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error
+- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received
+- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns
+- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic
+- [Issue #539](https://github.com/influxdata/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups
+- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating
+
+## v0.6.2 [2014-05-09]
+
+### Bugfixes
+
+- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created
+- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values
+- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json
+- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files
+- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery
+
+## v0.6.1 [2014-05-06]
+
+### Bugfixes
+
+- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations
+- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected
+- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster
+- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards
+- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns
+- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off
+
+## v0.6.0 [2014-05-02]
+
+### Features
+
+- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)
+- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller)
+
+### Bugfixes
+
+- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped
+- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file
+- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller)
+- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series
+- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller)
+- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly
+
+## v0.5.12 [2014-04-29]
+
+### Bugfixes
+
+- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user
+
+## v0.5.11 [2014-04-25]
+
+### Features
+
+- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api
+
+### Bugfixes
+
+- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops
+- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization
+
+## v0.5.10 [2014-04-22]
+
+### Features
+
+- [Issue #463](https://github.com/influxdata/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)
+- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames
+- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)
+
+### Bugfixes
+
+- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
+- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work
+
+## v0.5.9 [2014-04-18]
+
+### Bugfixes
+
+- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors
+- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value
+- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values
+
+## v0.5.8 [2014-04-17]
+
+- Renamed config.toml.sample to config.sample.toml
+
+### Bugfixes
+
+- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast
+- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
+- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that
+  aggregation queries over large periods of time don't take an insane amount of memory
+
+## v0.5.7 [2014-04-15]
+
+### Features
+
+- Queries are now logged as INFO in the log file before they run
+
+### Bugfixes
+
+- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work
+- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs
+- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected
+  causing count(*) queries on large time series to use
+  lots of memory
+- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly
+- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart
+- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query
+- Fix some bugs with the WAL recovery on startup
+
+## v0.5.6 [2014-04-08]
+
+### Features
+
+- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries
+- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database
+
+### Bugfixes
+
+- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second
+- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user
+- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option
+- [Issue #392](https://github.com/influxdata/influxdb/issues/392). Different columns in different shards return invalid results when a query spans those shards
+
+## v0.5.5 [2014-04-04]
+
+- Upgrade leveldb 1.10 -> 1.15
+
+  This should be a backward compatible change, but is here for documentation only
+
+### Features
+
+- Add a command line option to repair corrupted leveldb databases on startup
+- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause
+
+### Bugfixes
+
+- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang
+- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries
+- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server
+- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values
+- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics
+- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin
+- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized
+- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131
+
+## v0.5.4 [2014-04-02]
+
+### Bugfixes
+
+- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots
+- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely
+- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable
+- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT)
+- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation
+
+## v0.5.3 [2014-03-31]
+
+### Bugfixes
+
+- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index
+- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards
+- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses
+- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin
+
+## v0.5.2 [2014-03-28]
+
+### Bugfixes
+
+- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart
+- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out
+- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings
+- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause
+- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Series index isn't deleted when the series is dropped
+- [Issue #360](https://github.com/influxdata/influxdb/issues/360). Store and recover continuous queries
+
+## v0.5.1 [2014-03-24]
+
+### Bugfixes
+
+- Revert the version of goraft due to a bug found in the latest version
+
+## v0.5.0 [2014-03-24]
+
+### Features
+
+- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener
+
+### Bugfixes
+
+- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order
+
+## v0.5.0-rc.6 [2014-03-20]
+
+### Bugfixes
+
+- Increase raft election timeout to avoid unnecessary re-elections
+- Sort points before writing them to avoid an explosion in the request
+  number when the points are written randomly
+- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries
+- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries
+- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail
+  when the password is too short or too long instead of passing it to
+  the crypto library
+
+## v0.5.0-rc.5 [2014-03-11]
+
+### Bugfixes
+
+- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering
+- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
+- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user
+- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make
+  sure we don't starve goroutines if we get an access denied error
+  from one of the shards
+- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory
+- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points
+- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314
+- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly
+
+## v0.5.0-rc.4 [2014-03-07]
+
+### Bugfixes
+
+- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards
+- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart
+- [Issue #309](https://github.com/influxdata/influxdb/issues/309).
Don't relog the requests on the remote server +- Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc) + +## v0.5.0-rc.3 [2014-03-03] + +### Bugfixes +- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases +- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable +- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story +- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable +- Make LevelDB use the max open files configuration option. + +## v0.5.0-rc.2 [2014-02-27] + +### Bugfixes + +- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart +- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created +- [Issue #279](https://github.com/influxdata/influxdb/issues/279). Limits not working on regex queries +- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source +- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic. +- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server id + +## v0.5.0-rc.1 [2014-02-25] + +### Bugfixes + +- Ensure large deletes don't take too much memory +- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name. +- [Issue #250](https://github.com/influxdata/influxdb/pull/250). different result between normal and continuous query with "group by" clause +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +### Features + +- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes. +- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards +- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup + +### Deprecated + +- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key. +- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points + +## v0.4.4 [2014-02-05] + +### Features + +- Make the leveldb max open files configurable in the toml file + +## v0.4.3 [2014-01-31] + +### Bugfixes + +- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore +- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic +- [Issue #224](https://github.com/influxdata/influxdb/issues/224). 
Null values broke replication due to protobuf limitation + +## v0.4.1 [2014-01-30] + +### Features + +- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy +- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL. +- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface. + +### Bugfixes + +- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request +- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy. +- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable +- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail +- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart. + +## v0.4.0 [2014-01-17] + +## Features + +- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause +- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>' +- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings +- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs) +- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused +- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition +- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates +- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values +- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster +- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries +- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging +- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file +- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language +- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes. +- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =` +- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent. +- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series +- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language. +- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction. +- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries + +### Bugfixes + +- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic +- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 
'Group by' combined with 'where' not working +- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative +- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that reference columns with null values +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries +- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal +- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails +- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time +- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values +- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. +- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing. +- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays +- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time. +- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes +- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. +- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with invalid limit clause crash the server + +### Deprecated + +- deprecate '==' and '!=' in favor of '=' and '<>', respectively +- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` +- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should + be used to update user flags, password, etc. +- Querying for column names that don't exist no longer throws an error. + +## v0.3.2 + +## Features + +- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces. +- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time +- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN + +## Bugfixes + +- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore +- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before + +## v0.3.0 + +## Features + +- [Issue #51](https://github.com/influxdata/influxdb/issues/51). Implement first and last aggregates +- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in Join Queries +- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query +- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k +- [Issue #59](https://github.com/influxdata/influxdb/issues/59). 
Add histogram aggregate function + +## Bugfixes + +- Fix join and merges when the query is a descending order query +- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float +- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column + +## v0.2.0 + +### Features + +- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report + +### Bugfixes + +- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api +- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs +- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped +- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdata/influxdb/issues/44). Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value +- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause +- [Issue #53](https://github.com/influxdata/influxdb/issues/53). Writing null values via HTTP API fails + +### Deprecated + +- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users` +- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should + be used to update user flags, password, etc. + +## v0.1.0 + +### Features + +- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication. + +### Bugfixes + +- Don't allow creating users with empty username +- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set +- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values +- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli + +## v0.0.9 + +#### Features + +- Add stddev(...) support +- Better docs, thanks @auxesis and @d-snp. + +#### Bugfixes + +- Set PYTHONPATH and CC appropriately on mac os x. +- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 debian and redhat packages +- [Issue #23](https://github.com/influxdata/influxdb/issues/23). 
Fix the init scripts on redhat
+
+## v0.0.8
+
+#### Features
+
+- Add a way to reset the root password from the command line.
+- Add distinct(..) and derivative(...) support
+- Print test coverage if running go1.2
+
+#### Bugfixes
+
+- Fix the default admin site path in the .deb and .rpm packages.
+- Fix the configuration filename in the .tar.gz package.
+
+## v0.0.7
+
+#### Features
+
+- Include the admin site in the repo to make it easier for newcomers.
+
+## v0.0.6
+
+#### Features
+
+- Add count(distinct(..)) support
+
+#### Bugfixes
+
+- Reuse levigo read/write options.
+
+## v0.0.5
+
+#### Features
+
+- Cache passwords in memory to speed up password verification
+- Add MERGE and INNER JOIN support
+
+#### Bugfixes
+
+- All columns should be returned if `select *` was used
+- Read/Write benchmarks
+
+## v0.0.2
+
+#### Features
+
+- Add an admin UI
+- Deb and RPM packages
+
+#### Bugfixes
+
+- Fix some nil pointer dereferences
+- Cleanup the aggregators implementation
+
+## v0.0.1 [2013-10-22]
+
+ * Initial Release
diff --git a/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md
new file mode 100644
index 0000000000..6c1d2f2e8b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md
@@ -0,0 +1,82 @@
+_This document is currently in draft form._
+
+# Background
+
+The goal of this guide is to capture some dos and don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However, there are still some practices not enforceable by any tool. This guide lists some specific practices to follow when writing code for the database.
+
+*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices.
+
+# The Guidelines
+
+## Try not to use third-party libraries
+
+A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.
+
+For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).
+
+## Always include a default case in a 'switch' statement
+The lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertion switch. So always include a `default` statement unless you have an explicit reason not to.
+
+## When -- and when not -- to set a channel to 'nil'
+
+## Use defer with anonymous functions to handle complex locking
+Consider a block of code like the following.
+``` + mu.Lock() + if foo == "quit" { + mu.Unlock() + return + } else if foo == "continue" { + if bar == "quit" { + mu.Unlock() + return + } + bar = "still going" + } else { + qux = "here at last" + mu.Unlock() + return + } + foo = "more to do" + bar = "still more to do" + mu.Unlock() + + qux = "finished now" + return +``` +While this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases, and forget to release locks. One way to address this is to use an anonymous function like so: +``` + more := func() bool { + mu.Lock() + defer mu.Unlock() + if foo == "quit" { + return false + } else if foo == "continue" { + if bar == "quit" { + return false + } + bar = "still going" + } else { + qux = "here at last" + return false + } + foo = "more to do" + bar = "still more to do" + return true + }() + + if more { + qux = "finished" + } + return +``` +This allows us to use `defer` but ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. Another advantage of this approach is that `defer` will still run even in the event of a panic, ensuring the locks will be released even in that case. + +## When to call 'panic()' + +# Useful links +- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) +- [Go in production](http://peter.bourgon.org/go-in-production/) +- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) +- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables` + diff --git a/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md new file mode 100644 index 0000000000..3c06275b0a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/CONTRIBUTING.md @@ -0,0 +1,239 @@ +Contributing to InfluxDB +======================== + +Bug reports +--------------- +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. +* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04. +* The version of InfluxDB you are running +* Whether you installed it using a pre-built package, or built it from source. +* A small test case, if applicable, that demonstrates the issues. + +Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** +If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) + +Test cases should be in the form of `curl` commands. 
For example: +```bash +# create database +curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" + +# create retention policy +curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT" + +# write data +curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61" + +# Delete a Measurement +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu' + +# Query the Measurement +# Bug: expected it to return no data, but data comes back. +curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu' +``` +**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. + +Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed. + +Feature requests +--------------- +We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB. + +Contributing to the source code +--------------- + +InfluxDB follows standard Go project structure. This means that all your Go development are done in `$GOPATH/src`. GOPATH can be any directory under which InfluxDB and all its dependencies will be cloned. For full details on the project structure, follow along below. + +You should also read our [coding guide](https://github.com/influxdata/influxdb/blob/master/CODING_GUIDELINES.md), to understand better how to write code for InfluxDB. + +Submitting a pull request +------------ +To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing your performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged. + +There will usually be some back and forth as we finalize the change, but once that completes it may be merged. + +To assist in review for the PR, please add the following to your pull request comment: + +```md +- [ ] CHANGELOG.md updated +- [ ] Rebased/mergable +- [ ] Tests pass +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) +``` + +Signing the CLA +--------------- + +If you are going to be contributing back to InfluxDB please take a +second to sign our CLA, which can be found +[on our website](https://influxdata.com/community/cla/). + +Installing Go +------------- +InfluxDB requires Go 1.4.3. + +At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions +on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). 
+
+After installing gvm you can install and set the default go version by running the following:
+
+    gvm install go1.4.3
+    gvm use go1.4.3 --default
+
+Installing GDM
+-------------
+InfluxDB uses [gdm](https://github.com/sparrc/gdm) to manage dependencies. Install it by running the following:
+
+    go get github.com/sparrc/gdm
+
+Revision Control Systems
+-------------
+Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following RCS software on your system.
+Currently, the project only depends on `git` and `mercurial`.
+
+* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)
+* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)
+
+Getting the source
+------
+Set up the project structure and fetch the repo like so:
+
+```bash
+    mkdir $HOME/gocodez
+    export GOPATH=$HOME/gocodez
+    go get github.com/influxdata/influxdb
+```
+
+You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it every time.
+
+Cloning a fork
+-------------
+If you wish to work with a fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork:
+
+```bash
+    export GOPATH=$HOME/gocodez
+    mkdir -p $GOPATH/src/github.com/influxdata
+    cd $GOPATH/src/github.com/influxdata
+    git clone git@github.com:<username>/influxdb
+```
+
+Retaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly.
+
+Build and Test
+-----
+
+Make sure you have Go installed and the project structure as shown above. To then get the dependencies for the project, execute the following commands:
+
+```bash
+cd $GOPATH/src/github.com/influxdata/influxdb
+gdm restore
+```
+
+To then build and install the binaries, run the following command.
+```bash
+go clean ./...
+go install ./...
+```
+The binaries will be located in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`.
+
+To set the version and commit flags during the build, pass the following to the **install** command:
+
+```bash
+-ldflags="-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT"
+```
+
+where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash.
+
+If you want to build packages, see `package.sh` help:
+```bash
+package.sh -h
+```
+
+To run the tests, execute the following command:
+
+```bash
+cd $GOPATH/src/github.com/influxdata/influxdb
+go test -v ./...
+
+# run tests that match some pattern
+go test -run=TestDatabase . -v
+
+# run tests and show coverage
+go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover
+```
+
+To install go cover, run the following command:
+```
+go get golang.org/x/tools/cmd/cover
+```
+
+Generated Google Protobuf code
+-----------------
+Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain.
+
+First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/
+) 2.6.1 or later for your OS:
+
+Then install the go plugins:
+
+```bash
+go get github.com/gogo/protobuf/proto
+go get github.com/gogo/protobuf/protoc-gen-gogo
+go get github.com/gogo/protobuf/gogoproto
+```
+
+Finally, run `go generate` after updating any `*.proto` file:
+
+```bash
+go generate ./...
+```
+**Troubleshooting**
+
+If generating the protobuf code is failing for you, check each of the following:
+* Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed.
+* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`.
+
+Pre-commit checks
+-------------
+
+We have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following:
+```bash
+    cd $GOPATH/src/github.com/influxdata/influxdb
+    cp .hooks/pre-commit .git/hooks/
+```
+In case the commit is rejected because it's not formatted, you can run the following to format the code:
+
+```
+go fmt ./...
+go vet ./...
+```
+
+To install go vet, run the following command:
+```
+go get golang.org/x/tools/cmd/vet
+```
+
+NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above.
+
+For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).
+
+Profiling
+-----
+When troubleshooting problems with CPU or memory, the Go toolchain can be helpful. You can start InfluxDB with CPU and memory profiling turned on. For example:
+
+```sh
+# start influx with profiling
+./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof
+# run queries, writes, whatever you're testing
+# Quit out of influxd and influxd.prof will then be written.
+# open up pprof to examine the profiling data.
+go tool pprof ./influxd influxd.prof
+# once inside run "web", opens up browser with the CPU graph
+# can also run "web " to zoom in. Or "list " to see specific lines
+```
+Note that when you pass the binary to `go tool pprof` *you must specify the path to the binary*.
+
+Continuous Integration testing
+-----
+InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdata/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.
diff --git a/vendor/github.com/influxdata/influxdb/DOCKER.md b/vendor/github.com/influxdata/influxdb/DOCKER.md
new file mode 100644
index 0000000000..e78187d9b7
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/DOCKER.md
@@ -0,0 +1,44 @@
+# Docker Setup
+========================
+
+This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working docker environment.
+ +## Building Image + +To build a docker image for InfluxDB from your current checkout, run the following: + +``` +$ ./build-docker.sh +``` + +This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. + +To build the image using a different version of go: + +``` +$ GO_VER=1.4.2 ./build-docker.sh +``` + +Available version can be found [here](https://hub.docker.com/_/golang/). + +## Single Node Container + +This will start an interactive, single-node, that publishes the containers port `8086` and `8088` to the hosts ports `8086` and `8088` respectively. This is identical to starting `influxd` manually. + +``` +$ docker run -it -p 8086:8086 -p 8088:8088 influxdb +``` + +## Multi-Node Cluster + +This will create a simple 3-node cluster. The data is stored within the container and will be lost when the container is removed. This is only useful for test clusters. + +The `HOST_IP` env variable should be your host IP if running under linux or the virtualbox VM IP if running under OSX. On OSX, this would be something like: `$(docker-machine ip dev)` or `$(boot2docker ip)` depending on which docker tool you are using. + +``` +$ export HOST_IP= +$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088 +$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088 +$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088 +``` + diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile b/vendor/github.com/influxdata/influxdb/Dockerfile new file mode 100644 index 0000000000..d30cd300db --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile @@ -0,0 +1,24 @@ +FROM busybox:ubuntu-14.04 + +MAINTAINER Jason Wilder "" + +# admin, http, udp, cluster, graphite, opentsdb, collectd +EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826 + +WORKDIR /app + +# copy binary into image +COPY influxd /app/ + +# Add influxd to the PATH +ENV PATH=/app:$PATH + +# Generate a default config +RUN influxd config > /etc/influxdb.toml + +# Use /data for all disk storage +RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml + +VOLUME ["/data"] + +ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 new file mode 100644 index 0000000000..bb0fd65331 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 @@ -0,0 +1,35 @@ +FROM 32bit/ubuntu:14.04 + +RUN apt-get update && apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.4.3 +ENV GO_ARCH 386 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 
b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 new file mode 100644 index 0000000000..82a6816f4c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 @@ -0,0 +1,35 @@ +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.4.3 +ENV GO_ARCH amd64 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR + +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git new file mode 100644 index 0000000000..a54bbf7190 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git @@ -0,0 +1,43 @@ +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y \ + python-software-properties \ + software-properties-common \ + wget \ + git \ + mercurial \ + make \ + ruby \ + ruby-dev \ + rpm \ + zip \ + python \ + python-boto + +RUN gem install fpm + +# Setup env +ENV GOPATH /root/go +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR + +VOLUME $PROJECT_DIR + + +# Install go +ENV GO_VERSION 1.4.3 +ENV GO_ARCH amd64 +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz + +# Clone Go tip for compilation +ENV GOROOT_BOOTSTRAP /usr/local/go +RUN git clone https://go.googlesource.com/go +ENV PATH /go/bin:$PATH + +# Add script for compiling go +ENV GO_CHECKOUT master +ADD ./gobuild.sh /gobuild.sh +ENTRYPOINT [ "/gobuild.sh" ] diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 new file mode 100644 index 0000000000..2bd6b62fc4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32 @@ -0,0 +1,12 @@ +FROM 32bit/ubuntu:14.04 + +RUN apt-get update && apt-get install -y python-software-properties software-properties-common git +RUN add-apt-repository ppa:evarlast/golang1.4 +RUN apt-get update && apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go + +ENV GOPATH=/root/go +RUN mkdir -p /root/go/src/github.com/influxdata/influxdb +RUN mkdir -p /tmp/artifacts + +VOLUME /root/go/src/github.com/influxdata/influxdb +VOLUME /tmp/artifacts diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps new file mode 100644 index 0000000000..d429ffc4e2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Godeps @@ -0,0 +1,22 @@ +collectd.org 9fc824c70f713ea0f058a07b49a4c563ef2a3b98 +github.com/BurntSushi/toml a4eecd407cf4129fc902ece859a0114e4cf1a7f4 +github.com/armon/go-metrics 345426c77237ece5dab0e1605c3e4b35c3f54757 +github.com/bmizerany/pat 
b8a35001b773c267eb260a691f4e5499a3531600 +github.com/boltdb/bolt 2f846c3551b76d7710f159be840d66c3d064abbe +github.com/davecgh/go-spew fc32781af5e85e548d3f1abaf0fa3dbe8a72495c +github.com/dgryski/go-bits 86c69b3c986f9d40065df5bd8f765796549eef2e +github.com/dgryski/go-bitstream 27cd5973303fde7d914860be1ea4b927a6be0c92 +github.com/gogo/protobuf 82d16f734d6d871204a3feb1a73cb220cc92574c +github.com/golang/snappy 5979233c5d6225d4a8e438cdd0b411888449ddab +github.com/hashicorp/go-msgpack fa3f63826f7c23912c15263591e65d54d080b458 +github.com/hashicorp/raft 8fd9a2fdfd154f4b393aa24cff91e3c317efe839 +github.com/hashicorp/raft-boltdb d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee +github.com/influxdata/usage-client 475977e68d79883d9c8d67131c84e4241523f452 +github.com/jwilder/encoding 07d88d4f35eec497617bee0c7bfe651a796dae13 +github.com/kimor79/gollectd 61d0deeb4ffcc167b2a1baa8efd72365692811bc +github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 +github.com/peterh/liner ad1edfd30321d8f006ccf05f1e0524adeb943060 +github.com/rakyll/statik 274df120e9065bdd08eb1120e0375e3dc1ae8465 +golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532 +golang.org/x/tools 8b178a93c1f5b5c8f4e36cd6bd64e0d5bf0ee180 +gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715 diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE new file mode 100644 index 0000000000..63cef79ba6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2016 Errplane Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md new file mode 100644 index 0000000000..7aae45f9db --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -0,0 +1,19 @@ +# List +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) +- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) +- github.com/rakyll/statik/fs [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE) +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) +- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) +- glyphicons [LICENSE](http://glyphicons.com/license/) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) +- golang.org/x/crypto/* [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) + diff --git a/vendor/github.com/influxdata/influxdb/Makefile b/vendor/github.com/influxdata/influxdb/Makefile new file mode 100644 index 0000000000..9fa9c8269c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Makefile @@ -0,0 +1,39 @@ +PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique) + +default: + +metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck + +deadcode: + @deadcode $(PACKAGES) 2>&1 + +cyclo: + @gocyclo -over 10 $(PACKAGES) + +aligncheck: + @aligncheck $(PACKAGES) + +defercheck: + @defercheck $(PACKAGES) + + +structcheck: + @structcheck $(PACKAGES) + +lint: + @for pkg in $(PACKAGES); do golint $$pkg; done + +errcheck: + @for pkg in $(PACKAGES); do \ + errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg \ + done + +tools: + go get github.com/remyoudompheng/go-misc/deadcode + go get github.com/alecthomas/gocyclo + go get github.com/opennota/check/... + go get github.com/golang/lint/golint + go get github.com/kisielk/errcheck + go get github.com/sparrc/gdm + +.PHONY: default,metalint,deadcode,cyclo,aligncheck,defercheck,structcheck,lint,errcheck,tools diff --git a/vendor/github.com/influxdata/influxdb/QUERIES.md b/vendor/github.com/influxdata/influxdb/QUERIES.md new file mode 100644 index 0000000000..46a9eb1daf --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/QUERIES.md @@ -0,0 +1,180 @@ +The top level name is called a measurement. These names can contain any characters. 
Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore, anywhere a measurement name, field key, or tag key appears it should be wrapped in double quotes.
+
+# Databases & retention policies
+
+```sql
+-- create a database
+CREATE DATABASE <name>
+
+-- create a retention policy
+CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT]
+
+-- alter retention policy
+ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+
+
+-- drop a database
+DROP DATABASE <name>
+
+-- drop a retention policy
+DROP RETENTION POLICY <rp-name> ON <db-name>
+```
+where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer.
+
+If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.
+
+# Users and permissions
+
+```sql
+-- create user
+CREATE USER <name> WITH PASSWORD '<password>'
+
+-- grant privilege on a database
+GRANT <privilege> ON <db> TO <user>
+
+-- grant cluster admin privileges
+GRANT ALL [PRIVILEGES] TO <user>
+
+-- revoke privilege
+REVOKE <privilege> ON <db> FROM <user>
+
+-- revoke all privileges for a DB
+REVOKE ALL [PRIVILEGES] ON <db> FROM <user>
+
+-- revoke all privileges including cluster admin
+REVOKE ALL [PRIVILEGES] FROM <user>
+
+-- combine db creation with privilege assignment (user must already exist)
+CREATE DATABASE <name> GRANT <privilege> TO <user>
+CREATE DATABASE <name> REVOKE <privilege> FROM <user>
+
+-- delete a user
+DROP USER <name>
+
+
+```
+where `<privilege> := READ | WRITE | ALL [PRIVILEGES]`.
+
+Authentication must be enabled in the influxdb.conf file for user permissions to be in effect.
+
+By default, newly created users have no privileges to any databases.
+
+Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements.
+
+# Select
+
+```sql
+SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m)
+
+SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region
+```
+
+## Group By
+
+# Delete
+
+# Series
+
+## Destroy
+
+```sql
+DROP MEASUREMENT <name>
+DROP MEASUREMENT cpu WHERE region = 'uswest'
+```
+
+## Show
+
+Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery.
+
+```sql
+-- show all databases
+SHOW DATABASES
+
+-- show measurement names
+SHOW MEASUREMENTS
+SHOW MEASUREMENTS LIMIT 15
+SHOW MEASUREMENTS LIMIT 10 OFFSET 40
+SHOW MEASUREMENTS WHERE service = 'redis'
+-- LIMIT and OFFSET can be applied to any of the SHOW type queries
+
+-- show all series across all measurements/tagsets
+SHOW SERIES
+
+-- show all series for any measurements where tag key region = tag value 'uswest'
+SHOW SERIES WHERE region = 'uswest'
+
+SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10
+
+-- returns rows 100 - 109 of the result. SHOW SERIES returns
+-- series split into measurements. Each series counts as a row. So you could see only a
+-- single measurement returned, but 10 series within it.
+SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 + +-- show all retention policies on a database +SHOW RETENTION POLICIES ON mydb + +-- get a show of all tag keys across all measurements +SHOW TAG KEYS + +-- show all the tag keys for a given measurement +SHOW TAG KEYS FROM cpu +SHOW TAG KEYS FROM temperature, wind_speed + +-- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required +SHOW TAG VALUES WITH TAG KEY = 'region' +SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' + +-- and you can do stuff against fields +SHOW FIELD KEYS FROM cpu + +-- but you can't do this +SHOW FIELD VALUES +-- we don't index field values, so this query should be invalid. + +-- show all users +SHOW USERS +``` + +Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. + +And the show series output looks like this: + +```json +[ + { + "name": "cpu", + "columns": ["id", "region", "host"], + "values": [ + 1, "uswest", "servera", + 2, "uswest", "serverb" + ] + }, + { + "name": "reponse_time", + "columns": ["id", "application", "host"], + "values": [ + 3, "myRailsApp", "servera" + ] + } +] +``` + +# Continuous Queries + +Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: + +http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html + +Instead of having automatically-assigned ids, named continuous queries allows for some level of duplication prevention, +particularly in the case where creation is scripted. + +## Create + + CREATE CONTINUOUS QUERY AS SELECT ... FROM ... + +## Destroy + + DROP CONTINUOUS QUERY + +## List + + SHOW CONTINUOUS QUERIES diff --git a/vendor/github.com/influxdata/influxdb/README.md b/vendor/github.com/influxdata/influxdb/README.md new file mode 100644 index 0000000000..0c0d00143e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/README.md @@ -0,0 +1,72 @@ +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) + +## An Open-Source, Distributed, Time Series Database + +InfluxDB is an open source **distributed time series database** with +**no external dependencies**. It's useful for recording metrics, +events, and performing analytics. + +## Features + +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data/) so you don't have to write any server side code to get up and running. +* Data can be tagged, allowing very flexible querying. +* SQL-like query language. +* Clustering is supported out of the box, so that you can scale horizontally to handle your data. **Clustering is currently in an alpha state.** +* Simple to install and manage, and fast to get data in and out. +* It aims to answer queries in real-time. That means every data point is + indexed as it comes in and is immediately available in queries that + should return in < 100ms. + +## Installation + +We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using: + +* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. +* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. +* `$GOPATH/bin/influxd` if you have built InfluxDB from source. 
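+
+Once the server is running, the built-in HTTP API can also be driven directly from Go using only the standard library. The snippet below is a minimal, illustrative sketch rather than the project's canonical client: it assumes a local server on port 8086 and an existing `mydb` database (created as in the Getting Started examples that follow), and posts a single point in the line protocol to the `/write` endpoint.
+
+```go
+package main
+
+import (
+    "bytes"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // One point in the line protocol: measurement,tags fields timestamp.
+    point := []byte("cpu,host=server01,region=uswest load=42 1434055562000000000")
+
+    // POST the point to the /write endpoint of a local InfluxDB (assumed to be on :8086
+    // with a database named "mydb" already created).
+    resp, err := http.Post("http://localhost:8086/write?db=mydb", "text/plain", bytes.NewReader(point))
+    if err != nil {
+        fmt.Println("write failed:", err)
+        return
+    }
+    defer resp.Body.Close()
+
+    // A 204 No Content status indicates the point was accepted.
+    fmt.Println("status:", resp.Status)
+}
+```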
+ +## Getting Started + +### Create your first database + +``` +curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" +``` + +### Insert some data +``` +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server01,region=uswest load=42 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server02,region=uswest load=78 1434055562000000000' + +curl -XPOST 'http://localhost:8086/write?db=mydb' \ +-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' +``` + +### Query for the data +```JSON +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" +``` + +### Analyze the data +```JSON +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ +--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" +``` + +## Documentation + +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/v0.10/). +* Follow the [getting started guide](https://docs.influxdata.com/influxdb/v0.10/introduction/getting_started/) to learn the basics in just a few minutes. +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data/). + +## Contributing + +If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests. + +## Looking for Support? + +InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed. diff --git a/vendor/github.com/influxdata/influxdb/build-docker.sh b/vendor/github.com/influxdata/influxdb/build-docker.sh new file mode 100755 index 0000000000..1eb889bf24 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build-docker.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -e -x + +GO_VER=${GO_VER:-1.5} + +docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd' + +docker build -t influxdb . 
diff --git a/vendor/github.com/influxdata/influxdb/build.py b/vendor/github.com/influxdata/influxdb/build.py new file mode 100755 index 0000000000..4c63b1bce6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.py @@ -0,0 +1,819 @@ +#!/usr/bin/python2.7 -u + +import sys +import os +import subprocess +import time +import datetime +import shutil +import tempfile +import hashlib +import re + +debug = False + +################ +#### InfluxDB Variables +################ + +# Packaging variables +PACKAGE_NAME = "influxdb" +INSTALL_ROOT_DIR = "/usr/bin" +LOG_DIR = "/var/log/influxdb" +DATA_DIR = "/var/lib/influxdb" +SCRIPT_DIR = "/usr/lib/influxdb/scripts" +CONFIG_DIR = "/etc/influxdb" +LOGROTATE_DIR = "/etc/logrotate.d" + +INIT_SCRIPT = "scripts/init.sh" +SYSTEMD_SCRIPT = "scripts/influxdb.service" +PREINST_SCRIPT = "scripts/pre-install.sh" +POSTINST_SCRIPT = "scripts/post-install.sh" +POSTUNINST_SCRIPT = "scripts/post-uninstall.sh" +LOGROTATE_SCRIPT = "scripts/logrotate" +DEFAULT_CONFIG = "etc/config.sample.toml" + +# Default AWS S3 bucket for uploads +DEFAULT_BUCKET = "influxdb" + +CONFIGURATION_FILES = [ + CONFIG_DIR + '/influxdb.conf', + LOGROTATE_DIR + '/influxdb', +] + +PACKAGE_LICENSE = "MIT" +PACKAGE_URL = "https://github.com/influxdata/influxdb" +MAINTAINER = "support@influxdb.com" +VENDOR = "InfluxData" +DESCRIPTION = "Distributed time-series database." + +prereqs = [ 'git', 'go', 'gdm' ] +go_vet_command = ["go", "tool", "vet", "-composites=true", "./"] +optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] + +fpm_common_args = "-f -s dir --log error \ +--vendor {} \ +--url {} \ +--after-install {} \ +--before-install {} \ +--after-remove {} \ +--license {} \ +--maintainer {} \ +--directories {} \ +--directories {} \ +--description \"{}\"".format( + VENDOR, + PACKAGE_URL, + POSTINST_SCRIPT, + PREINST_SCRIPT, + POSTUNINST_SCRIPT, + PACKAGE_LICENSE, + MAINTAINER, + LOG_DIR, + DATA_DIR, + DESCRIPTION) + +for f in CONFIGURATION_FILES: + fpm_common_args += " --config-files {}".format(f) + +targets = { + 'influx' : './cmd/influx', + 'influxd' : './cmd/influxd', + 'influx_stress' : './cmd/influx_stress', + 'influx_inspect' : './cmd/influx_inspect', + 'influx_tsm' : './cmd/influx_tsm', +} + +supported_builds = { + 'darwin': [ "amd64", "i386" ], + 'windows': [ "amd64", "i386" ], + 'linux': [ "amd64", "i386", "armhf", "arm64", "armel" ] +} + +supported_packages = { + "darwin": [ "tar" ], + "linux": [ "deb", "rpm", "tar" ], + "windows": [ "tar" ], +} + +################ +#### InfluxDB Functions +################ + +def create_package_fs(build_root): + print "Creating package filesystem at root: {}".format(build_root) + # Using [1:] for the path names due to them being absolute + # (will overwrite previous paths, per 'os.path.join' documentation) + dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], DATA_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + for d in dirs: + create_dir(os.path.join(build_root, d)) + os.chmod(os.path.join(build_root, d), 0755) + +def package_scripts(build_root): + print "Copying scripts and sample configuration to build directory" + shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0644) + shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1])) + os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0644) + shutil.copyfile(LOGROTATE_SCRIPT, 
os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb")) + os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "influxdb"), 0644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "influxdb.conf"), 0644) + +def run_generate(): + print "Running go generate to rebuild admin UI static filesystem..." + run("go generate ./services/admin") + return True + +def go_get(branch, update=False, no_stash=False): + if not check_path_for("gdm"): + print "Downloading `gdm`..." + get_command = "go get github.com/sparrc/gdm" + run(get_command) + print "Retrieving dependencies with `gdm`..." + sys.stdout.flush() + run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH"))) + return True + +################ +#### All InfluxDB-specific content above this line +################ + +def run(command, allow_failure=False, shell=False): + out = None + if debug: + print "[DEBUG] {}".format(command) + try: + if shell: + out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell) + else: + out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT) + if debug: + print "[DEBUG] command output: \n{}\n".format(out) + except subprocess.CalledProcessError as e: + print "" + print "" + print "Executed command failed!" + print "-- Command run was: {}".format(command) + print "-- Failure was: {}".format(e.output) + if allow_failure: + print "Continuing..." + return None + else: + print "" + print "Stopping." + sys.exit(1) + except OSError as e: + print "" + print "" + print "Invalid command!" + print "-- Command run was: {}".format(command) + print "-- Failure was: {}".format(e) + if allow_failure: + print "Continuing..." + return out + else: + print "" + print "Stopping." 
+ sys.exit(1) + else: + return out + +def create_temp_dir(prefix = None): + if prefix is None: + return tempfile.mkdtemp(prefix="{}-build.".format(PACKAGE_NAME)) + else: + return tempfile.mkdtemp(prefix=prefix) + +def get_current_version_tag(): + version = run("git describe --always --tags --abbrev=0").strip() + return version + +def get_current_version(): + version_tag = get_current_version_tag() + # Remove leading 'v' and possible '-rc\d+' + version = re.sub(r'-rc\d+', '', version_tag[1:]) + return version + +def get_current_rc(): + rc = None + version_tag = get_current_version_tag() + matches = re.match(r'.*-rc(\d+)', version_tag) + if matches: + rc, = matches.groups(1) + return rc + +def get_current_commit(short=False): + command = None + if short: + command = "git log --pretty=format:'%h' -n 1" + else: + command = "git rev-parse HEAD" + out = run(command) + return out.strip('\'\n\r ') + +def get_current_branch(): + command = "git rev-parse --abbrev-ref HEAD" + out = run(command) + return out.strip() + +def get_system_arch(): + arch = os.uname()[4] + if arch == "x86_64": + arch = "amd64" + return arch + +def get_system_platform(): + if sys.platform.startswith("linux"): + return "linux" + else: + return sys.platform + +def get_go_version(): + out = run("go version") + matches = re.search('go version go(\S+)', out) + if matches is not None: + return matches.groups()[0].strip() + return None + +def check_path_for(b): + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + full_path = os.path.join(path, b) + if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + return full_path + +def check_environ(build_dir = None): + print "" + print "Checking environment:" + for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + print "- {} -> {}".format(v, os.environ.get(v)) + + cwd = os.getcwd() + if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: + print "!! WARNING: Your current directory is not under your GOPATH. This may lead to build failures." + +def check_prereqs(): + print "" + print "Checking for dependencies:" + for req in prereqs: + print "- {} ->".format(req), + path = check_path_for(req) + if path: + print "{}".format(path) + else: + print "?" + for req in optional_prereqs: + print "- {} (optional) ->".format(req), + path = check_path_for(req) + if path: + print "{}".format(path) + else: + print "?" + print "" + return True + +def upload_packages(packages, bucket_name=None, nightly=False): + if debug: + print "[DEBUG] upload_packages: {}".format(packages) + try: + import boto + from boto.s3.key import Key + except ImportError: + print "!! Cannot upload packages without the 'boto' Python library." + return 1 + print "Connecting to S3...".format(bucket_name) + c = boto.connect_s3() + if bucket_name is None: + bucket_name = DEFAULT_BUCKET + bucket = c.get_bucket(bucket_name.split('/')[0]) + print "Using bucket: {}".format(bucket_name) + for p in packages: + if '/' in bucket_name: + # Allow for nested paths within the bucket name (ex: + # bucket/folder). Assuming forward-slashes as path + # delimiter. 
+ name = os.path.join('/'.join(bucket_name.split('/')[1:]), + os.path.basename(p)) + else: + name = os.path.basename(p) + if bucket.get_key(name) is None or nightly: + print "Uploading {}...".format(name) + sys.stdout.flush() + k = Key(bucket) + k.key = name + if nightly: + n = k.set_contents_from_filename(p, replace=True) + else: + n = k.set_contents_from_filename(p, replace=False) + k.make_public() + else: + print "!! Not uploading package {}, as it already exists.".format(p) + print "" + return 0 + +def run_tests(race, parallel, timeout, no_vet): + print "Running tests:" + print "\tRace: ", race + if parallel is not None: + print "\tParallel:", parallel + if timeout is not None: + print "\tTimeout:", timeout + sys.stdout.flush() + p = subprocess.Popen(["go", "fmt", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print "Code not formatted. Please use 'go fmt ./...' to fix formatting errors." + print out + print err + return False + if not no_vet: + print "Installing 'go vet' tool..." + run("go install golang.org/x/tools/cmd/vet") + p = subprocess.Popen(go_vet_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + if len(out) > 0 or len(err) > 0: + print "Go vet failed. Please run 'go vet ./...' and fix any errors." + print out + print err + return False + else: + print "Skipping go vet ..." + sys.stdout.flush() + test_command = "go test -v" + if race: + test_command += " -race" + if parallel is not None: + test_command += " -parallel {}".format(parallel) + if timeout is not None: + test_command += " -timeout {}".format(timeout) + test_command += " ./..." + code = os.system(test_command) + if code != 0: + print "Tests Failed" + return False + else: + print "Tests Passed" + return True + +def build(version=None, + branch=None, + commit=None, + platform=None, + arch=None, + nightly=False, + rc=None, + race=False, + clean=False, + outdir="."): + print "" + print "-------------------------" + print "" + print "Build Plan:" + print "- version: {}".format(version) + if rc: + print "- release candidate: {}".format(rc) + print "- commit: {}".format(get_current_commit(short=True)) + print "- branch: {}".format(get_current_branch()) + print "- platform: {}".format(platform) + print "- arch: {}".format(arch) + print "- nightly? {}".format(str(nightly).lower()) + print "- race enabled? {}".format(str(race).lower()) + print "" + + if not os.path.exists(outdir): + os.makedirs(outdir) + elif clean and outdir != '/': + print "Cleaning build directory..." + shutil.rmtree(outdir) + os.makedirs(outdir) + + if rc: + # If a release candidate, update the version information accordingly + version = "{}rc{}".format(version, rc) + + print "Starting build..." + tmp_build_dir = create_temp_dir() + for b, c in targets.iteritems(): + print "Building '{}'...".format(os.path.join(outdir, b)) + build_command = "" + if "arm" in arch: + build_command += "GOOS={} GOARCH={} ".format(platform, "arm") + else: + if arch == 'i386': + arch = '386' + elif arch == 'x86_64': + arch = 'amd64' + build_command += "GOOS={} GOARCH={} ".format(platform, arch) + if "arm" in arch: + if arch == "armel": + build_command += "GOARM=5 " + elif arch == "armhf" or arch == "arm": + build_command += "GOARM=6 " + elif arch == "arm64": + build_command += "GOARM=arm64 " + else: + print "!! 
Invalid ARM architecture specifed: {}".format(arch) + print "Please specify either 'armel', 'armhf', or 'arm64'" + return 1 + if platform == 'windows': + build_command += "go build -o {} ".format(os.path.join(outdir, b + '.exe')) + else: + build_command += "go build -o {} ".format(os.path.join(outdir, b)) + if race: + build_command += "-race " + go_version = get_go_version() + if "1.4" in go_version: + build_command += "-ldflags=\"-X main.version {} -X main.branch {} -X main.commit {}\" ".format(version, + get_current_branch(), + get_current_commit()) + else: + # With Go 1.5, the linker flag arguments changed to 'name=value' from 'name value' + build_command += "-ldflags=\"-X main.version={} -X main.branch={} -X main.commit={}\" ".format(version, + get_current_branch(), + get_current_commit()) + build_command += c + run(build_command, shell=True) + return 0 + +def create_dir(path): + try: + os.makedirs(path) + except OSError as e: + print e + +def rename_file(fr, to): + try: + os.rename(fr, to) + except OSError as e: + print e + # Return the original filename + return fr + else: + # Return the new filename + return to + +def copy_file(fr, to): + try: + shutil.copy(fr, to) + except OSError as e: + print e + +def generate_md5_from_file(path): + m = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096), b""): + m.update(chunk) + return m.hexdigest() + +def build_packages(build_output, version, nightly=False, rc=None, iteration=1): + outfiles = [] + tmp_build_dir = create_temp_dir() + if debug: + print "[DEBUG] build_output = {}".format(build_output) + try: + print "-------------------------" + print "" + print "Packaging..." + for platform in build_output: + # Create top-level folder displaying which platform (linux, etc) + create_dir(os.path.join(tmp_build_dir, platform)) + for arch in build_output[platform]: + # Create second-level directory displaying the architecture (amd64, etc) + current_location = build_output[platform][arch] + + # Create directory tree to mimic file system of package + build_root = os.path.join(tmp_build_dir, + platform, + arch, + '{}-{}-{}'.format(PACKAGE_NAME, version, iteration)) + create_dir(build_root) + create_package_fs(build_root) + + # Copy packaging scripts to build directory + package_scripts(build_root) + + for binary in targets: + # Copy newly-built binaries to packaging directory + if platform == 'windows': + binary = binary + '.exe' + # Where the binary currently is located + fr = os.path.join(current_location, binary) + # Where the binary should go in the package filesystem + to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary) + if debug: + print "[{}][{}] - Moving from '{}' to '{}'".format(platform, + arch, + fr, + to) + copy_file(fr, to) + + for package_type in supported_packages[platform]: + # Package the directory structure for each package type for the platform + print "Packaging directory '{}' as '{}'...".format(build_root, package_type) + name = PACKAGE_NAME + # Reset version, iteration, and current location on each run + # since they may be modified below. 
+ package_version = version + package_iteration = iteration + package_build_root = build_root + current_location = build_output[platform][arch] + + if package_type in ['zip', 'tar']: + # For tars and zips, start the packaging one folder above + # the build root (to include the package name) + package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1])) + if nightly: + name = '{}-nightly_{}_{}'.format(name, + platform, + arch) + else: + name = '{}-{}-{}_{}_{}'.format(name, + package_version, + package_iteration, + platform, + arch) + + if package_type == 'tar': + # Add `tar.gz` to path to compress package output + current_location = os.path.join(current_location, name + '.tar.gz') + elif package_type == 'zip': + current_location = os.path.join(current_location, name + '.zip') + + if rc is not None: + # Set iteration to 0 since it's a release candidate + package_iteration = "0.rc{}".format(rc) + + fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( + fpm_common_args, + name, + arch, + package_type, + package_version, + package_iteration, + package_build_root, + current_location) + if debug: + fpm_command += "--verbose " + if package_type == "rpm": + fpm_command += "--depends coreutils " + out = run(fpm_command, shell=True) + matches = re.search(':path=>"(.*)"', out) + outfile = None + if matches is not None: + outfile = matches.groups()[0] + if outfile is None: + print "!! Could not determine output from packaging command." + else: + # Strip nightly version (the unix epoch) from filename + if nightly and package_type in [ 'deb', 'rpm' ]: + outfile = rename_file(outfile, outfile.replace("{}-{}".format(version, iteration), "nightly")) + outfiles.append(os.path.join(os.getcwd(), outfile)) + # Display MD5 hash for generated package + print "MD5({}) = {}".format(outfile, generate_md5_from_file(outfile)) + print "" + if debug: + print "[DEBUG] package outfiles: {}".format(outfiles) + return outfiles + finally: + # Cleanup + shutil.rmtree(tmp_build_dir) + +def print_usage(): + print "Usage: ./build.py [options]" + print "" + print "Options:" + print "\t --outdir= \n\t\t- Send build output to a specified path. Defaults to ./build." + print "\t --arch= \n\t\t- Build for specified architecture. Acceptable values: x86_64|amd64, 386|i386, arm, or all" + print "\t --platform= \n\t\t- Build for specified platform. Acceptable values: linux, windows, darwin, or all" + print "\t --version= \n\t\t- Version information to apply to build metadata. If not specified, will be pulled from repo tag." + print "\t --commit= \n\t\t- Use specific commit for build (currently a NOOP)." + print "\t --branch= \n\t\t- Build from a specific branch (currently a NOOP)." + print "\t --rc= \n\t\t- Whether or not the build is a release candidate (affects version information)." + print "\t --iteration= \n\t\t- The iteration to display on the package output (defaults to 0 for RC's, and 1 otherwise)." + print "\t --race \n\t\t- Whether the produced build should have race detection enabled." + print "\t --package \n\t\t- Whether the produced builds should be packaged for the target platform(s)." + print "\t --nightly \n\t\t- Whether the produced build is a nightly (affects version information)." + print "\t --update \n\t\t- Whether dependencies should be updated prior to building." + print "\t --test \n\t\t- Run Go tests. Will not produce a build." + print "\t --parallel \n\t\t- Run Go tests in parallel up to the count specified." 
+ print "\t --generate \n\t\t- Run `go generate`." + print "\t --timeout \n\t\t- Timeout for Go tests. Defaults to 480s." + print "\t --clean \n\t\t- Clean the build output directory prior to creating build." + print "\t --no-get \n\t\t- Do not run `go get` before building." + print "\t --bucket=\n\t\t- Full path of the bucket to upload packages to (must also specify --upload)." + print "\t --debug \n\t\t- Displays debug output." + print "" + +def print_package_summary(packages): + print packages + +def main(): + global debug + + # Command-line arguments + outdir = "build" + commit = None + target_platform = None + target_arch = None + nightly = False + race = False + branch = None + version = get_current_version() + rc = get_current_rc() + package = False + update = False + clean = False + upload = False + test = False + parallel = None + timeout = None + iteration = 1 + no_vet = False + run_get = True + upload_bucket = None + generate = False + no_stash = False + + for arg in sys.argv[1:]: + if '--outdir' in arg: + # Output directory. If none is specified, then builds will be placed in the same directory. + outdir = arg.split("=")[1] + if '--commit' in arg: + # Commit to build from. If none is specified, then it will build from the most recent commit. + commit = arg.split("=")[1] + if '--branch' in arg: + # Branch to build from. If none is specified, then it will build from the current branch. + branch = arg.split("=")[1] + elif '--arch' in arg: + # Target architecture. If none is specified, then it will build for the current arch. + target_arch = arg.split("=")[1] + elif '--platform' in arg: + # Target platform. If none is specified, then it will build for the current platform. + target_platform = arg.split("=")[1] + elif '--version' in arg: + # Version to assign to this build (0.9.5, etc) + version = arg.split("=")[1] + elif '--rc' in arg: + # Signifies that this is a release candidate build. + rc = arg.split("=")[1] + elif '--race' in arg: + # Signifies that race detection should be enabled. + race = True + elif '--package' in arg: + # Signifies that packages should be built. + package = True + # If packaging do not allow stashing of local changes + no_stash = True + elif '--nightly' in arg: + # Signifies that this is a nightly build. + nightly = True + elif '--update' in arg: + # Signifies that dependencies should be updated. + update = True + elif '--upload' in arg: + # Signifies that the resulting packages should be uploaded to S3 + upload = True + elif '--test' in arg: + # Run tests and exit + test = True + elif '--parallel' in arg: + # Set parallel for tests. + parallel = int(arg.split("=")[1]) + elif '--timeout' in arg: + # Set timeout for tests. + timeout = arg.split("=")[1] + elif '--clean' in arg: + # Signifies that the outdir should be deleted before building + clean = True + elif '--iteration' in arg: + iteration = arg.split("=")[1] + elif '--no-vet' in arg: + no_vet = True + elif '--no-get' in arg: + run_get = False + elif '--bucket' in arg: + # The bucket to upload the packages to, relies on boto + upload_bucket = arg.split("=")[1] + elif '--no-stash' in arg: + # Do not stash uncommited changes + # Fail if uncommited changes exist + no_stash = True + elif '--generate' in arg: + generate = True + elif '--debug' in arg: + print "[DEBUG] Using debug output" + debug = True + elif '--help' in arg: + print_usage() + return 0 + else: + print "!! Unknown argument: {}".format(arg) + print_usage() + return 1 + + if nightly and rc: + print "!! 
Cannot be both nightly and a release candidate! Stopping." + return 1 + + if nightly: + # In order to cleanly delineate nightly version, we are adding the epoch timestamp + # to the version so that version numbers are always greater than the previous nightly. + version = "{}~n{}".format(version, int(time.time())) + iteration = 0 + elif rc: + iteration = 0 + + # Pre-build checks + check_environ() + if not check_prereqs(): + return 1 + + if not commit: + commit = get_current_commit(short=True) + if not branch: + branch = get_current_branch() + if not target_arch: + system_arch = get_system_arch() + if 'arm' in system_arch: + # Prevent uname from reporting ARM arch (eg 'armv7l') + target_arch = "arm" + else: + target_arch = system_arch + if target_arch == '386': + target_arch = 'i386' + elif target_arch == 'x86_64': + target_arch = 'amd64' + if target_platform: + if target_platform not in supported_builds and target_platform != 'all': + print "! Invalid build platform: {}".format(target_platform) + return 1 + else: + target_platform = get_system_platform() + + build_output = {} + + if generate: + if not run_generate(): + return 1 + + if run_get: + if not go_get(branch, update=update, no_stash=no_stash): + return 1 + + if test: + if not run_tests(race, parallel, timeout, no_vet): + return 1 + return 0 + + platforms = [] + single_build = True + if target_platform == 'all': + platforms = supported_builds.keys() + single_build = False + else: + platforms = [target_platform] + + for platform in platforms: + build_output.update( { platform : {} } ) + archs = [] + if target_arch == "all": + single_build = False + archs = supported_builds.get(platform) + else: + archs = [target_arch] + + for arch in archs: + od = outdir + if not single_build: + od = os.path.join(outdir, platform, arch) + if build(version=version, + branch=branch, + commit=commit, + platform=platform, + arch=arch, + nightly=nightly, + rc=rc, + race=race, + clean=clean, + outdir=od): + return 1 + build_output.get(platform).update( { arch : od } ) + + # Build packages + if package: + if not check_path_for("fpm"): + print "!! Cannot package without command 'fpm'." + return 1 + + packages = build_packages(build_output, version, nightly=nightly, rc=rc, iteration=iteration) + if upload: + upload_packages(packages, bucket_name=upload_bucket, nightly=nightly) + print "Done!" 
+ return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/influxdata/influxdb/build.sh b/vendor/github.com/influxdata/influxdb/build.sh new file mode 100755 index 0000000000..0f80ac7872 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/build.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Run the build utility via Docker + +set -e + +# Make sure our working dir is the dir of the script +DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) +cd $DIR + + +# Build new docker image +docker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR +echo "Running build.py" +# Run docker +docker run --rm \ + -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ + -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ + -v $HOME/.aws.conf:/root/.aws.conf \ + -v $DIR:/root/go/src/github.com/influxdata/influxdb \ + influxdb-builder \ + "$@" + diff --git a/vendor/github.com/influxdata/influxdb/circle-test.sh b/vendor/github.com/influxdata/influxdb/circle-test.sh new file mode 100755 index 0000000000..6b34043b18 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/circle-test.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# +# This is the InfluxDB test script for CircleCI, it is a light wrapper around ./test.sh. + +# Exit if any command fails +set -e + +# Get dir of script and make it is our working directory. +DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) +cd $DIR + +export OUTPUT_DIR="$CIRCLE_ARTIFACTS" +# Don't delete the container since CircleCI doesn't have permission to do so. +export DOCKER_RM="false" + +# Get number of test environments. +count=$(./test.sh count) +# Check that we aren't wasting CircleCI nodes. +if [ $CIRCLE_NODE_TOTAL -gt $count ] +then + echo "More CircleCI nodes allocated than tests environments to run!" + exit 1 +fi + +# Map CircleCI nodes to test environments. +tests=$(seq 0 $((count - 1))) +for i in $tests +do + mine=$(( $i % $CIRCLE_NODE_TOTAL )) + if [ $mine -eq $CIRCLE_NODE_INDEX ] + then + echo "Running test env index: $i" + ./test.sh $i + fi +done diff --git a/vendor/github.com/influxdata/influxdb/circle.yml b/vendor/github.com/influxdata/influxdb/circle.yml new file mode 100644 index 0000000000..d407675c2a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/circle.yml @@ -0,0 +1,16 @@ +machine: + services: + - docker + +dependencies: + cache_directories: + - "~/docker" + override: + - ./test.sh save: + # building the docker images can take a long time, hence caching + timeout: 1800 + +test: + override: + - bash circle-test.sh: + parallel: true diff --git a/vendor/github.com/influxdata/influxdb/client/README.md b/vendor/github.com/influxdata/influxdb/client/README.md new file mode 100644 index 0000000000..ec2d4de749 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/README.md @@ -0,0 +1,257 @@ +# InfluxDB Client + +[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) + +## Description + +**NOTE:** The Go client library now has a "v2" version, with the old version +being deprecated. The new version can be imported at +`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. + +A Go client library written and maintained by the **InfluxDB** team. +This package provides convenience functions to read and write time series data. +It uses the HTTP protocol to communicate with your **InfluxDB** cluster. 
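+
+As a quick orientation, the two import paths look like this (a minimal sketch; the
+aliases are only needed if you import both packages in one file, since both packages
+are named `client`):
+
+```go
+import (
+    clientv1 "github.com/influxdata/influxdb/client"    // deprecated v1 API
+    clientv2 "github.com/influxdata/influxdb/client/v2" // current v2 API
+)
+```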
+
+
+## Getting Started
+
+### Connecting To Your Database
+
+Connecting to an **InfluxDB** database is straightforward. You will need a host
+name, a port and the cluster user credentials if applicable. The default port is
+8086. You can customize these settings to your specific installation via the
+**InfluxDB** configuration file.
+
+Though not necessary for experimentation, you may want to create a new user
+and authenticate the connection to your database.
+
+For more information please check out the
+[Admin Docs](https://docs.influxdata.com/influxdb/v0.10/administration).
+
+For the impatient, you can create a new admin user _bubba_ by firing off the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
+
+```shell
+influx
+> create user bubba with password 'bumblebeetuna'
+> grant all privileges to bubba
+```
+
+And now for good measure set the credentials in your shell environment.
+In the example below we will use $INFLUX_USER and $INFLUX_PWD.
+
+Now with the administrivia out of the way, let's connect to our database.
+
+NOTE: If you've opted out of creating a user, you can omit Username and Password in
+the configuration below.
+
+```go
+package main
+
+import (
+    "log"
+    "time"
+
+    "github.com/influxdata/influxdb/client/v2"
+)
+
+const (
+    MyDB     = "square_holes"
+    username = "bubba"
+    password = "bumblebeetuna"
+)
+
+func main() {
+    // Make client
+    c, err := client.NewHTTPClient(client.HTTPConfig{
+        Addr:     "http://localhost:8086",
+        Username: username,
+        Password: password,
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Create a new point batch
+    bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+        Database:  MyDB,
+        Precision: "s",
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Create a point and add to batch
+    tags := map[string]string{"cpu": "cpu-total"}
+    fields := map[string]interface{}{
+        "idle":   10.1,
+        "system": 53.3,
+        "user":   46.6,
+    }
+    pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+    if err != nil {
+        log.Fatal(err)
+    }
+    bp.AddPoint(pt)
+
+    // Write the batch
+    if err := c.Write(bp); err != nil {
+        log.Fatal(err)
+    }
+}
+```
+
+### Inserting Data
+
+Time series data aka *points* are written to the database using batch inserts.
+The mechanism is to create one or more points and then create a batch aka
+*batch points* and write these to a given database and series. A series is a
+combination of a measurement (time/values) and a set of tags.
+
+In this sample we will create a batch of 1,000 points. Each point has a time and
+a single value as well as 2 tags indicating a shape and color. We write these points
+to a database called _square_holes_ using a measurement named _shapes_.
+
+NOTE: You can specify a RetentionPolicy as part of the batch points. If not
+provided InfluxDB will use the database _default_ retention policy.
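+
+For instance, a minimal sketch of attaching a retention policy to a batch (the
+policy name "one_week" is illustrative and must already exist on the database):
+
+```go
+bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+    Database:        "square_holes",
+    RetentionPolicy: "one_week", // hypothetical retention policy name
+    Precision:       "s",
+})
+if err != nil {
+    log.Fatal(err)
+}
+```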
+
+```go
+func writePoints(clnt client.Client) {
+    sampleSize := 1000
+    rand.Seed(42)
+
+    bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+        Database:  "systemstats",
+        Precision: "us",
+    })
+
+    for i := 0; i < sampleSize; i++ {
+        regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"}
+        tags := map[string]string{
+            "cpu":    "cpu-total",
+            "host":   fmt.Sprintf("host%d", rand.Intn(1000)),
+            "region": regions[rand.Intn(len(regions))],
+        }
+
+        idle := rand.Float64() * 100.0
+        fields := map[string]interface{}{
+            "idle": idle,
+            "busy": 100.0 - idle,
+        }
+
+        pt, err := client.NewPoint(
+            "cpu_usage",
+            tags,
+            fields,
+            time.Now(),
+        )
+        if err != nil {
+            log.Fatal(err)
+        }
+        bp.AddPoint(pt)
+    }
+
+    err := clnt.Write(bp)
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```
+
+
+### Querying Data
+
+One nice advantage of using **InfluxDB** is the ability to query your data using familiar
+SQL constructs. In this example we can create a convenience function to query the database
+as follows:
+
+```go
+// queryDB convenience function to query the database
+func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {
+    q := client.Query{
+        Command:  cmd,
+        Database: MyDB,
+    }
+    if response, err := clnt.Query(q); err == nil {
+        if response.Error() != nil {
+            return res, response.Error()
+        }
+        res = response.Results
+    } else {
+        return res, err
+    }
+    return res, nil
+}
+```
+
+#### Creating a Database
+
+```go
+_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB))
+if err != nil {
+    log.Fatal(err)
+}
+```
+
+#### Count Records
+
+```go
+q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement)
+res, err := queryDB(clnt, q)
+if err != nil {
+    log.Fatal(err)
+}
+count := res[0].Series[0].Values[0][1]
+log.Printf("Found a total of %v records\n", count)
+```
+
+#### Find the last 10 _shapes_ records
+
+```go
+q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10)
+res, err = queryDB(clnt, q)
+if err != nil {
+    log.Fatal(err)
+}
+
+for i, row := range res[0].Series[0].Values {
+    t, err := time.Parse(time.RFC3339, row[0].(string))
+    if err != nil {
+        log.Fatal(err)
+    }
+    val := row[1]
+    log.Printf("[%2d] %s: %v\n", i, t.Format(time.Stamp), val)
+}
+```
+
+### Using the UDP Client
+
+The **InfluxDB** client also supports writing over UDP.
+
+```go
+func WriteUDP() {
+    // Make client
+    c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+    if err != nil {
+        panic(err.Error())
+    }
+
+    // Create a new point batch
+    bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
+        Precision: "s",
+    })
+
+    // Create a point and add to batch
+    tags := map[string]string{"cpu": "cpu-total"}
+    fields := map[string]interface{}{
+        "idle":   10.1,
+        "system": 53.3,
+        "user":   46.6,
+    }
+    pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now())
+    if err != nil {
+        panic(err.Error())
+    }
+    bp.AddPoint(pt)
+
+    // Write the batch
+    c.Write(bp)
+}
+```
+
+## Go Docs
+
+Please refer to
+[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)
+for documentation.
+
+## See Also
+
+You can also examine how the client library is used by the
+[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).
diff --git a/vendor/github.com/influxdata/influxdb/client/example_test.go b/vendor/github.com/influxdata/influxdb/client/example_test.go new file mode 100644 index 0000000000..f3753834a9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/example_test.go @@ -0,0 +1,113 @@ +package client_test + +import ( + "fmt" + "log" + "math/rand" + "net/url" + "os" + "strconv" + "time" + + "github.com/influxdata/influxdb/client" +) + +func ExampleNewClient() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. + conf := client.Config{ + URL: *host, + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + } + con, err := client.NewClient(conf) + if err != nil { + log.Fatal(err) + } + log.Println("Connection", con) +} + +func ExampleClient_Ping() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + dur, ver, err := con.Ping() + if err != nil { + log.Fatal(err) + } + log.Printf("Happy as a hippo! %v, %s", dur, ver) +} + +func ExampleClient_Query() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + q := client.Query{ + Command: "select count(value) from shapes", + Database: "square_holes", + } + if response, err := con.Query(q); err == nil && response.Error() == nil { + log.Println(response.Results) + } +} + +func ExampleClient_Write() { + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) + if err != nil { + log.Fatal(err) + } + con, err := client.NewClient(client.Config{URL: *host}) + if err != nil { + log.Fatal(err) + } + + var ( + shapes = []string{"circle", "rectangle", "square", "triangle"} + colors = []string{"red", "blue", "green"} + sampleSize = 1000 + pts = make([]client.Point, sampleSize) + ) + + rand.Seed(42) + for i := 0; i < sampleSize; i++ { + pts[i] = client.Point{ + Measurement: "shapes", + Tags: map[string]string{ + "color": strconv.Itoa(rand.Intn(len(colors))), + "shape": strconv.Itoa(rand.Intn(len(shapes))), + }, + Fields: map[string]interface{}{ + "value": rand.Intn(sampleSize), + }, + Time: time.Now(), + Precision: "s", + } + } + + bps := client.BatchPoints{ + Points: pts, + Database: "BumbeBeeTuna", + RetentionPolicy: "default", + } + _, err = con.Write(bps) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go new file mode 100644 index 0000000000..2f62aacb80 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -0,0 +1,715 @@ +package client // import "github.com/influxdata/influxdb/client" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +const ( + // DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is 
the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + h, p, err := net.SplitHostPort(path) + if err != nil { + if path == "" { + host = DefaultHost + } else { + host = path + } + // If they didn't specify a port, always use the default port + port = DefaultPort + } else { + host = h + port, err = strconv.Atoi(p) + if err != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) + } + } + + u := url.URL{ + Scheme: "http", + } + if ssl { + u.Scheme = "https" + } + + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. +// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + Username string + Password string + UserAgent string + Timeout time.Duration + Precision string + UnsafeSsl bool +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. +type Client struct { + url url.URL + username string + password string + httpClient *http.Client + userAgent string + precision string +} + +const ( + // ConsistencyOne requires at least one data node acknowledged a write. + ConsistencyOne = "one" + + // ConsistencyAll requires all data nodes to acknowledge a write. + ConsistencyAll = "all" + + // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyQuorum = "quorum" + + // ConsistencyAny allows for hinted hand off, potentially no write happened yet. + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. 
+func NewClient(c Config) (*Client, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.UnsafeSsl, + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + + client := Client{ + url: c.URL, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, + userAgent: c.UserAgent, + precision: c.Precision, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + u := c.url + + u.Path = "query" + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + if c.precision != "" { + values.Set("epoch", c.precision) + } + u.RawQuery = values.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, decErr + } + // If we don't have an error in our json response, and didn't get StatusOK, then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
+func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = "write" + + var b bytes.Buffer + for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + precision := bp.Precision + if precision == "" { + precision = c.precision + } + + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. +func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = "write" + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
+func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []models.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []models.Row `json:"series,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). 
+// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +// MarshalString renders string representation of a Point with specified +// precision. The default precision is nanoseconds. +func (p *Point) MarshalString() string { + pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time) + if err != nil { + return "# ERROR: " + err.Error() + " " + p.Measurement + } + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + return pt.String() + } + return pt.PrecisionString(p.Precision) +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + p.Measurement = epoch.Measurement + p.Tags = epoch.Tags + p.Time = ts + p.Precision = epoch.Precision + p.Fields = normalizeFields(epoch.Fields) + return nil + }(); err == nil { + return nil + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err := dec.Decode(&normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + p.Measurement = normal.Measurement + p.Tags = normal.Tags + p.Time = normal.Time + p.Precision = normal.Precision + p.Fields = normalizeFields(normal.Fields) + + return nil +} + +// Remove any notion of json.Number +func normalizeFields(fields map[string]interface{}) map[string]interface{} { + newFields := map[string]interface{}{} + + for k, v := range fields { + switch v := v.(type) { + case json.Number: + jv, e := v.Float64() + if e != nil { + panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) + } + newFields[k] = jv + default: + newFields[k] = v + } + } + return newFields +} + +// BatchPoints is used to send batched data in a single write. 
+// Database and Points are required +// If no retention policy is specified, it will use the databases default retention policy. +// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. +// If time is specified, it will be applied to any point with an empty time. +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type BatchPoints struct { + Points []Point `json:"points,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` + Precision string `json:"precision,omitempty"` + WriteConsistency string `json:"-"` +} + +// UnmarshalJSON decodes the data into the BatchPoints struct +func (bp *BatchPoints) UnmarshalJSON(b []byte) error { + var normal struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + } + var epoch struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + } + + if err := func() error { + var err error + if err = json.Unmarshal(b, &epoch); err != nil { + return err + } + // Convert from epoch to time.Time + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + bp.Points = epoch.Points + bp.Database = epoch.Database + bp.RetentionPolicy = epoch.RetentionPolicy + bp.Tags = epoch.Tags + bp.Time = ts + bp.Precision = epoch.Precision + return nil + }(); err == nil { + return nil + } + + if err := json.Unmarshal(b, &normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + bp.Points = normal.Points + bp.Database = normal.Database + bp.RetentionPolicy = normal.RetentionPolicy + bp.Tags = normal.Tags + bp.Time = normal.Time + bp.Precision = normal.Precision + + return nil +} + +// utility functions + +// Addr provides the current url as a string of the server the client is connected to. +func (c *Client) Addr() string { + return c.url.String() +} + +// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. 
+func checkPointTypes(p Point) error { + for _, v := range p.Fields { + switch v.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil: + return nil + default: + return fmt.Errorf("unsupported point type: %T", v) + } + } + return nil +} + +// helper functions + +// EpochToTime takes a unix epoch time and uses precision to return back a time.Time +func EpochToTime(epoch int64, precision string) (time.Time, error) { + if precision == "" { + precision = "s" + } + var t time.Time + switch precision { + case "h": + t = time.Unix(0, epoch*int64(time.Hour)) + case "m": + t = time.Unix(0, epoch*int64(time.Minute)) + case "s": + t = time.Unix(0, epoch*int64(time.Second)) + case "ms": + t = time.Unix(0, epoch*int64(time.Millisecond)) + case "u": + t = time.Unix(0, epoch*int64(time.Microsecond)) + case "n": + t = time.Unix(0, epoch) + default: + return time.Time{}, fmt.Errorf("Unknown precision %q", precision) + } + return t, nil +} + +// SetPrecision will round a time to the specified precision +func SetPrecision(t time.Time, precision string) time.Time { + switch precision { + case "n": + case "u": + return t.Round(time.Microsecond) + case "ms": + return t.Round(time.Millisecond) + case "s": + return t.Round(time.Second) + case "m": + return t.Round(time.Minute) + case "h": + return t.Round(time.Hour) + } + return t +} diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb_test.go b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go new file mode 100644 index 0000000000..c82eb6dd0a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go @@ -0,0 +1,743 @@ +package client_test + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/client" +) + +func BenchmarkWrite(b *testing.B) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{ + Points: []client.Point{ + {Fields: map[string]interface{}{"value": 101}}}, + } + for i := 0; i < b.N; i++ { + r, err := c.Write(bp) + if err != nil { + b.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + b.Fatalf("unexpected response. 
expected %v, actual %v", nil, r) + } + } +} + +func BenchmarkUnmarshalJSON2Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func BenchmarkUnmarshalJSON10Tags(b *testing.B) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01", + "region": "us-east1", + "tag1": "value1", + "tag2": "value2", + "tag2": "value3", + "tag4": "value4", + "tag5": "value5", + "tag6": "value6", + "tag7": "value7", + "tag8": "value8" + }, + "time": 14244733039069373, + "precision": "n", + "fields": { + "value": 4541770385657154000 + } + } + ] +} +`) + + for i := 0; i < b.N; i++ { + if err := json.Unmarshal(data, &bp); err != nil { + b.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } + b.SetBytes(int64(len(data))) + } +} + +func TestNewClient(t *testing.T) { + config := client.Config{} + _, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + d, version, err := c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if d == 0 { + t.Fatalf("expected a duration greater than zero. actual %v", d) + } + if version != "x.x" { + t.Fatalf("unexpected version. expected %s, actual %v", "x.x", version) + } +} + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + w.WriteHeader(http.StatusNoContent) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + u.User = url.UserPassword("username", "password") + config := client.Config{URL: *u, Username: "username", Password: "password"} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + bp := client.BatchPoints{} + r, err := c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if r != nil { + t.Fatalf("unexpected response. expected %v, actual %v", nil, r) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, UserAgent: test.userAgent} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + receivedUserAgent = "" + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp := client.BatchPoints{} + _, err = c.Write(bp) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Fatalf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, _, err = c.Ping() + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Fatalf("Unexpected user agent. 
expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestPoint_UnmarshalEpoch(t *testing.T) { + now := time.Now() + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + { + name: "nanoseconds", + epoch: now.UnixNano(), + precision: "n", + expected: now, + }, + { + name: "microseconds", + epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), + precision: "u", + expected: now.Round(time.Microsecond), + }, + { + name: "milliseconds", + epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), + precision: "ms", + expected: now.Round(time.Millisecond), + }, + { + name: "seconds", + epoch: now.Round(time.Second).UnixNano() / int64(time.Second), + precision: "s", + expected: now.Round(time.Second), + }, + { + name: "minutes", + epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), + precision: "m", + expected: now.Round(time.Minute), + }, + { + name: "hours", + epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), + precision: "h", + expected: now.Round(time.Hour), + }, + { + name: "max int64", + epoch: 9223372036854775807, + precision: "n", + expected: time.Unix(0, 9223372036854775807), + }, + { + name: "100 years from now", + epoch: now.Add(time.Hour * 24 * 365 * 100).UnixNano(), + precision: "n", + expected: now.Add(time.Hour * 24 * 365 * 100), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + data := []byte(fmt.Sprintf(`{"time": %d, "precision":"%s"}`, test.epoch, test.precision)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_UnmarshalRFC(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + rfc string + now time.Time + expected time.Time + }{ + { + name: "RFC3339Nano", + rfc: time.RFC3339Nano, + now: now, + expected: now, + }, + { + name: "RFC3339", + rfc: time.RFC3339, + now: now.Round(time.Second), + expected: now.Round(time.Second), + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + ts := test.now.Format(test.rfc) + data := []byte(fmt.Sprintf(`{"time": %q}`, ts)) + t.Logf("json: %s", string(data)) + var p client.Point + err := json.Unmarshal(data, &p) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if !p.Time.Equal(test.expected) { + t.Fatalf("Unexpected time. 
expected: %v, actual: %v", test.expected, p.Time) + } + } +} + +func TestPoint_MarshalOmitempty(t *testing.T) { + now := time.Now().UTC() + tests := []struct { + name string + point client.Point + now time.Time + expected string + }{ + { + name: "all empty", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1}}`, + }, + { + name: "with time", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Time: now}, + now: now, + expected: fmt.Sprintf(`{"measurement":"cpu","time":"%s","fields":{"value":1.1}}`, now.Format(time.RFC3339Nano)), + }, + { + name: "with tags", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Tags: map[string]string{"foo": "bar"}}, + now: now, + expected: `{"measurement":"cpu","tags":{"foo":"bar"},"fields":{"value":1.1}}`, + }, + { + name: "with precision", + point: client.Point{Measurement: "cpu", Fields: map[string]interface{}{"value": 1.1}, Precision: "ms"}, + now: now, + expected: `{"measurement":"cpu","fields":{"value":1.1},"precision":"ms"}`, + }, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + b, err := json.Marshal(&test.point) + if err != nil { + t.Fatalf("unexpected error. exptected: %v, actual: %v", nil, err) + } + if test.expected != string(b) { + t.Fatalf("Unexpected result. expected: %v, actual: %v", test.expected, string(b)) + } + } +} + +func TestEpochToTime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + epoch int64 + precision string + expected time.Time + }{ + {name: "nanoseconds", epoch: now.UnixNano(), precision: "n", expected: now}, + {name: "microseconds", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: "u", expected: now.Round(time.Microsecond)}, + {name: "milliseconds", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: "ms", expected: now.Round(time.Millisecond)}, + {name: "seconds", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: "s", expected: now.Round(time.Second)}, + {name: "minutes", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: "m", expected: now.Round(time.Minute)}, + {name: "hours", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: "h", expected: now.Round(time.Hour)}, + } + + for _, test := range tests { + t.Logf("testing %q\n", test.name) + tm, e := client.EpochToTime(test.epoch, test.precision) + if e != nil { + t.Fatalf("unexpected error: expected %v, actual: %v", nil, e) + } + if tm != test.expected { + t.Fatalf("unexpected time: expected %v, actual %v", test.expected, tm) + } + } +} + +// helper functions + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Influxdb-Version", "x.x") + return + })) +} + +// Ensure that data with epoch times can be decoded. 
+func TestBatchPoints_Normal(t *testing.T) { + var bp client.BatchPoints + data := []byte(` +{ + "database": "foo", + "retentionPolicy": "bar", + "points": [ + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069373, + "precision": "n", + "values": { + "value": 4541770385657154000 + } + }, + { + "name": "cpu", + "tags": { + "host": "server01" + }, + "time": 14244733039069380, + "precision": "n", + "values": { + "value": 7199311900554737000 + } + } + ] +} +`) + + if err := json.Unmarshal(data, &bp); err != nil { + t.Errorf("unable to unmarshal nanosecond data: %s", err.Error()) + } +} + +func TestClient_Timeout(t *testing.T) { + done := make(chan bool) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-done + })) + defer ts.Close() + defer func() { done <- true }() + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u, Timeout: 500 * time.Millisecond} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + if err == nil { + t.Fatalf("unexpected success. expected timeout error") + } else if !strings.Contains(err.Error(), "request canceled") && + !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("unexpected error. expected 'request canceled' error, got %v", err) + } +} + +func TestClient_NoTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + + query := client.Query{} + _, err = c.Query(query) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_WriteUint64(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + bp := client.BatchPoints{ + Points: []client.Point{ + { + Fields: map[string]interface{}{"value": uint64(10)}, + }, + }, + } + r, err := c.Write(bp) + if err == nil { + t.Fatalf("unexpected error. expected err, actual %v", err) + } + if r != nil { + t.Fatalf("unexpected response. 
expected %v, actual %v", nil, r) + } +} + +func TestClient_ParseConnectionString_IPv6(t *testing.T) { + path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086" + u, err := client.ParseConnectionString(path, false) + if err != nil { + t.Fatalf("unexpected error, expected %v, actual %v", nil, err) + } + if u.Host != path { + t.Fatalf("ipv6 parse failed, expected %s, actual %s", path, u.Host) + } +} + +func TestClient_CustomCertificates(t *testing.T) { + // generated with: + // openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf + // influx.cnf: + // [req] + // distinguished_name = req_distinguished_name + // x509_extensions = v3_req + // prompt = no + // [req_distinguished_name] + // C = US + // ST = CA + // L = San Francisco + // O = InfluxDB + // CN = github.com/influxdata + // [v3_req] + // keyUsage = keyEncipherment, dataEncipherment + // extendedKeyUsage = serverAuth + // subjectAltName = @alt_names + // [alt_names] + // IP.1 = 127.0.0.1 + // + key := ` +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi +4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv +qjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS +1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t +WTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa +mhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m +hXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I +dSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi +b4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu +36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m +u9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH +FWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt +byvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/ +vXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6 +aNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6 +BCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K +Yg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0 +3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T +OcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi +elKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1 +2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K +5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk +bKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C +cq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg +/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ +cvh4WzEegcExTppINW1NB5E= +-----END PRIVATE KEY----- +` + cert := ` +-----BEGIN CERTIFICATE----- +MIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G +A1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1 +NloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw +FAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE +AwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK +JLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr +XeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+ +3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK +u54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW +37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti 
+MT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw +MBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN +AQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ +m4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F +3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk +rRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY +jn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW +war3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI= +-----END CERTIFICATE----- +` + cer, err := tls.X509KeyPair([]byte(cert), []byte(key)) + + if err != nil { + t.Fatalf("Received error: %v", err) + } + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data client.Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + server.TLS = &tls.Config{Certificates: []tls.Certificate{cer}} + server.TLS.BuildNameToCertificate() + server.StartTLS() + defer server.Close() + + certFile, _ := ioutil.TempFile("", "influx-cert-") + certFile.WriteString(cert) + certFile.Close() + defer os.Remove(certFile.Name()) + + u, _ := url.Parse(server.URL) + + tests := []struct { + name string + unsafeSsl bool + expected error + }{ + {name: "validate certificates", unsafeSsl: false, expected: errors.New("error")}, + {name: "not validate certificates", unsafeSsl: true, expected: nil}, + } + + for _, test := range tests { + config := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + query := client.Query{} + _, err = c.Query(query) + + if (test.expected == nil) != (err == nil) { + t.Fatalf("%s: expected %v. got %v. unsafeSsl: %v", test.name, test.expected, err, test.unsafeSsl) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go new file mode 100644 index 0000000000..369b5d5445 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -0,0 +1,562 @@ +package client // import "github.com/influxdata/influxdb/client/v2" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "time" + + "github.com/influxdata/influxdb/models" +) + +// UDPPayloadSize is a reasonable default payload size for UDP packets that +// could be travelling over the internet. +const ( + UDPPayloadSize = 512 +) + +// HTTPConfig is the config data needed to create an HTTP Client +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional + Username string + + // Password is the influxdb password, optional + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient" + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false + InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config +} + +// UDPConfig is the config data needed to create a UDP Client +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". 
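+	// For example "localhost:8089", the address used by the UDP examples
+	// and tests in this package.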
+ Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPBufferSize. + PayloadSize int +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct +type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns" + Precision string + + // Database is the database to write points to + Database string + + // RetentionPolicy is the retention policy of the points + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write + WriteConsistency string +} + +// Client is a client interface for writing & querying the database +type Client interface { + // Ping checks that status of cluster + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient creates a client interface from the given config. +func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } + return &client{ + url: u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = fmt.Errorf(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + return nil +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. 
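+//
+// A minimal usage sketch from the caller's side, assuming an InfluxDB UDP
+// listener on localhost:8089 (adjust Addr and PayloadSize for your setup):
+//
+//	c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
+//	if err != nil {
+//		// handle the resolve/dial error
+//	}
+//	defer c.Close()
+//
+//	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{Precision: "s"})
+//	pt, _ := client.NewPoint("cpu_usage",
+//		map[string]string{"cpu": "cpu-total"},
+//		map[string]interface{}{"idle": 10.1},
+//		time.Now())
+//	bp.AddPoint(pt)
+//	_ = c.Write(bp)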
+func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} + +// Close releases the udpclient's resources. +func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type client struct { + url *url.URL + username string + password string + useragent string + httpClient *http.Client +} + +type udpclient struct { + conn *net.UDPConn + payloadSize int +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points + AddPoint(p *Point) + // Points lists the points in the Batch + Points() []*Point + + // Precision returns the currently set precision of this Batch + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch + Database() string + // SetDatabase sets the database of this Batch + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch + WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch + RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. 
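+//
+// For example (the database name is only illustrative):
+//
+//	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
+//		Database:  "mydb",
+//		Precision: "s",
+//	})
+//	if err != nil {
+//		// an unparsable precision such as "foobar" is rejected here
+//	}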
+func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. +func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, tags, fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, at precision +func (p *Point) PrecisionString(precison string) string { + return p.pt.PrecisionString(precison) +} + +// Name returns the measurement name of the point +func (p *Point) Name() string { + return p.pt.Name() +} + +// Tags returns the tags associated with the point +func (p *Point) Tags() map[string]string { + return p.pt.Tags() +} + +// Time return the timestamp for the point +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns the unix nano time of the point +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point +func (p *Point) Fields() map[string]interface{} { + return p.pt.Fields() +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b bytes.Buffer + var d time.Duration + d, _ = time.ParseDuration("1" + bp.Precision()) + + for _, p := range bp.Points() { + pointstring := p.pt.RoundedString(d) + "\n" + + // Write and reset the buffer if we reach the max size + if b.Len()+len(pointstring) >= uc.payloadSize { + if _, err := uc.conn.Write(b.Bytes()); err != nil { + return err + } + b.Reset() + } + + if _, err := b.WriteString(pointstring); err != nil { + return err + } + } + + _, err := uc.conn.Write(b.Bytes()) + return err +} + +func (c *client) 
Write(bp BatchPoints) error { + var b bytes.Buffer + + for _, p := range bp.Points() { + if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { + return err + } + + if err := b.WriteByte('\n'); err != nil { + return err + } + } + + u := c.url + u.Path = "write" + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + return err + } + + return nil +} + +// Query defines a query to send to the server +type Query struct { + Command string + Database string + Precision string +} + +// NewQuery returns a query object +// database and precision strings can be empty strings if they are not needed +// for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return fmt.Errorf(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return fmt.Errorf(result.Err) + } + } + return nil +} + +// Result represents a resultset returned from a single statement. 
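+// Series holds one models.Row per series returned for the statement, and
+// Err carries the error, if any, reported for that statement.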
+type Result struct { + Series []models.Row + Err string `json:"error,omitempty"` +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +// Query sends a command to the server and returns the Response +func (c *client) Query(q Query) (*Response, error) { + u := c.url + u.Path = "query" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, decErr + } + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", + resp.StatusCode) + } + return &response, nil +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client_test.go b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go new file mode 100644 index 0000000000..9272292b66 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go @@ -0,0 +1,369 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" +) + +func TestUDPClient_Query(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + query := Query{} + _, err = c.Query(query) + if err == nil { + t.Error("Querying UDP client should fail") + } +} + +func TestUDPClient_Ping(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + rtt, version, err := c.Ping(0) + if rtt != 0 || version != "" || err != nil { + t.Errorf("unexpected error. expected (%v, '%v', %v), actual (%v, '%v', %v)", 0, "", nil, rtt, version, err) + } +} + +func TestUDPClient_Write(t *testing.T) { + config := UDPConfig{Addr: "localhost:8089"} + c, err := NewUDPClient(config) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + fields := make(map[string]interface{}) + fields["value"] = 1.0 + pt, _ := NewPoint("cpu", make(map[string]string), fields) + bp.AddPoint(pt) + + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} + +func TestUDPClient_BadAddr(t *testing.T) { + config := UDPConfig{Addr: "foobar@wahoo"} + c, err := NewUDPClient(config) + if err == nil { + defer c.Close() + t.Error("Expected resolve error") + } +} + +func TestClient_Query(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_BasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + + if !ok { + t.Errorf("basic auth error") + } + if u != "username" { + t.Errorf("unexpected username, expected %q, actual %q", "username", u) + } + if p != "password" { + t.Errorf("unexpected password, expected %q, actual %q", "password", p) + } + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL, Username: "username", Password: "password"} + c, _ := NewHTTPClient(config) + defer c.Close() + + query := Query{} + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Ping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_Write(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var data Response + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + config := HTTPConfig{Addr: ts.URL} + c, _ := NewHTTPClient(config) + defer c.Close() + + bp, err := NewBatchPoints(BatchPointsConfig{}) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } +} + +func TestClient_UserAgent(t *testing.T) { + receivedUserAgent := "" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + receivedUserAgent = r.UserAgent() + + var data Response + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + _, err := http.Get(ts.URL) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + tests := []struct { + name string + userAgent string + expected string + }{ + { + name: "Empty user agent", + userAgent: "", + expected: "InfluxDBClient", + }, + { + name: "Custom user agent", + userAgent: "Test Influx Client", + expected: "Test Influx Client", + }, + } + + for _, test := range tests { + + config := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent} + c, _ := NewHTTPClient(config) + defer c.Close() + + receivedUserAgent = "" + query := Query{} + _, err = c.Query(query) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + bp, _ := NewBatchPoints(BatchPointsConfig{}) + err = c.Write(bp) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if !strings.HasPrefix(receivedUserAgent, test.expected) { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + + receivedUserAgent = "" + _, err := c.Query(query) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + if receivedUserAgent != test.expected { + t.Errorf("Unexpected user agent. expected %v, actual %v", test.expected, receivedUserAgent) + } + } +} + +func TestClient_PointString(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + s = "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000" + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointWithoutTimeString(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + s := "cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39" + if p.String() != s { + t.Errorf("Point String Error, got %s, expected %s", p.String(), s) + } + + if p.PrecisionString("ms") != s { + t.Errorf("Point String Error, got %s, expected %s", + p.PrecisionString("ms"), s) + } +} + +func TestClient_PointName(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + exp := "cpu_usage" + if p.Name() != exp { + t.Errorf("Error, got %s, expected %s", + p.Name(), exp) + } +} + +func TestClient_PointTags(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + if !reflect.DeepEqual(tags, p.Tags()) { + t.Errorf("Error, got %v, expected %v", + p.Tags(), tags) + } +} + +func TestClient_PointUnixNano(t *testing.T) { + const shortForm = "2006-Jan-02" + time1, _ := time.Parse(shortForm, "2013-Feb-03") + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields, time1) + + exp := int64(1359849600000000000) + if p.UnixNano() != exp { + t.Errorf("Error, got %d, expected %d", + p.UnixNano(), exp) + } +} + +func TestClient_PointFields(t *testing.T) { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{"idle": 10.1, "system": 50.9, "user": 39.0} + p, _ := NewPoint("cpu_usage", tags, fields) + + if !reflect.DeepEqual(fields, p.Fields()) { + t.Errorf("Error, got %v, expected %v", + p.Fields(), fields) + } +} + +func TestBatchPoints_PrecisionError(t *testing.T) { + _, err := 
NewBatchPoints(BatchPointsConfig{Precision: "foobar"}) + if err == nil { + t.Errorf("Precision: foobar should have errored") + } + + bp, _ := NewBatchPoints(BatchPointsConfig{Precision: "ns"}) + err = bp.SetPrecision("foobar") + if err == nil { + t.Errorf("Precision: foobar should have errored") + } +} + +func TestBatchPoints_SettersGetters(t *testing.T) { + bp, _ := NewBatchPoints(BatchPointsConfig{ + Precision: "ns", + Database: "db", + RetentionPolicy: "rp", + WriteConsistency: "wc", + }) + if bp.Precision() != "ns" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "ns") + } + if bp.Database() != "db" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db") + } + if bp.RetentionPolicy() != "rp" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp") + } + if bp.WriteConsistency() != "wc" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc") + } + + bp.SetDatabase("db2") + bp.SetRetentionPolicy("rp2") + bp.SetWriteConsistency("wc2") + err := bp.SetPrecision("s") + if err != nil { + t.Errorf("Did not expect error: %s", err.Error()) + } + + if bp.Precision() != "s" { + t.Errorf("Expected: %s, got %s", bp.Precision(), "s") + } + if bp.Database() != "db2" { + t.Errorf("Expected: %s, got %s", bp.Database(), "db2") + } + if bp.RetentionPolicy() != "rp2" { + t.Errorf("Expected: %s, got %s", bp.RetentionPolicy(), "rp2") + } + if bp.WriteConsistency() != "wc2" { + t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") + } +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/example_test.go b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go new file mode 100644 index 0000000000..68bb24bc70 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/example_test.go @@ -0,0 +1,265 @@ +package client_test + +import ( + "fmt" + "math/rand" + "os" + "time" + + "github.com/influxdata/influxdb/client/v2" +) + +// Create a new client +func ExampleClient() { + // NOTE: this assumes you've setup a user and have setup shell env variables, + // namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below. 
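+	// Username and Password are optional; if authentication is disabled on
+	// the server they can simply be left empty.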
+ _, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + Username: os.Getenv("INFLUX_USER"), + Password: os.Getenv("INFLUX_PWD"), + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } +} + +// Write a point using the UDP client +func ExampleClient_uDP() { + // Make client + config := client.UDPConfig{Addr: "localhost:8089"} + c, err := client.NewUDPClient(config) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Ping the cluster using the HTTP client +func ExampleClient_Ping() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + _, _, err = c.Ping(0) + if err != nil { + fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) + } +} + +// Write a point using the HTTP client +func ExampleClient_write() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) + + // Write the batch + c.Write(bp) +} + +// Create a batch and add a point +func ExampleBatchPoints() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "BumbleBeeTuna", + Precision: "s", + }) + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Using the BatchPoints setter functions +func ExampleBatchPoints_setters() { + // Create a new point batch + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) + bp.SetDatabase("BumbleBeeTuna") + bp.SetPrecision("ms") + + // Create a point and add to batch + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err != nil { + fmt.Println("Error: ", err.Error()) + } + bp.AddPoint(pt) +} + +// Create a new point with a timestamp +func ExamplePoint() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) + if err == nil { + 
fmt.Println("We created a point: ", pt.String()) + } +} + +// Create a new point without a timestamp +func ExamplePoint_withoutTime() { + tags := map[string]string{"cpu": "cpu-total"} + fields := map[string]interface{}{ + "idle": 10.1, + "system": 53.3, + "user": 46.6, + } + pt, err := client.NewPoint("cpu_usage", tags, fields) + if err == nil { + fmt.Println("We created a point w/o time: ", pt.String()) + } +} + +// Write 1000 points +func ExampleClient_write1000() { + sampleSize := 1000 + + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + rand.Seed(42) + + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ + Database: "systemstats", + Precision: "us", + }) + + for i := 0; i < sampleSize; i++ { + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} + tags := map[string]string{ + "cpu": "cpu-total", + "host": fmt.Sprintf("host%d", rand.Intn(1000)), + "region": regions[rand.Intn(len(regions))], + } + + idle := rand.Float64() * 100.0 + fields := map[string]interface{}{ + "idle": idle, + "busy": 100.0 - idle, + } + + pt, err := client.NewPoint( + "cpu_usage", + tags, + fields, + time.Now(), + ) + if err != nil { + println("Error:", err.Error()) + continue + } + bp.AddPoint(pt) + } + + err = c.Write(bp) + if err != nil { + fmt.Println("Error: ", err.Error()) + } +} + +// Make a Query +func ExampleClient_query() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} + +// Create a Database with a query +func ExampleClient_createDatabase() { + // Make client + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("CREATE DATABASE telegraf", "", "") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/balancer.go b/vendor/github.com/influxdata/influxdb/cluster/balancer.go new file mode 100644 index 0000000000..cb565f2388 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/balancer.go @@ -0,0 +1,69 @@ +package cluster + +import ( + "math/rand" + + "github.com/influxdata/influxdb/services/meta" +) + +// Balancer represents a load-balancing algorithm for a set of nodes +type Balancer interface { + // Next returns the next Node according to the balancing method + // or nil if there are no nodes available + Next() *meta.NodeInfo +} + +type nodeBalancer struct { + nodes []meta.NodeInfo // data nodes to balance between + p int // current node index +} + +// NewNodeBalancer create a shuffled, round-robin balancer so that +// multiple instances will return nodes in randomized order and each +// each returned node will be repeated in a cycle +func NewNodeBalancer(nodes []meta.NodeInfo) Balancer { + // make a copy of the node slice so we can randomize it + // without affecting the original instance as well as ensure + // that each Balancer returns nodes in a different order + b := 
&nodeBalancer{} + + b.nodes = make([]meta.NodeInfo, len(nodes)) + copy(b.nodes, nodes) + + b.shuffle() + return b +} + +// shuffle randomizes the ordering the balancers available nodes +func (b *nodeBalancer) shuffle() { + for i := range b.nodes { + j := rand.Intn(i + 1) + b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i] + } +} + +// online returns a slice of the nodes that are online +func (b *nodeBalancer) online() []meta.NodeInfo { + return b.nodes +} + +// Next returns the next available nodes +func (b *nodeBalancer) Next() *meta.NodeInfo { + // only use online nodes + up := b.online() + + // no nodes online + if len(up) == 0 { + return nil + } + + // rollover back to the beginning + if b.p >= len(up) { + b.p = 0 + } + + d := &up[b.p] + b.p++ + + return d +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/balancer_test.go b/vendor/github.com/influxdata/influxdb/cluster/balancer_test.go new file mode 100644 index 0000000000..0e52d2381a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/balancer_test.go @@ -0,0 +1,115 @@ +package cluster_test + +import ( + "fmt" + "testing" + + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/services/meta" +) + +func NewNodes() []meta.NodeInfo { + var nodes []meta.NodeInfo + for i := 1; i <= 2; i++ { + nodes = append(nodes, meta.NodeInfo{ + ID: uint64(i), + Host: fmt.Sprintf("localhost:999%d", i), + }) + } + return nodes +} + +func TestBalancerEmptyNodes(t *testing.T) { + b := cluster.NewNodeBalancer([]meta.NodeInfo{}) + got := b.Next() + if got != nil { + t.Errorf("expected nil, got %v", got) + } +} + +func TestBalancerUp(t *testing.T) { + nodes := NewNodes() + b := cluster.NewNodeBalancer(nodes) + + // First node in randomized round-robin order + first := b.Next() + if first == nil { + t.Errorf("expected datanode, got %v", first) + } + + // Second node in randomized round-robin order + second := b.Next() + if second == nil { + t.Errorf("expected datanode, got %v", second) + } + + // Should never get the same node in order twice + if first.ID == second.ID { + t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) + } +} + +/* +func TestBalancerDown(t *testing.T) { + nodes := NewNodes() + b := cluster.NewNodeBalancer(nodes) + + nodes[0].Down() + + // First node in randomized round-robin order + first := b.Next() + if first == nil { + t.Errorf("expected datanode, got %v", first) + } + + // Second node should rollover to the first up node + second := b.Next() + if second == nil { + t.Errorf("expected datanode, got %v", second) + } + + // Health node should be returned each time + if first.ID != 2 && first.ID != second.ID { + t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) + } +} +*/ + +/* +func TestBalancerBackUp(t *testing.T) { + nodes := newDataNodes() + b := cluster.NewNodeBalancer(nodes) + + nodes[0].Down() + + for i := 0; i < 3; i++ { + got := b.Next() + if got == nil { + t.Errorf("expected datanode, got %v", got) + } + + if exp := uint64(2); got.ID != exp { + t.Errorf("wrong node id: exp %v, got %v", exp, got.ID) + } + } + + nodes[0].Up() + + // First node in randomized round-robin order + first := b.Next() + if first == nil { + t.Errorf("expected datanode, got %v", first) + } + + // Second node should rollover to the first up node + second := b.Next() + if second == nil { + t.Errorf("expected datanode, got %v", second) + } + + // Should get both nodes returned + if first.ID == second.ID { + t.Errorf("expected first != second. 
got %v = %v", first.ID, second.ID) + } +} +*/ diff --git a/vendor/github.com/influxdata/influxdb/cluster/client_pool.go b/vendor/github.com/influxdata/influxdb/cluster/client_pool.go new file mode 100644 index 0000000000..fed7e18e0e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/client_pool.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "net" + "sync" + + "gopkg.in/fatih/pool.v2" +) + +type clientPool struct { + mu sync.RWMutex + pool map[uint64]pool.Pool +} + +func newClientPool() *clientPool { + return &clientPool{ + pool: make(map[uint64]pool.Pool), + } +} + +func (c *clientPool) setPool(nodeID uint64, p pool.Pool) { + c.mu.Lock() + c.pool[nodeID] = p + c.mu.Unlock() +} + +func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) { + c.mu.RLock() + p, ok := c.pool[nodeID] + c.mu.RUnlock() + return p, ok +} + +func (c *clientPool) size() int { + c.mu.RLock() + var size int + for _, p := range c.pool { + size += p.Len() + } + c.mu.RUnlock() + return size +} + +func (c *clientPool) conn(nodeID uint64) (net.Conn, error) { + c.mu.RLock() + conn, err := c.pool[nodeID].Get() + c.mu.RUnlock() + return conn, err +} + +func (c *clientPool) close() { + c.mu.Lock() + for _, p := range c.pool { + p.Close() + } + c.mu.Unlock() +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/cluster.go b/vendor/github.com/influxdata/influxdb/cluster/cluster.go new file mode 100644 index 0000000000..ed37c240f5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/cluster.go @@ -0,0 +1 @@ +package cluster // import "github.com/influxdata/influxdb/cluster" diff --git a/vendor/github.com/influxdata/influxdb/cluster/config.go b/vendor/github.com/influxdata/influxdb/cluster/config.go new file mode 100644 index 0000000000..0f1453ec2f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/config.go @@ -0,0 +1,41 @@ +package cluster + +import ( + "time" + + "github.com/influxdata/influxdb/toml" +) + +const ( + // DefaultWriteTimeout is the default timeout for a complete write to succeed. + DefaultWriteTimeout = 5 * time.Second + + // DefaultShardWriterTimeout is the default timeout set on shard writers. + DefaultShardWriterTimeout = 5 * time.Second + + // DefaultShardMapperTimeout is the default timeout set on shard mappers. + DefaultShardMapperTimeout = 5 * time.Second + + // DefaultMaxRemoteWriteConnections is the maximum number of open connections + // that will be available for remote writes to another host. + DefaultMaxRemoteWriteConnections = 3 +) + +// Config represents the configuration for the clustering service. +type Config struct { + ForceRemoteShardMapping bool `toml:"force-remote-mapping"` + WriteTimeout toml.Duration `toml:"write-timeout"` + ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"` + MaxRemoteWriteConnections int `toml:"max-remote-write-connections"` + ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"` +} + +// NewConfig returns an instance of Config with defaults. 
+func NewConfig() Config { + return Config{ + WriteTimeout: toml.Duration(DefaultWriteTimeout), + ShardWriterTimeout: toml.Duration(DefaultShardWriterTimeout), + ShardMapperTimeout: toml.Duration(DefaultShardMapperTimeout), + MaxRemoteWriteConnections: DefaultMaxRemoteWriteConnections, + } +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/config_test.go b/vendor/github.com/influxdata/influxdb/cluster/config_test.go new file mode 100644 index 0000000000..ed3bdf8c3c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/config_test.go @@ -0,0 +1,27 @@ +package cluster_test + +import ( + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/cluster" +) + +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c cluster.Config + if _, err := toml.Decode(` +shard-writer-timeout = "10s" +write-timeout = "20s" +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. + if time.Duration(c.ShardWriterTimeout) != 10*time.Second { + t.Fatalf("unexpected shard-writer timeout: %s", c.ShardWriterTimeout) + } else if time.Duration(c.WriteTimeout) != 20*time.Second { + t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/internal/data.pb.go b/vendor/github.com/influxdata/influxdb/cluster/internal/data.pb.go new file mode 100644 index 0000000000..667cea9fb1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/internal/data.pb.go @@ -0,0 +1,301 @@ +// Code generated by protoc-gen-gogo. +// source: internal/data.proto +// DO NOT EDIT! + +/* +Package internal is a generated protocol buffer package. + +It is generated from these files: + internal/data.proto + +It has these top-level messages: + WriteShardRequest + WriteShardResponse + ExecuteStatementRequest + ExecuteStatementResponse + CreateIteratorRequest + CreateIteratorResponse + FieldDimensionsRequest + FieldDimensionsResponse + SeriesKeysRequest + SeriesKeysResponse +*/ +package internal + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type WriteShardRequest struct { + ShardID *uint64 `protobuf:"varint,1,req,name=ShardID" json:"ShardID,omitempty"` + Points [][]byte `protobuf:"bytes,2,rep,name=Points" json:"Points,omitempty"` + Database *string `protobuf:"bytes,3,opt,name=Database" json:"Database,omitempty"` + RetentionPolicy *string `protobuf:"bytes,4,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WriteShardRequest) Reset() { *m = WriteShardRequest{} } +func (m *WriteShardRequest) String() string { return proto.CompactTextString(m) } +func (*WriteShardRequest) ProtoMessage() {} + +func (m *WriteShardRequest) GetShardID() uint64 { + if m != nil && m.ShardID != nil { + return *m.ShardID + } + return 0 +} + +func (m *WriteShardRequest) GetPoints() [][]byte { + if m != nil { + return m.Points + } + return nil +} + +func (m *WriteShardRequest) GetDatabase() string { + if m != nil && m.Database != nil { + return *m.Database + } + return "" +} + +func (m *WriteShardRequest) GetRetentionPolicy() string { + if m != nil && m.RetentionPolicy != nil { + return *m.RetentionPolicy + } + return "" +} + +type WriteShardResponse struct { + Code *int32 `protobuf:"varint,1,req,name=Code" json:"Code,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=Message" json:"Message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WriteShardResponse) Reset() { *m = WriteShardResponse{} } +func (m *WriteShardResponse) String() string { return proto.CompactTextString(m) } +func (*WriteShardResponse) ProtoMessage() {} + +func (m *WriteShardResponse) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *WriteShardResponse) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type ExecuteStatementRequest struct { + Statement *string `protobuf:"bytes,1,req,name=Statement" json:"Statement,omitempty"` + Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExecuteStatementRequest) Reset() { *m = ExecuteStatementRequest{} } +func (m *ExecuteStatementRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteStatementRequest) ProtoMessage() {} + +func (m *ExecuteStatementRequest) GetStatement() string { + if m != nil && m.Statement != nil { + return *m.Statement + } + return "" +} + +func (m *ExecuteStatementRequest) GetDatabase() string { + if m != nil && m.Database != nil { + return *m.Database + } + return "" +} + +type ExecuteStatementResponse struct { + Code *int32 `protobuf:"varint,1,req,name=Code" json:"Code,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=Message" json:"Message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExecuteStatementResponse) Reset() { *m = ExecuteStatementResponse{} } +func (m *ExecuteStatementResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteStatementResponse) ProtoMessage() {} + +func (m *ExecuteStatementResponse) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ExecuteStatementResponse) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type CreateIteratorRequest struct { + ShardIDs []uint64 `protobuf:"varint,1,rep,name=ShardIDs" json:"ShardIDs,omitempty"` + Opt []byte `protobuf:"bytes,2,req,name=Opt" json:"Opt,omitempty"` + XXX_unrecognized 
[]byte `json:"-"` +} + +func (m *CreateIteratorRequest) Reset() { *m = CreateIteratorRequest{} } +func (m *CreateIteratorRequest) String() string { return proto.CompactTextString(m) } +func (*CreateIteratorRequest) ProtoMessage() {} + +func (m *CreateIteratorRequest) GetShardIDs() []uint64 { + if m != nil { + return m.ShardIDs + } + return nil +} + +func (m *CreateIteratorRequest) GetOpt() []byte { + if m != nil { + return m.Opt + } + return nil +} + +type CreateIteratorResponse struct { + Err *string `protobuf:"bytes,1,opt,name=Err" json:"Err,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateIteratorResponse) Reset() { *m = CreateIteratorResponse{} } +func (m *CreateIteratorResponse) String() string { return proto.CompactTextString(m) } +func (*CreateIteratorResponse) ProtoMessage() {} + +func (m *CreateIteratorResponse) GetErr() string { + if m != nil && m.Err != nil { + return *m.Err + } + return "" +} + +type FieldDimensionsRequest struct { + ShardIDs []uint64 `protobuf:"varint,1,rep,name=ShardIDs" json:"ShardIDs,omitempty"` + Sources []byte `protobuf:"bytes,2,req,name=Sources" json:"Sources,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDimensionsRequest) Reset() { *m = FieldDimensionsRequest{} } +func (m *FieldDimensionsRequest) String() string { return proto.CompactTextString(m) } +func (*FieldDimensionsRequest) ProtoMessage() {} + +func (m *FieldDimensionsRequest) GetShardIDs() []uint64 { + if m != nil { + return m.ShardIDs + } + return nil +} + +func (m *FieldDimensionsRequest) GetSources() []byte { + if m != nil { + return m.Sources + } + return nil +} + +type FieldDimensionsResponse struct { + Fields []string `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"` + Dimensions []string `protobuf:"bytes,2,rep,name=Dimensions" json:"Dimensions,omitempty"` + Err *string `protobuf:"bytes,3,opt,name=Err" json:"Err,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDimensionsResponse) Reset() { *m = FieldDimensionsResponse{} } +func (m *FieldDimensionsResponse) String() string { return proto.CompactTextString(m) } +func (*FieldDimensionsResponse) ProtoMessage() {} + +func (m *FieldDimensionsResponse) GetFields() []string { + if m != nil { + return m.Fields + } + return nil +} + +func (m *FieldDimensionsResponse) GetDimensions() []string { + if m != nil { + return m.Dimensions + } + return nil +} + +func (m *FieldDimensionsResponse) GetErr() string { + if m != nil && m.Err != nil { + return *m.Err + } + return "" +} + +type SeriesKeysRequest struct { + ShardIDs []uint64 `protobuf:"varint,1,rep,name=ShardIDs" json:"ShardIDs,omitempty"` + Opt []byte `protobuf:"bytes,2,req,name=Opt" json:"Opt,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SeriesKeysRequest) Reset() { *m = SeriesKeysRequest{} } +func (m *SeriesKeysRequest) String() string { return proto.CompactTextString(m) } +func (*SeriesKeysRequest) ProtoMessage() {} + +func (m *SeriesKeysRequest) GetShardIDs() []uint64 { + if m != nil { + return m.ShardIDs + } + return nil +} + +func (m *SeriesKeysRequest) GetOpt() []byte { + if m != nil { + return m.Opt + } + return nil +} + +type SeriesKeysResponse struct { + SeriesList []byte `protobuf:"bytes,1,opt,name=SeriesList" json:"SeriesList,omitempty"` + Err *string `protobuf:"bytes,2,opt,name=Err" json:"Err,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SeriesKeysResponse) Reset() { *m = SeriesKeysResponse{} } +func (m *SeriesKeysResponse) String() string { return proto.CompactTextString(m) } 
+func (*SeriesKeysResponse) ProtoMessage() {} + +func (m *SeriesKeysResponse) GetSeriesList() []byte { + if m != nil { + return m.SeriesList + } + return nil +} + +func (m *SeriesKeysResponse) GetErr() string { + if m != nil && m.Err != nil { + return *m.Err + } + return "" +} + +func init() { + proto.RegisterType((*WriteShardRequest)(nil), "internal.WriteShardRequest") + proto.RegisterType((*WriteShardResponse)(nil), "internal.WriteShardResponse") + proto.RegisterType((*ExecuteStatementRequest)(nil), "internal.ExecuteStatementRequest") + proto.RegisterType((*ExecuteStatementResponse)(nil), "internal.ExecuteStatementResponse") + proto.RegisterType((*CreateIteratorRequest)(nil), "internal.CreateIteratorRequest") + proto.RegisterType((*CreateIteratorResponse)(nil), "internal.CreateIteratorResponse") + proto.RegisterType((*FieldDimensionsRequest)(nil), "internal.FieldDimensionsRequest") + proto.RegisterType((*FieldDimensionsResponse)(nil), "internal.FieldDimensionsResponse") + proto.RegisterType((*SeriesKeysRequest)(nil), "internal.SeriesKeysRequest") + proto.RegisterType((*SeriesKeysResponse)(nil), "internal.SeriesKeysResponse") +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/internal/data.proto b/vendor/github.com/influxdata/influxdb/cluster/internal/data.proto new file mode 100644 index 0000000000..97e686e306 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/internal/data.proto @@ -0,0 +1,54 @@ +package internal; + +message WriteShardRequest { + required uint64 ShardID = 1; + repeated bytes Points = 2; + optional string Database = 3; + optional string RetentionPolicy = 4; +} + +message WriteShardResponse { + required int32 Code = 1; + optional string Message = 2; +} + +message ExecuteStatementRequest { + required string Statement = 1; + required string Database = 2; +} + +message ExecuteStatementResponse { + required int32 Code = 1; + optional string Message = 2; +} + +message CreateIteratorRequest { + repeated uint64 ShardIDs = 1; + required bytes Opt = 2; +} + +message CreateIteratorResponse { + optional string Err = 1; +} + +message FieldDimensionsRequest { + repeated uint64 ShardIDs = 1; + required bytes Sources = 2; +} + +message FieldDimensionsResponse { + repeated string Fields = 1; + repeated string Dimensions = 2; + optional string Err = 3; +} + +message SeriesKeysRequest { + repeated uint64 ShardIDs = 1; + required bytes Opt = 2; +} + +message SeriesKeysResponse { + optional bytes SeriesList = 1; + optional string Err = 2; +} + diff --git a/vendor/github.com/influxdata/influxdb/cluster/meta_client.go b/vendor/github.com/influxdata/influxdb/cluster/meta_client.go new file mode 100644 index 0000000000..dfdb7a09d7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/meta_client.go @@ -0,0 +1,40 @@ +package cluster + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +// MetaClient is an interface for accessing meta data. 
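+// The mock in meta_client_test.go implements the same method set, so the
+// cluster code can be exercised in tests without a running meta service.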
+type MetaClient interface { + CreateContinuousQuery(database, name, query string) error + CreateDatabase(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicy(name string, rpi *meta.RetentionPolicyInfo) (*meta.DatabaseInfo, error) + CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) + CreateSubscription(database, rp, name, mode string, destinations []string) error + CreateUser(name, password string, admin bool) (*meta.UserInfo, error) + Database(name string) (*meta.DatabaseInfo, error) + Databases() ([]meta.DatabaseInfo, error) + DataNode(id uint64) (*meta.NodeInfo, error) + DataNodes() ([]meta.NodeInfo, error) + DeleteDataNode(id uint64) error + DeleteMetaNode(id uint64) error + DropContinuousQuery(database, name string) error + DropDatabase(name string) error + DropRetentionPolicy(database, name string) error + DropSubscription(database, rp, name string) error + DropUser(name string) error + MetaNodes() ([]meta.NodeInfo, error) + RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilege(username string, admin bool) error + SetDefaultRetentionPolicy(database, name string) error + SetPrivilege(username, database string, p influxql.Privilege) error + ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate) error + UpdateUser(name, password string) error + UserPrivilege(username, database string) (*influxql.Privilege, error) + UserPrivileges(username string) (map[string]influxql.Privilege, error) + Users() []meta.UserInfo +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/meta_client_test.go b/vendor/github.com/influxdata/influxdb/cluster/meta_client_test.go new file mode 100644 index 0000000000..ebaff4c8db --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/meta_client_test.go @@ -0,0 +1,160 @@ +package cluster_test + +import ( + "time" + + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +// MetaClient is a mockable implementation of cluster.MetaClient. 
+type MetaClient struct { + CreateContinuousQueryFn func(database, name, query string) error + CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) + CreateDatabaseWithRetentionPolicyFn func(name string, rpi *meta.RetentionPolicyInfo) (*meta.DatabaseInfo, error) + CreateRetentionPolicyFn func(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) + CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error + CreateUserFn func(name, password string, admin bool) (*meta.UserInfo, error) + DatabaseFn func(name string) (*meta.DatabaseInfo, error) + DatabasesFn func() ([]meta.DatabaseInfo, error) + DataNodeFn func(id uint64) (*meta.NodeInfo, error) + DataNodesFn func() ([]meta.NodeInfo, error) + DeleteDataNodeFn func(id uint64) error + DeleteMetaNodeFn func(id uint64) error + DropContinuousQueryFn func(database, name string) error + DropDatabaseFn func(name string) error + DropRetentionPolicyFn func(database, name string) error + DropSubscriptionFn func(database, rp, name string) error + DropUserFn func(name string) error + MetaNodesFn func() ([]meta.NodeInfo, error) + RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) + SetAdminPrivilegeFn func(username string, admin bool) error + SetDefaultRetentionPolicyFn func(database, name string) error + SetPrivilegeFn func(username, database string, p influxql.Privilege) error + ShardsByTimeRangeFn func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) + UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate) error + UpdateUserFn func(name, password string) error + UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) + UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) + UsersFn func() []meta.UserInfo +} + +func (c *MetaClient) CreateContinuousQuery(database, name, query string) error { + return c.CreateContinuousQueryFn(database, name, query) +} + +func (c *MetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseFn(name) +} + +func (c *MetaClient) CreateDatabaseWithRetentionPolicy(name string, rpi *meta.RetentionPolicyInfo) (*meta.DatabaseInfo, error) { + return c.CreateDatabaseWithRetentionPolicyFn(name, rpi) +} + +func (c *MetaClient) CreateRetentionPolicy(database string, rpi *meta.RetentionPolicyInfo) (*meta.RetentionPolicyInfo, error) { + return c.CreateRetentionPolicyFn(database, rpi) +} + +func (c *MetaClient) CreateSubscription(database, rp, name, mode string, destinations []string) error { + return c.CreateSubscriptionFn(database, rp, name, mode, destinations) +} + +func (c *MetaClient) CreateUser(name, password string, admin bool) (*meta.UserInfo, error) { + return c.CreateUserFn(name, password, admin) +} + +func (c *MetaClient) Database(name string) (*meta.DatabaseInfo, error) { + return c.DatabaseFn(name) +} + +func (c *MetaClient) Databases() ([]meta.DatabaseInfo, error) { + return c.DatabasesFn() +} + +func (c *MetaClient) DataNode(id uint64) (*meta.NodeInfo, error) { + return c.DataNodeFn(id) +} + +func (c *MetaClient) DataNodes() ([]meta.NodeInfo, error) { + return c.DataNodesFn() +} + +func (c *MetaClient) DeleteDataNode(id uint64) error { + return c.DeleteDataNodeFn(id) +} + +func (c *MetaClient) DeleteMetaNode(id uint64) error { + return c.DeleteMetaNodeFn(id) +} + +func (c *MetaClient) DropContinuousQuery(database, name string) error { + return c.DropContinuousQueryFn(database, name) 
+} + +func (c *MetaClient) DropDatabase(name string) error { + return c.DropDatabaseFn(name) +} + +func (c *MetaClient) DropRetentionPolicy(database, name string) error { + return c.DropRetentionPolicyFn(database, name) +} + +func (c *MetaClient) DropSubscription(database, rp, name string) error { + return c.DropSubscriptionFn(database, rp, name) +} + +func (c *MetaClient) DropUser(name string) error { + return c.DropUserFn(name) +} + +func (c *MetaClient) MetaNodes() ([]meta.NodeInfo, error) { + return c.MetaNodesFn() +} + +func (c *MetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { + return c.RetentionPolicyFn(database, name) +} + +func (c *MetaClient) SetAdminPrivilege(username string, admin bool) error { + return c.SetAdminPrivilegeFn(username, admin) +} + +func (c *MetaClient) SetDefaultRetentionPolicy(database, name string) error { + return c.SetDefaultRetentionPolicyFn(database, name) +} + +func (c *MetaClient) SetPrivilege(username, database string, p influxql.Privilege) error { + return c.SetPrivilegeFn(username, database, p) +} + +func (c *MetaClient) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return c.ShardsByTimeRangeFn(sources, tmin, tmax) +} + +func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate) error { + return c.UpdateRetentionPolicyFn(database, name, rpu) +} + +func (c *MetaClient) UpdateUser(name, password string) error { + return c.UpdateUserFn(name, password) +} + +func (c *MetaClient) UserPrivilege(username, database string) (*influxql.Privilege, error) { + return c.UserPrivilegeFn(username, database) +} + +func (c *MetaClient) UserPrivileges(username string) (map[string]influxql.Privilege, error) { + return c.UserPrivilegesFn(username) +} + +func (c *MetaClient) Users() []meta.UserInfo { + return c.UsersFn() +} + +// DefaultMetaClientDatabaseFn returns a single database (db0) with a retention policy. +func DefaultMetaClientDatabaseFn(name string) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{ + Name: DefaultDatabase, + DefaultRetentionPolicy: DefaultRetentionPolicy, + }, nil +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/meta_executor.go b/vendor/github.com/influxdata/influxdb/cluster/meta_executor.go new file mode 100644 index 0000000000..3433b0fa61 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/meta_executor.go @@ -0,0 +1,171 @@ +package cluster + +import ( + "fmt" + "log" + "net" + "os" + "sync" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +const ( + metaExecutorWriteTimeout = 5 * time.Second + metaExecutorMaxWriteConnections = 10 +) + +// MetaExecutor executes meta queries on all data nodes. +type MetaExecutor struct { + mu sync.RWMutex + timeout time.Duration + pool *clientPool + maxConnections int + Logger *log.Logger + Node *influxdb.Node + + nodeExecutor interface { + executeOnNode(stmt influxql.Statement, database string, node *meta.NodeInfo) error + } + + MetaClient interface { + DataNode(id uint64) (ni *meta.NodeInfo, err error) + DataNodes() ([]meta.NodeInfo, error) + } +} + +// NewMetaExecutor returns a new initialized *MetaExecutor. 
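
Editor's note: the MetaClient test double above uses the function-field pattern, where every interface method delegates to a swappable `...Fn` field so each test overrides only the calls it cares about. A stripped-down sketch of that pattern; `Databaser` and `MockDatabaser` are illustrative names, not types from this patch:

```go
package main

import "fmt"

// Databaser stands in for a small slice of a larger interface.
type Databaser interface {
	Database(name string) (string, error)
}

// MockDatabaser delegates each method to a per-test function field.
type MockDatabaser struct {
	DatabaseFn func(name string) (string, error)
}

func (m *MockDatabaser) Database(name string) (string, error) {
	return m.DatabaseFn(name)
}

func main() {
	// A test wires up only the behaviour it needs.
	var c Databaser = &MockDatabaser{
		DatabaseFn: func(name string) (string, error) {
			return "db0:" + name, nil
		},
	}
	got, _ := c.Database("foo")
	fmt.Println(got) // db0:foo
}
```
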
+func NewMetaExecutor() *MetaExecutor { + m := &MetaExecutor{ + timeout: metaExecutorWriteTimeout, + pool: newClientPool(), + maxConnections: metaExecutorMaxWriteConnections, + Logger: log.New(os.Stderr, "[meta-executor] ", log.LstdFlags), + } + m.nodeExecutor = m + + return m +} + +// remoteNodeError wraps an error with context about a node that +// returned the error. +type remoteNodeError struct { + id uint64 + err error +} + +func (e remoteNodeError) Error() string { + return fmt.Sprintf("partial success, node %d may be down (%s)", e.id, e.err) +} + +// ExecuteStatement executes a single InfluxQL statement on all nodes in the cluster concurrently. +func (m *MetaExecutor) ExecuteStatement(stmt influxql.Statement, database string) error { + // Get a list of all nodes the query needs to be executed on. + nodes, err := m.MetaClient.DataNodes() + if err != nil { + return err + } else if len(nodes) < 1 { + return nil + } + + // Start a goroutine to execute the statement on each of the remote nodes. + var wg sync.WaitGroup + errs := make(chan error, len(nodes)-1) + for _, node := range nodes { + if m.Node.ID == node.ID { + continue // Don't execute statement on ourselves. + } + + wg.Add(1) + go func(node meta.NodeInfo) { + defer wg.Done() + if err := m.nodeExecutor.executeOnNode(stmt, database, &node); err != nil { + errs <- remoteNodeError{id: node.ID, err: err} + } + }(node) + } + + // Wait on n-1 nodes to execute the statement and respond. + wg.Wait() + + select { + case err = <-errs: + return err + default: + return nil + } +} + +// executeOnNode executes a single InfluxQL statement on a single node. +func (m *MetaExecutor) executeOnNode(stmt influxql.Statement, database string, node *meta.NodeInfo) error { + // We're executing on a remote node so establish a connection. + c, err := m.dial(node.ID) + if err != nil { + return err + } + + conn, ok := c.(*pooledConn) + if !ok { + panic("wrong connection type in MetaExecutor") + } + // Return connection to pool by "closing" it. + defer conn.Close() + + // Build RPC request. + var request ExecuteStatementRequest + request.SetStatement(stmt.String()) + request.SetDatabase(database) + + // Marshal into protocol buffer. + buf, err := request.MarshalBinary() + if err != nil { + return err + } + + // Send request. + conn.SetWriteDeadline(time.Now().Add(m.timeout)) + if err := WriteTLV(conn, executeStatementRequestMessage, buf); err != nil { + conn.MarkUnusable() + return err + } + + // Read the response. + conn.SetReadDeadline(time.Now().Add(m.timeout)) + _, buf, err = ReadTLV(conn) + if err != nil { + conn.MarkUnusable() + return err + } + + // Unmarshal response. + var response ExecuteStatementResponse + if err := response.UnmarshalBinary(buf); err != nil { + return err + } + + if response.Code() != 0 { + return fmt.Errorf("error code %d: %s", response.Code(), response.Message()) + } + + return nil +} + +// dial returns a connection to a single node in the cluster. 
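
Editor's note: ExecuteStatement above fans the statement out to every remote node with a WaitGroup, collects failures on a buffered error channel, and then reports at most one of them. A condensed, self-contained sketch of that fan-out/collect shape; `runOnNode` is a placeholder for the real executeOnNode:

```go
package main

import (
	"fmt"
	"sync"
)

// runOnNode is a stand-in for executing a statement on one remote node.
func runOnNode(id uint64) error {
	if id == 3 {
		return fmt.Errorf("node %d unreachable", id)
	}
	return nil
}

func main() {
	nodes := []uint64{1, 2, 3}

	var wg sync.WaitGroup
	errs := make(chan error, len(nodes)) // buffered so workers never block

	for _, id := range nodes {
		wg.Add(1)
		go func(id uint64) {
			defer wg.Done()
			if err := runOnNode(id); err != nil {
				errs <- err
			}
		}(id)
	}
	wg.Wait()

	// Report the first error, if any, just as ExecuteStatement does.
	select {
	case err := <-errs:
		fmt.Println("error:", err)
	default:
		fmt.Println("all nodes ok")
	}
}
```
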
+func (m *MetaExecutor) dial(nodeID uint64) (net.Conn, error) { + // If we don't have a connection pool for that addr yet, create one + _, ok := m.pool.getPool(nodeID) + if !ok { + factory := &connFactory{nodeID: nodeID, clientPool: m.pool, timeout: m.timeout} + factory.metaClient = m.MetaClient + + p, err := NewBoundedPool(1, m.maxConnections, m.timeout, factory.dial) + if err != nil { + return nil, err + } + m.pool.setPool(nodeID, p) + } + return m.pool.conn(nodeID) +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/meta_executor_test.go b/vendor/github.com/influxdata/influxdb/cluster/meta_executor_test.go new file mode 100644 index 0000000000..32eda8e6e0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/meta_executor_test.go @@ -0,0 +1,121 @@ +package cluster + +import ( + "fmt" + "sync" + "testing" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" +) + +func Test_ExecuteStatement(t *testing.T) { + numOfNodes := 3 + + mock := newMockExecutor() + // Expect each statement twice because we have 3 nodes, 2 of which + // are remote and should be executed on. + mock.expect("DROP RETENTION POLICY rp0 on foo") + mock.expect("DROP RETENTION POLICY rp0 on foo") + mock.expect("DROP DATABASE foo") + mock.expect("DROP DATABASE foo") + + e := NewMetaExecutor() + e.MetaClient = newMockMetaClient(numOfNodes) + e.Node = influxdb.NewNode("/tmp/node") + e.Node.ID = 1 + // Replace MetaExecutor's nodeExecutor with our mock. + e.nodeExecutor = mock + + if err := e.ExecuteStatement(mustParseStatement("DROP RETENTION POLICY rp0 on foo"), "foo"); err != nil { + t.Fatal(err) + } + if err := e.ExecuteStatement(mustParseStatement("DROP DATABASE foo"), "foo"); err != nil { + t.Fatal(err) + } + + if err := mock.done(); err != nil { + t.Fatal(err) + } +} + +type mockExecutor struct { + mu sync.Mutex + expectStatements []influxql.Statement + idx int +} + +func newMockExecutor() *mockExecutor { + return &mockExecutor{ + idx: -1, + } +} + +func (e *mockExecutor) expect(stmt string) { + s := mustParseStatement(stmt) + e.expectStatements = append(e.expectStatements, s) +} + +func (e *mockExecutor) done() error { + if e.idx+1 != len(e.expectStatements) { + return fmt.Errorf("expected %d mockExecuteOnNode calls, got %d", len(e.expectStatements), e.idx+1) + } + return nil +} + +func (e *mockExecutor) executeOnNode(stmt influxql.Statement, database string, node *meta.NodeInfo) error { + e.mu.Lock() + defer e.mu.Unlock() + + e.idx++ + + if e.idx > len(e.expectStatements)-1 { + return fmt.Errorf("extra statement: %s", stmt.String()) + } + + if e.expectStatements[e.idx].String() != stmt.String() { + return fmt.Errorf("unexpected statement:\n\texp: %s\n\tgot: %s\n", e.expectStatements[e.idx].String(), stmt.String()) + } + return nil +} + +func mustParseStatement(stmt string) influxql.Statement { + s, err := influxql.ParseStatement(stmt) + if err != nil { + panic(err) + } + return s +} + +type mockMetaClient struct { + nodes []meta.NodeInfo +} + +func newMockMetaClient(nodeCnt int) *mockMetaClient { + c := &mockMetaClient{} + for i := 0; i < nodeCnt; i++ { + n := meta.NodeInfo{ + ID: uint64(i + 1), + Host: fmt.Sprintf("localhost:%d", 8000+i), + TCPHost: fmt.Sprintf("localhost:%d", 9000+i), + } + c.nodes = append(c.nodes, n) + } + + return c +} + +func (c *mockMetaClient) DataNode(id uint64) (ni *meta.NodeInfo, err error) { + for i := 0; i < len(c.nodes); i++ { + if c.nodes[i].ID == id { + ni = &c.nodes[i] + 
return + } + } + return +} + +func (c *mockMetaClient) DataNodes() ([]meta.NodeInfo, error) { + return c.nodes, nil +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/points_writer.go b/vendor/github.com/influxdata/influxdb/cluster/points_writer.go new file mode 100644 index 0000000000..da21cb51e3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/points_writer.go @@ -0,0 +1,399 @@ +package cluster + +import ( + "errors" + "expvar" + "fmt" + "log" + "os" + "strings" + "sync" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful +type ConsistencyLevel int + +// The statistics generated by the "write" mdoule +const ( + statWriteReq = "req" + statPointWriteReq = "pointReq" + statPointWriteReqLocal = "pointReqLocal" + statPointWriteReqRemote = "pointReqRemote" + statWriteOK = "writeOk" + statWritePartial = "writePartial" + statWriteTimeout = "writeTimeout" + statWriteErr = "writeError" + statWritePointReqHH = "pointReqHH" + statSubWriteOK = "subWriteOk" + statSubWriteDrop = "subWriteDrop" +) + +const ( + // ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write + ConsistencyLevelOne + + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write + ConsistencyLevelAll +) + +var ( + // ErrTimeout is returned when a write times out. + ErrTimeout = errors.New("timeout") + + // ErrPartialWrite is returned when a write partially succeeds but does + // not meet the requested consistency level. + ErrPartialWrite = errors.New("partial write") + + // ErrWriteFailed is returned when no writes succeeded. + ErrWriteFailed = errors.New("write failed") + + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const +func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} + +// PointsWriter handles writes across multiple local and remote data nodes. 
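
Editor's note: ParseConsistencyLevel above gives callers (for example a handler parsing a `consistency=` request parameter) a case-insensitive mapping from strings to these constants, with anything unrecognised yielding ErrInvalidConsistencyLevel. A small usage sketch; the import path simply follows the vendored layout shown in this patch:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/cluster"
)

func main() {
	for _, s := range []string{"any", "ONE", "quorum", "all", "bogus"} {
		level, err := cluster.ParseConsistencyLevel(s)
		if err != nil {
			fmt.Printf("%-6s -> error: %v\n", s, err)
			continue
		}
		fmt.Printf("%-6s -> level %d\n", s, level)
	}
}
```
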
+type PointsWriter struct { + mu sync.RWMutex + closing chan struct{} + WriteTimeout time.Duration + Logger *log.Logger + + Node *influxdb.Node + + MetaClient interface { + Database(name string) (di *meta.DatabaseInfo, err error) + RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) + CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) + } + + TSDBStore interface { + CreateShard(database, retentionPolicy string, shardID uint64) error + WriteToShard(shardID uint64, points []models.Point) error + } + + ShardWriter interface { + WriteShard(shardID, ownerID uint64, points []models.Point) error + } + + HintedHandoff interface { + WriteShard(shardID, ownerID uint64, points []models.Point) error + } + + Subscriber interface { + Points() chan<- *WritePointsRequest + } + subPoints chan<- *WritePointsRequest + + statMap *expvar.Map +} + +// NewPointsWriter returns a new instance of PointsWriter for a node. +func NewPointsWriter() *PointsWriter { + return &PointsWriter{ + closing: make(chan struct{}), + WriteTimeout: DefaultWriteTimeout, + Logger: log.New(os.Stderr, "[write] ", log.LstdFlags), + statMap: influxdb.NewStatistics("write", "write", nil), + } +} + +// ShardMapping contains a mapping of a shards to a points. +type ShardMapping struct { + Points map[uint64][]models.Point // The points associated with a shard ID + Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID +} + +// NewShardMapping creates an empty ShardMapping +func NewShardMapping() *ShardMapping { + return &ShardMapping{ + Points: map[uint64][]models.Point{}, + Shards: map[uint64]*meta.ShardInfo{}, + } +} + +// MapPoint maps a point to shard +func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { + points, ok := s.Points[shardInfo.ID] + if !ok { + s.Points[shardInfo.ID] = []models.Point{p} + } else { + s.Points[shardInfo.ID] = append(points, p) + } + s.Shards[shardInfo.ID] = shardInfo +} + +// Open opens the communication channel with the point writer +func (w *PointsWriter) Open() error { + w.mu.Lock() + defer w.mu.Unlock() + w.closing = make(chan struct{}) + if w.Subscriber != nil { + w.subPoints = w.Subscriber.Points() + } + return nil +} + +// Close closes the communication channel with the point writer +func (w *PointsWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closing != nil { + close(w.closing) + } + if w.subPoints != nil { + // 'nil' channels always block so this makes the + // select statement in WritePoints hit its default case + // dropping any in-flight writes. + w.subPoints = nil + } + return nil +} + +// MapShards maps the points contained in wp to a ShardMapping. If a point +// maps to a shard group or shard that does not currently exist, it will be +// created before returning the mapping. 
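
Editor's note: MapShards, whose definition follows, decides which shard group a point belongs to by truncating the point's timestamp to the retention policy's shard-group duration; every distinct truncated time then needs a shard group to exist. A minimal, self-contained illustration of that bucketing (the one-hour duration is an arbitrary example value):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	shardGroupDuration := time.Hour // plays the role of rp.ShardGroupDuration
	points := []time.Time{
		time.Unix(0, 0),
		time.Unix(0, 0).Add(30 * time.Minute),
		time.Unix(0, 0).Add(90 * time.Minute),
	}

	// Each distinct truncated time corresponds to one shard group that must exist.
	groups := map[time.Time]int{}
	for _, t := range points {
		groups[t.Truncate(shardGroupDuration)]++
	}
	for start, n := range groups {
		fmt.Printf("shard group starting %v gets %d point(s)\n", start.UTC(), n)
	}
}
```
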
+func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { + + // holds the start time ranges for required shard groups + timeRanges := map[time.Time]*meta.ShardGroupInfo{} + + rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy) + if err != nil { + return nil, err + } + if rp == nil { + return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) + } + + for _, p := range wp.Points { + timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] = nil + } + + // holds all the shard groups and shards that are required for writes + for t := range timeRanges { + sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, t) + if err != nil { + return nil, err + } + timeRanges[t] = sg + } + + mapping := NewShardMapping() + for _, p := range wp.Points { + sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] + sh := sg.ShardFor(p.HashID()) + mapping.MapPoint(&sh, p) + } + return mapping, nil +} + +// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of +// a cluster structure for information. This is to avoid a circular dependency +func (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error { + req := WritePointsRequest{ + Database: p.Database, + RetentionPolicy: p.RetentionPolicy, + ConsistencyLevel: ConsistencyLevelAny, + Points: p.Points, + } + return w.WritePoints(&req) +} + +// WritePoints writes across multiple local and remote data nodes according the consistency level. +func (w *PointsWriter) WritePoints(p *WritePointsRequest) error { + w.statMap.Add(statWriteReq, 1) + w.statMap.Add(statPointWriteReq, int64(len(p.Points))) + + if p.RetentionPolicy == "" { + db, err := w.MetaClient.Database(p.Database) + if err != nil { + return err + } else if db == nil { + return influxdb.ErrDatabaseNotFound(p.Database) + } + p.RetentionPolicy = db.DefaultRetentionPolicy + } + + shardMappings, err := w.MapShards(p) + if err != nil { + return err + } + + // Write each shard in it's own goroutine and return as soon + // as one fails. + ch := make(chan error, len(shardMappings.Points)) + for shardID, points := range shardMappings.Points { + go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { + ch <- w.writeToShard(shard, p.Database, p.RetentionPolicy, p.ConsistencyLevel, points) + }(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points) + } + + // Send points to subscriptions if possible. + ok := false + // We need to lock just in case the channel is about to be nil'ed + w.mu.RLock() + select { + case w.subPoints <- p: + ok = true + default: + } + w.mu.RUnlock() + if ok { + w.statMap.Add(statSubWriteOK, 1) + } else { + w.statMap.Add(statSubWriteDrop, 1) + } + + for range shardMappings.Points { + select { + case <-w.closing: + return ErrWriteFailed + case err := <-ch: + if err != nil { + return err + } + } + } + return nil +} + +// writeToShards writes points to a shard and ensures a write consistency level has been met. If the write +// partially succeeds, ErrPartialWrite is returned. 
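
Editor's note: one detail of WritePoints above is that the send to the subscriber channel sits inside a select with a default case, so a full channel, or the nil channel left behind by Close, is counted as a dropped subscriber write rather than blocking the writer. A tiny self-contained demonstration of why a nil channel falls through to the default case:

```go
package main

import "fmt"

func main() {
	var ch chan int // nil: a send on a nil channel would block forever

	// Non-blocking send: with a nil (or full) channel the default case runs,
	// which is how WritePoints drops subscriber writes after Close nils subPoints.
	select {
	case ch <- 1:
		fmt.Println("delivered")
	default:
		fmt.Println("dropped")
	}
}
```
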
+func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, + consistency ConsistencyLevel, points []models.Point) error { + // The required number of writes to achieve the requested consistency level + required := len(shard.Owners) + switch consistency { + case ConsistencyLevelAny, ConsistencyLevelOne: + required = 1 + case ConsistencyLevelQuorum: + required = required/2 + 1 + } + + // response channel for each shard writer go routine + type AsyncWriteResult struct { + Owner meta.ShardOwner + Err error + } + ch := make(chan *AsyncWriteResult, len(shard.Owners)) + + for _, owner := range shard.Owners { + go func(shardID uint64, owner meta.ShardOwner, points []models.Point) { + if w.Node.ID == owner.NodeID { + w.statMap.Add(statPointWriteReqLocal, int64(len(points))) + + err := w.TSDBStore.WriteToShard(shardID, points) + // If we've written to shard that should exist on the current node, but the store has + // not actually created this shard, tell it to create it and retry the write + if err == tsdb.ErrShardNotFound { + err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID) + if err != nil { + ch <- &AsyncWriteResult{owner, err} + return + } + err = w.TSDBStore.WriteToShard(shardID, points) + } + ch <- &AsyncWriteResult{owner, err} + return + } + + w.statMap.Add(statPointWriteReqRemote, int64(len(points))) + err := w.ShardWriter.WriteShard(shardID, owner.NodeID, points) + if err != nil && tsdb.IsRetryable(err) { + // The remote write failed so queue it via hinted handoff + w.statMap.Add(statWritePointReqHH, int64(len(points))) + hherr := w.HintedHandoff.WriteShard(shardID, owner.NodeID, points) + if hherr != nil { + ch <- &AsyncWriteResult{owner, hherr} + return + } + + // If the write consistency level is ANY, then a successful hinted handoff can + // be considered a successful write so send nil to the response channel + // otherwise, let the original error propagate to the response channel + if hherr == nil && consistency == ConsistencyLevelAny { + ch <- &AsyncWriteResult{owner, nil} + return + } + } + ch <- &AsyncWriteResult{owner, err} + + }(shard.ID, owner, points) + } + + var wrote int + timeout := time.After(w.WriteTimeout) + var writeError error + for range shard.Owners { + select { + case <-w.closing: + return ErrWriteFailed + case <-timeout: + w.statMap.Add(statWriteTimeout, 1) + // return timeout error to caller + return ErrTimeout + case result := <-ch: + // If the write returned an error, continue to the next response + if result.Err != nil { + w.statMap.Add(statWriteErr, 1) + w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, result.Owner.NodeID, result.Err) + + // Keep track of the first error we see to return back to the client + if writeError == nil { + writeError = result.Err + } + continue + } + + wrote++ + + // We wrote the required consistency level + if wrote >= required { + w.statMap.Add(statWriteOK, 1) + return nil + } + } + } + + if wrote > 0 { + w.statMap.Add(statWritePartial, 1) + return ErrPartialWrite + } + + if writeError != nil { + return fmt.Errorf("write failed: %v", writeError) + } + + return ErrWriteFailed +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/points_writer_test.go b/vendor/github.com/influxdata/influxdb/cluster/points_writer_test.go new file mode 100644 index 0000000000..c0cc0f1fbf --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/points_writer_test.go @@ -0,0 +1,497 @@ +package cluster_test + +import ( + "fmt" + "sync" + "sync/atomic" + 
"testing" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" +) + +// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient. + +// Ensures the points writer maps a single point to a single shard. +func TestPointsWriter_MapShards_One(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return &rp.ShardGroups[0], nil + } + + c := cluster.PointsWriter{MetaClient: ms} + pr := &cluster.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + ConsistencyLevel: cluster.ConsistencyLevelOne, + } + pr.AddPoint("cpu", 1.0, time.Now(), nil) + + var ( + shardMappings *cluster.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 1; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } +} + +// Ensures the points writer maps a multiple points across shard group boundaries. +func TestPointsWriter_MapShards_Multiple(t *testing.T) { + ms := PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.NodeIDFn = func() uint64 { return 1 } + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + + c := cluster.PointsWriter{MetaClient: ms} + pr := &cluster.WritePointsRequest{ + Database: "mydb", + RetentionPolicy: "myrp", + ConsistencyLevel: cluster.ConsistencyLevelOne, + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) + pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) + + var ( + shardMappings *cluster.ShardMapping + err error + ) + if shardMappings, err = c.MapShards(pr); err != nil { + t.Fatalf("unexpected an error: %v", err) + } + + if exp := 2; len(shardMappings.Points) != exp { + t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) + } + + for _, points := range shardMappings.Points { + // First shard shoud have 1 point w/ first point added + if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) + } + + // Second shard shoud have the last two points added + if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { + t.Fatalf("MapShards() value mismatch. 
got %v, exp %v", points[0].Time(), pr.Points[1].Time()) + } + + if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { + t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) + } + } +} + +func TestPointsWriter_WritePoints(t *testing.T) { + tests := []struct { + name string + database string + retentionPolicy string + consistency cluster.ConsistencyLevel + + // the responses returned by each shard write call. node ID 1 = pos 0 + err []error + expErr error + }{ + // Consistency one + { + name: "write one success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelOne, + err: []error{nil, nil, nil}, + expErr: nil, + }, + { + name: "write one error", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelOne, + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: fmt.Errorf("write failed: a failure"), + }, + + // Consistency any + { + name: "write any success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAny, + err: []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")}, + expErr: nil, + }, + // Consistency all + { + name: "write all success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAll, + err: []error{nil, nil, nil}, + expErr: nil, + }, + { + name: "write all, 2/3, partial write", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAll, + err: []error{nil, fmt.Errorf("a failure"), nil}, + expErr: cluster.ErrPartialWrite, + }, + { + name: "write all, 1/3 (failure)", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAll, + err: []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: cluster.ErrPartialWrite, + }, + + // Consistency quorum + { + name: "write quorum, 1/3 failure", + consistency: cluster.ConsistencyLevelQuorum, + database: "mydb", + retentionPolicy: "myrp", + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil}, + expErr: cluster.ErrPartialWrite, + }, + { + name: "write quorum, 2/3 success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelQuorum, + err: []error{nil, nil, fmt.Errorf("a failure")}, + expErr: nil, + }, + { + name: "write quorum, 3/3 success", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelQuorum, + err: []error{nil, nil, nil}, + expErr: nil, + }, + + // Error write error + { + name: "no writes succeed", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelOne, + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: fmt.Errorf("write failed: a failure"), + }, + + // Hinted handoff w/ ANY + { + name: "hinted handoff write succeed", + database: "mydb", + retentionPolicy: "myrp", + consistency: cluster.ConsistencyLevelAny, + err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, + expErr: nil, + }, + + // Write to non-existent database + { + name: "write to non-existent database", + database: "doesnt_exist", + retentionPolicy: "", + consistency: cluster.ConsistencyLevelAny, + err: []error{nil, nil, nil}, + expErr: fmt.Errorf("database not found: doesnt_exist"), + }, + } + + for _, test := range tests { + + pr := &cluster.WritePointsRequest{ + Database: test.database, + RetentionPolicy: test.retentionPolicy, + ConsistencyLevel: 
test.consistency, + } + + // Three points that range over the shardGroup duration (1h) and should map to two + // distinct shards + pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) + pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) + pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) + + // copy to prevent data race + theTest := test + sm := cluster.NewShardMapping() + sm.MapPoint( + &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[0]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[1]) + sm.MapPoint( + &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }}, + pr.Points[2]) + + // Local cluster.Node ShardWriter + // lock on the write increment since these functions get called in parallel + var mu sync.Mutex + sw := &fakeShardWriter{ + ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[int(nodeID)-1] + }, + } + + store := &fakeStore{ + WriteFn: func(shardID uint64, points []models.Point) error { + mu.Lock() + defer mu.Unlock() + return theTest.err[0] + }, + } + + hh := &fakeShardWriter{ + ShardWriteFn: func(shardID, nodeID uint64, points []models.Point) error { + return nil + }, + } + + ms := NewPointsWriterMetaClient() + ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) { + return nil, nil + } + ms.NodeIDFn = func() uint64 { return 1 } + + subPoints := make(chan *cluster.WritePointsRequest, 1) + sub := Subscriber{} + sub.PointsFn = func() chan<- *cluster.WritePointsRequest { + return subPoints + } + + c := cluster.NewPointsWriter() + c.MetaClient = ms + c.ShardWriter = sw + c.TSDBStore = store + c.HintedHandoff = hh + c.Subscriber = sub + c.Node = &influxdb.Node{ID: 1} + + c.Open() + defer c.Close() + + err := c.WritePoints(pr) + if err == nil && test.expErr != nil { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + + if err != nil && test.expErr == nil { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { + t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) + } + if test.expErr == nil { + select { + case p := <-subPoints: + if p != pr { + t.Errorf("PointsWriter.WritePoints(): '%s' error: unexpected WritePointsRequest got %v, exp %v", test.name, p, pr) + } + default: + t.Errorf("PointsWriter.WritePoints(): '%s' error: Subscriber.Points not called", test.name) + } + } + } +} + +var shardID uint64 + +type fakeShardWriter struct { + ShardWriteFn func(shardID, nodeID uint64, points []models.Point) error +} + +func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []models.Point) error { + return f.ShardWriteFn(shardID, nodeID, points) +} + +type fakeStore struct { + WriteFn func(shardID uint64, points []models.Point) error + CreateShardfn func(database, retentionPolicy string, shardID uint64) error +} + +func (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error { + return f.WriteFn(shardID, points) +} + +func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64) error { + return f.CreateShardfn(database, retentionPolicy, shardID) +} + +func NewPointsWriterMetaClient() 
*PointsWriterMetaClient { + ms := &PointsWriterMetaClient{} + rp := NewRetentionPolicy("myp", time.Hour, 3) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + AttachShardGroupInfo(rp, []meta.ShardOwner{ + {NodeID: 1}, + {NodeID: 2}, + {NodeID: 3}, + }) + + ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { + return rp, nil + } + + ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + for i, sg := range rp.ShardGroups { + if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { + return &rp.ShardGroups[i], nil + } + } + panic("should not get here") + } + return ms +} + +type PointsWriterMetaClient struct { + NodeIDFn func() uint64 + RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) + CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) + DatabaseFn func(database string) (*meta.DatabaseInfo, error) + ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) +} + +func (m PointsWriterMetaClient) NodeID() uint64 { return m.NodeIDFn() } + +func (m PointsWriterMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { + return m.RetentionPolicyFn(database, name) +} + +func (m PointsWriterMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { + return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) +} + +func (m PointsWriterMetaClient) Database(database string) (*meta.DatabaseInfo, error) { + return m.DatabaseFn(database) +} + +func (m PointsWriterMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { + return m.ShardOwnerFn(shardID) +} + +type Subscriber struct { + PointsFn func() chan<- *cluster.WritePointsRequest +} + +func (s Subscriber) Points() chan<- *cluster.WritePointsRequest { + return s.PointsFn() +} + +func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { + shards := []meta.ShardInfo{} + owners := []meta.ShardOwner{} + for i := 1; i <= nodeCount; i++ { + owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) + } + + // each node is fully replicated with each other + shards = append(shards, meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }) + + rp := &meta.RetentionPolicyInfo{ + Name: "myrp", + ReplicaN: nodeCount, + Duration: duration, + ShardGroupDuration: duration, + ShardGroups: []meta.ShardGroupInfo{ + meta.ShardGroupInfo{ + ID: nextShardID(), + StartTime: time.Unix(0, 0), + EndTime: time.Unix(0, 0).Add(duration).Add(-1), + Shards: shards, + }, + }, + } + return rp +} + +func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { + var startTime, endTime time.Time + if len(rp.ShardGroups) == 0 { + startTime = time.Unix(0, 0) + } else { + startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) + } + endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) + + sh := meta.ShardGroupInfo{ + ID: uint64(len(rp.ShardGroups) + 1), + StartTime: startTime, + EndTime: endTime, + Shards: []meta.ShardInfo{ + meta.ShardInfo{ + ID: nextShardID(), + Owners: owners, + }, + }, + } + rp.ShardGroups = append(rp.ShardGroups, sh) +} + +func nextShardID() uint64 { + return atomic.AddUint64(&shardID, 1) +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/pool.go 
b/vendor/github.com/influxdata/influxdb/cluster/pool.go new file mode 100644 index 0000000000..b8ff541a2b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/pool.go @@ -0,0 +1,188 @@ +package cluster + +import ( + "errors" + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "gopkg.in/fatih/pool.v2" +) + +// boundedPool implements the Pool interface based on buffered channels. +type boundedPool struct { + // storage for our net.Conn connections + mu sync.Mutex + conns chan net.Conn + + timeout time.Duration + total int32 + // net.Conn generator + factory Factory +} + +// Factory is a function to create new connections. +type Factory func() (net.Conn, error) + +// NewBoundedPool returns a new pool based on buffered channels with an initial +// capacity, maximum capacity and timeout to wait for a connection from the pool. +// Factory is used when initial capacity is +// greater than zero to fill the pool. A zero initialCap doesn't fill the Pool +// until a new Get() is called. During a Get(), If there is no new connection +// available in the pool and total connections is less than the max, a new connection +// will be created via the Factory() method. Othewise, the call will block until +// a connection is available or the timeout is reached. +func NewBoundedPool(initialCap, maxCap int, timeout time.Duration, factory Factory) (pool.Pool, error) { + if initialCap < 0 || maxCap <= 0 || initialCap > maxCap { + return nil, errors.New("invalid capacity settings") + } + + c := &boundedPool{ + conns: make(chan net.Conn, maxCap), + factory: factory, + timeout: timeout, + } + + // create initial connections, if something goes wrong, + // just close the pool error out. + for i := 0; i < initialCap; i++ { + conn, err := factory() + if err != nil { + c.Close() + return nil, fmt.Errorf("factory is not able to fill the pool: %s", err) + } + c.conns <- conn + atomic.AddInt32(&c.total, 1) + } + + return c, nil +} + +func (c *boundedPool) getConns() chan net.Conn { + c.mu.Lock() + conns := c.conns + c.mu.Unlock() + return conns +} + +// Get implements the Pool interfaces Get() method. If there is no new +// connection available in the pool, a new connection will be created via the +// Factory() method. +func (c *boundedPool) Get() (net.Conn, error) { + conns := c.getConns() + if conns == nil { + return nil, pool.ErrClosed + } + + // Try and grab a connection from the pool + select { + case conn := <-conns: + if conn == nil { + return nil, pool.ErrClosed + } + return c.wrapConn(conn), nil + default: + // Could not get connection, can we create a new one? + if atomic.LoadInt32(&c.total) < int32(cap(conns)) { + conn, err := c.factory() + if err != nil { + return nil, err + } + atomic.AddInt32(&c.total, 1) + + return c.wrapConn(conn), nil + } + } + + // The pool was empty and we couldn't create a new one to + // retry until one is free or we timeout + select { + case conn := <-conns: + if conn == nil { + return nil, pool.ErrClosed + } + return c.wrapConn(conn), nil + case <-time.After(c.timeout): + return nil, fmt.Errorf("timed out waiting for free connection") + } + +} + +// put puts the connection back to the pool. If the pool is full or closed, +// conn is simply closed. A nil conn will be rejected. +func (c *boundedPool) put(conn net.Conn) error { + if conn == nil { + return errors.New("connection is nil. 
rejecting") + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.conns == nil { + // pool is closed, close passed connection + return conn.Close() + } + + // put the resource back into the pool. If the pool is full, this will + // block and the default case will be executed. + select { + case c.conns <- conn: + return nil + default: + // pool is full, close passed connection + return conn.Close() + } +} + +func (c *boundedPool) Close() { + c.mu.Lock() + conns := c.conns + c.conns = nil + c.factory = nil + c.mu.Unlock() + + if conns == nil { + return + } + + close(conns) + for conn := range conns { + conn.Close() + } +} + +func (c *boundedPool) Len() int { return len(c.getConns()) } + +// newConn wraps a standard net.Conn to a poolConn net.Conn. +func (c *boundedPool) wrapConn(conn net.Conn) net.Conn { + p := &pooledConn{c: c} + p.Conn = conn + return p +} + +// pooledConn is a wrapper around net.Conn to modify the the behavior of +// net.Conn's Close() method. +type pooledConn struct { + net.Conn + c *boundedPool + unusable bool +} + +// Close() puts the given connects back to the pool instead of closing it. +func (p pooledConn) Close() error { + if p.unusable { + if p.Conn != nil { + return p.Conn.Close() + } + return nil + } + return p.c.put(p.Conn) +} + +// MarkUnusable() marks the connection not usable any more, to let the pool close it instead of returning it to pool. +func (p *pooledConn) MarkUnusable() { + p.unusable = true + atomic.AddInt32(&p.c.total, -1) +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/query_executor.go b/vendor/github.com/influxdata/influxdb/cluster/query_executor.go new file mode 100644 index 0000000000..a48f0abca9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/query_executor.go @@ -0,0 +1,1184 @@ +package cluster + +import ( + "bytes" + "errors" + "expvar" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net" + "sort" + "strconv" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/meta" +) + +// A QueryExecutor is responsible for processing a influxql.Query and +// executing all of the statements within, on nodes in a cluster. +type QueryExecutor struct { + // Reference to local node. + Node *influxdb.Node + + MetaClient MetaClient + + // TSDB storage for local node. + TSDBStore TSDBStore + + // Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS. + Monitor *monitor.Monitor + + // Used for rewriting points back into system for SELECT INTO statements. + PointsWriter *PointsWriter + + // Used for executing meta statements on all data nodes. + MetaExecutor *MetaExecutor + + // Remote execution timeout + Timeout time.Duration + + // Output of all logging. + // Defaults to discarding all log output. + LogOutput io.Writer + + // expvar-based stats. + statMap *expvar.Map +} + +// Statistics for the QueryExecutor +const ( + statQueriesActive = "queriesActive" // Number of queries currently being executed + statQueryExecutionDuration = "queryDurationNs" // Total (wall) time spent executing queries +) + +// NewQueryExecutor returns a new instance of QueryExecutor. +func NewQueryExecutor() *QueryExecutor { + return &QueryExecutor{ + Timeout: DefaultShardMapperTimeout, + LogOutput: ioutil.Discard, + statMap: influxdb.NewStatistics("queryExecutor", "queryExecutor", nil), + } +} + +// ExecuteQuery executes each statement within a query. 
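
Editor's note: ExecuteQuery, declared next, returns a channel of results so callers can stream statement output as it is produced. A hedged usage sketch, assuming `e` is an already wired-up *QueryExecutor; the `runQuery` helper and the `example` package name are illustrative, and import paths follow the vendored layout in this patch:

```go
package example

import (
	"fmt"

	"github.com/influxdata/influxdb/cluster"
	"github.com/influxdata/influxdb/influxql"
)

// runQuery streams results from an already configured QueryExecutor.
func runQuery(e *cluster.QueryExecutor, db, q string) error {
	query, err := influxql.ParseQuery(q)
	if err != nil {
		return err
	}

	closing := make(chan struct{}) // close this to abort in-flight SELECTs
	for result := range e.ExecuteQuery(query, db, 0, closing) {
		if result.Err != nil {
			return result.Err
		}
		for _, row := range result.Series {
			fmt.Println(row.Name, row.Columns, len(row.Values))
		}
	}
	return nil
}
```
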
+func (e *QueryExecutor) ExecuteQuery(query *influxql.Query, database string, chunkSize int, closing chan struct{}) <-chan *influxql.Result { + results := make(chan *influxql.Result) + go e.executeQuery(query, database, chunkSize, closing, results) + return results +} + +func (e *QueryExecutor) executeQuery(query *influxql.Query, database string, chunkSize int, closing chan struct{}, results chan *influxql.Result) { + defer close(results) + + e.statMap.Add(statQueriesActive, 1) + defer func(start time.Time) { + e.statMap.Add(statQueriesActive, -1) + e.statMap.Add(statQueryExecutionDuration, time.Since(start).Nanoseconds()) + }(time.Now()) + + logger := e.logger() + + var i int + for ; i < len(query.Statements); i++ { + stmt := query.Statements[i] + + // If a default database wasn't passed in by the caller, check the statement. + defaultDB := database + if defaultDB == "" { + if s, ok := stmt.(influxql.HasDefaultDatabase); ok { + defaultDB = s.DefaultDatabase() + } + } + + // Rewrite statements, if necessary. + // This can occur on meta read statements which convert to SELECT statements. + newStmt, err := influxql.RewriteStatement(stmt) + if err != nil { + results <- &influxql.Result{Err: err} + break + } + stmt = newStmt + + // Normalize each statement. + if err := e.normalizeStatement(stmt, defaultDB); err != nil { + results <- &influxql.Result{Err: err} + break + } + + // Log each normalized statement. + logger.Println(stmt.String()) + + // Select statements are handled separately so that they can be streamed. + if stmt, ok := stmt.(*influxql.SelectStatement); ok { + if err := e.executeSelectStatement(stmt, chunkSize, i, results, closing); err != nil { + results <- &influxql.Result{StatementID: i, Err: err} + break + } + continue + } + + var rows models.Rows + switch stmt := stmt.(type) { + case *influxql.AlterRetentionPolicyStatement: + err = e.executeAlterRetentionPolicyStatement(stmt) + case *influxql.CreateContinuousQueryStatement: + err = e.executeCreateContinuousQueryStatement(stmt) + case *influxql.CreateDatabaseStatement: + err = e.executeCreateDatabaseStatement(stmt) + case *influxql.CreateRetentionPolicyStatement: + err = e.executeCreateRetentionPolicyStatement(stmt) + case *influxql.CreateSubscriptionStatement: + err = e.executeCreateSubscriptionStatement(stmt) + case *influxql.CreateUserStatement: + err = e.executeCreateUserStatement(stmt) + case *influxql.DropContinuousQueryStatement: + err = e.executeDropContinuousQueryStatement(stmt) + case *influxql.DropDatabaseStatement: + err = e.executeDropDatabaseStatement(stmt) + case *influxql.DropMeasurementStatement: + err = e.executeDropMeasurementStatement(stmt, database) + case *influxql.DropSeriesStatement: + err = e.executeDropSeriesStatement(stmt, database) + case *influxql.DropRetentionPolicyStatement: + err = e.executeDropRetentionPolicyStatement(stmt) + case *influxql.DropServerStatement: + err = e.executeDropServerStatement(stmt) + case *influxql.DropSubscriptionStatement: + err = e.executeDropSubscriptionStatement(stmt) + case *influxql.DropUserStatement: + err = e.executeDropUserStatement(stmt) + case *influxql.GrantStatement: + err = e.executeGrantStatement(stmt) + case *influxql.GrantAdminStatement: + err = e.executeGrantAdminStatement(stmt) + case *influxql.RevokeStatement: + err = e.executeRevokeStatement(stmt) + case *influxql.RevokeAdminStatement: + err = e.executeRevokeAdminStatement(stmt) + case *influxql.ShowContinuousQueriesStatement: + rows, err = e.executeShowContinuousQueriesStatement(stmt) + case 
*influxql.ShowDatabasesStatement: + rows, err = e.executeShowDatabasesStatement(stmt) + case *influxql.ShowDiagnosticsStatement: + rows, err = e.executeShowDiagnosticsStatement(stmt) + case *influxql.ShowGrantsForUserStatement: + rows, err = e.executeShowGrantsForUserStatement(stmt) + case *influxql.ShowRetentionPoliciesStatement: + rows, err = e.executeShowRetentionPoliciesStatement(stmt) + case *influxql.ShowSeriesStatement: + rows, err = e.executeShowSeriesStatement(stmt, database) + case *influxql.ShowServersStatement: + rows, err = e.executeShowServersStatement(stmt) + case *influxql.ShowShardsStatement: + rows, err = e.executeShowShardsStatement(stmt) + case *influxql.ShowShardGroupsStatement: + rows, err = e.executeShowShardGroupsStatement(stmt) + case *influxql.ShowStatsStatement: + rows, err = e.executeShowStatsStatement(stmt) + case *influxql.ShowSubscriptionsStatement: + rows, err = e.executeShowSubscriptionsStatement(stmt) + case *influxql.ShowTagValuesStatement: + rows, err = e.executeShowTagValuesStatement(stmt, database) + case *influxql.ShowUsersStatement: + rows, err = e.executeShowUsersStatement(stmt) + case *influxql.SetPasswordUserStatement: + err = e.executeSetPasswordUserStatement(stmt) + default: + err = influxql.ErrInvalidQuery + } + + // Send results for each statement. + results <- &influxql.Result{ + StatementID: i, + Series: rows, + Err: err, + } + + // Stop of the first error. + if err != nil { + break + } + } + + // Send error results for any statements which were not executed. + for ; i < len(query.Statements)-1; i++ { + results <- &influxql.Result{ + StatementID: i, + Err: influxql.ErrNotExecuted, + } + } +} + +func (e *QueryExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) error { + rpu := &meta.RetentionPolicyUpdate{ + Duration: stmt.Duration, + ReplicaN: stmt.Replication, + } + + // Update the retention policy. + if err := e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu); err != nil { + return err + } + + // If requested, set as default retention policy. + if stmt.Default { + if err := e.MetaClient.SetDefaultRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + } + + return nil +} + +func (e *QueryExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error { + return e.MetaClient.CreateContinuousQuery(q.Database, q.Name, q.String()) +} + +func (e *QueryExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error { + if !stmt.RetentionPolicyCreate { + _, err := e.MetaClient.CreateDatabase(stmt.Name) + return err + } + + rpi := meta.NewRetentionPolicyInfo(stmt.RetentionPolicyName) + rpi.Duration = stmt.RetentionPolicyDuration + rpi.ReplicaN = stmt.RetentionPolicyReplication + _, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, rpi) + return err +} + +func (e *QueryExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error { + rpi := meta.NewRetentionPolicyInfo(stmt.Name) + rpi.Duration = stmt.Duration + rpi.ReplicaN = stmt.Replication + + // Create new retention policy. + if _, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, rpi); err != nil { + return err + } + + // If requested, set new policy as the default. 
+ if stmt.Default { + if err := e.MetaClient.SetDefaultRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + } + return nil +} + +func (e *QueryExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error { + return e.MetaClient.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations) +} + +func (e *QueryExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) error { + _, err := e.MetaClient.CreateUser(q.Name, q.Password, q.Admin) + return err +} + +func (e *QueryExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error { + return e.MetaClient.DropContinuousQuery(q.Database, q.Name) +} + +// executeDropDatabaseStatement drops a database from the cluster. +// It does not return an error if the database was not found on any of +// the nodes, or in the Meta store. +func (e *QueryExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) error { + // Remove the database from the Meta Store. + if err := e.MetaClient.DropDatabase(stmt.Name); err != nil { + return err + } + + // Locally delete the datababse. + if err := e.TSDBStore.DeleteDatabase(stmt.Name); err != nil { + return err + } + + // Execute the statement on the other data nodes in the cluster. + return e.MetaExecutor.ExecuteStatement(stmt, "") +} + +func (e *QueryExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasurementStatement, database string) error { + if dbi, err := e.MetaClient.Database(database); err != nil { + return err + } else if dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Locally drop the measurement + if err := e.TSDBStore.DeleteMeasurement(database, stmt.Name); err != nil { + return err + } + + // Execute the statement on the other data nodes in the cluster. + return e.MetaExecutor.ExecuteStatement(stmt, database) +} + +func (e *QueryExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) error { + if dbi, err := e.MetaClient.Database(database); err != nil { + return err + } else if dbi == nil { + return influxql.ErrDatabaseNotFound(database) + } + + // Check for time in WHERE clause (not supported). + if influxql.HasTimeExpr(stmt.Condition) { + return errors.New("DROP SERIES doesn't support time in WHERE clause") + } + + // Locally drop the series. + if err := e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition); err != nil { + return err + } + + // Execute the statement on the other data nodes in the cluster. + return e.MetaExecutor.ExecuteStatement(stmt, database) +} + +func (e *QueryExecutor) executeDropServerStatement(q *influxql.DropServerStatement) error { + if q.Meta { + return e.MetaClient.DeleteMetaNode(q.NodeID) + } + return e.MetaClient.DeleteDataNode(q.NodeID) +} + +func (e *QueryExecutor) executeDropRetentionPolicyStatement(stmt *influxql.DropRetentionPolicyStatement) error { + if err := e.MetaClient.DropRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + + // Locally drop the retention policy. + if err := e.TSDBStore.DeleteRetentionPolicy(stmt.Database, stmt.Name); err != nil { + return err + } + + // Execute the statement on the other data nodes in the cluster. 
+ return e.MetaExecutor.ExecuteStatement(stmt, stmt.Database) +} + +func (e *QueryExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) error { + return e.MetaClient.DropSubscription(q.Database, q.RetentionPolicy, q.Name) +} + +func (e *QueryExecutor) executeDropUserStatement(q *influxql.DropUserStatement) error { + return e.MetaClient.DropUser(q.Name) +} + +func (e *QueryExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error { + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege) +} + +func (e *QueryExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, true) +} + +func (e *QueryExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) error { + priv := influxql.NoPrivileges + + // Revoking all privileges means there's no need to look at existing user privileges. + if stmt.Privilege != influxql.AllPrivileges { + p, err := e.MetaClient.UserPrivilege(stmt.User, stmt.On) + if err != nil { + return err + } + // Bit clear (AND NOT) the user's privilege with the revoked privilege. + priv = *p &^ stmt.Privilege + } + + return e.MetaClient.SetPrivilege(stmt.User, stmt.On, priv) +} + +func (e *QueryExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) error { + return e.MetaClient.SetAdminPrivilege(stmt.User, false) +} + +func (e *QueryExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) error { + return e.MetaClient.UpdateUser(q.Name, q.Password) +} + +func (e *QueryExecutor) executeSelectStatement(stmt *influxql.SelectStatement, chunkSize, statementID int, results chan *influxql.Result, closing <-chan struct{}) error { + // It is important to "stamp" this time so that everywhere we evaluate `now()` in the statement is EXACTLY the same `now` + now := time.Now().UTC() + opt := influxql.SelectOptions{} + + // Replace instances of "now()" with the current time, and check the resultant times. + stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now}) + opt.MinTime, opt.MaxTime = influxql.TimeRange(stmt.Condition) + if opt.MaxTime.IsZero() { + opt.MaxTime = now + } + if opt.MinTime.IsZero() { + opt.MinTime = time.Unix(0, 0) + } + + // Expand regex sources to their actual source names. + sources, err := e.TSDBStore.ExpandSources(stmt.Sources) + if err != nil { + return err + } + stmt.Sources = sources + + // Convert DISTINCT into a call. + stmt.RewriteDistinct() + + // Remove "time" from fields list. + stmt.RewriteTimeFields() + + // Create an iterator creator based on the shards in the cluster. + ic, err := e.iteratorCreator(stmt, &opt) + if err != nil { + return err + } + + // Rewrite wildcards, if any exist. + tmp, err := stmt.RewriteWildcards(ic) + if err != nil { + return err + } + stmt = tmp + + // Create a set of iterators from a selection. + itrs, err := influxql.Select(stmt, ic, &opt) + if err != nil { + return err + } + + // Generate a row emitter from the iterator set. + em := influxql.NewEmitter(itrs, stmt.TimeAscending()) + em.Columns = stmt.ColumnNames() + em.OmitTime = stmt.OmitTime + defer em.Close() + + // Emit rows to the results channel. + var writeN int64 + var emitted bool + for { + row := em.Emit() + if row == nil { + break + } + + result := &influxql.Result{ + StatementID: statementID, + Series: []*models.Row{row}, + } + + // Write points back into system for INTO statements. 
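Rows leave the emitter one at a time; each is either written back into the target measurement (for INTO queries) or sent to the caller, and a select on the closing channel lets a disconnected caller abort the statement without blocking. A small sketch of that send-or-abort loop, assuming simplified stand-in types:

package sketch

// row stands in for *models.Row; the loop below additionally diverts
// rows to the INTO write path before sending.
type row struct {
	Name   string
	Values [][]interface{}
}

// emitRows sends each row to results unless the closing channel fires first.
func emitRows(rows []*row, results chan<- *row, closing <-chan struct{}) (emitted bool) {
	for _, r := range rows {
		select {
		case <-closing:
			return emitted
		case results <- r:
			emitted = true
		}
	}
	return emitted
}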
+ if stmt.Target != nil { + if err := e.writeInto(stmt, row); err != nil { + return err + } + writeN += int64(len(row.Values)) + continue + } + + // Send results or exit if closing. + select { + case <-closing: + return nil + case results <- result: + } + + emitted = true + } + + // Emit write count if an INTO statement. + if stmt.Target != nil { + results <- &influxql.Result{ + StatementID: statementID, + Series: []*models.Row{{ + Name: "result", + Columns: []string{"time", "written"}, + Values: [][]interface{}{{time.Unix(0, 0).UTC(), writeN}}, + }}, + } + return nil + } + + // Always emit at least one result. + if !emitted { + results <- &influxql.Result{ + StatementID: statementID, + Series: make([]*models.Row, 0), + } + } + + return nil +} + +// iteratorCreator returns a new instance of IteratorCreator based on stmt. +func (e *QueryExecutor) iteratorCreator(stmt *influxql.SelectStatement, opt *influxql.SelectOptions) (influxql.IteratorCreator, error) { + // Retrieve a list of shard IDs. + shards, err := e.MetaClient.ShardsByTimeRange(stmt.Sources, opt.MinTime, opt.MaxTime) + if err != nil { + return nil, err + } + + // Map shards to nodes. + shardIDsByNodeID := make(map[uint64][]uint64) + for _, si := range shards { + // Always assign to local node if it has the shard. + // Otherwise randomly select a remote node. + var nodeID uint64 + if si.OwnedBy(e.Node.ID) { + nodeID = e.Node.ID + } else if len(si.Owners) > 0 { + nodeID = si.Owners[rand.Intn(len(si.Owners))].NodeID + } else { + // This should not occur but if the shard has no owners then + // we don't want this to panic by trying to randomly select a node. + continue + } + + // Otherwise assign it to a remote shard randomly. + shardIDsByNodeID[nodeID] = append(shardIDsByNodeID[nodeID], si.ID) + } + + // Generate iterators for each node. + ics := make([]influxql.IteratorCreator, 0) + if err := func() error { + for nodeID, shardIDs := range shardIDsByNodeID { + // Sort shard IDs so we get more predicable execution. + sort.Sort(uint64Slice(shardIDs)) + + // Create iterator creators from TSDB if local. + if nodeID == e.Node.ID { + for _, shardID := range shardIDs { + ic := e.TSDBStore.ShardIteratorCreator(shardID) + if ic == nil { + continue + } + ics = append(ics, ic) + } + continue + } + + // Otherwise create iterator creator remotely. 
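The mapping above prefers the local node whenever it owns a shard and otherwise picks one owner at random, skipping shards with no owners rather than panicking. A compact sketch of the same placement rule, with a reduced stand-in for meta.ShardInfo:

package sketch

import "math/rand"

// shardInfo is a reduced stand-in for meta.ShardInfo.
type shardInfo struct {
	ID     uint64
	Owners []uint64 // IDs of the nodes that own a copy of the shard
}

// mapShardsToNodes assigns each shard to the local node when possible,
// otherwise to a randomly chosen owner; unowned shards are skipped.
func mapShardsToNodes(localID uint64, shards []shardInfo) map[uint64][]uint64 {
	byNode := make(map[uint64][]uint64)
	for _, si := range shards {
		var nodeID uint64
		switch {
		case containsUint64(si.Owners, localID):
			nodeID = localID
		case len(si.Owners) > 0:
			nodeID = si.Owners[rand.Intn(len(si.Owners))]
		default:
			continue
		}
		byNode[nodeID] = append(byNode[nodeID], si.ID)
	}
	return byNode
}

func containsUint64(a []uint64, v uint64) bool {
	for _, x := range a {
		if x == v {
			return true
		}
	}
	return false
}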
+ dialer := &NodeDialer{ + MetaClient: e.MetaClient, + Timeout: e.Timeout, + } + ics = append(ics, newRemoteIteratorCreator(dialer, nodeID, shardIDs)) + } + + return nil + }(); err != nil { + influxql.IteratorCreators(ics).Close() + return nil, err + } + + return influxql.IteratorCreators(ics), nil +} + +func (e *QueryExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) { + dis, err := e.MetaClient.Databases() + if err != nil { + return nil, err + } + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"name", "query"}, Name: di.Name} + for _, cqi := range di.ContinuousQueries { + row.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query}) + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *QueryExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement) (models.Rows, error) { + dis, err := e.MetaClient.Databases() + if err != nil { + return nil, err + } + + row := &models.Row{Name: "databases", Columns: []string{"name"}} + for _, di := range dis { + row.Values = append(row.Values, []interface{}{di.Name}) + } + return []*models.Row{row}, nil +} + +func (e *QueryExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) (models.Rows, error) { + diags, err := e.Monitor.Diagnostics() + if err != nil { + return nil, err + } + + // Get a sorted list of diagnostics keys. + sortedKeys := make([]string, 0, len(diags)) + for k := range diags { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + rows := make([]*models.Row, 0, len(diags)) + for _, k := range sortedKeys { + if stmt.Module != "" && k != stmt.Module { + continue + } + + row := &models.Row{Name: k} + + row.Columns = diags[k].Columns + row.Values = diags[k].Rows + rows = append(rows, row) + } + return rows, nil +} + +func (e *QueryExecutor) executeShowFieldKeysStatement(stmt *influxql.ShowFieldKeysStatement, database string) (models.Rows, error) { + // FIXME(benbjohnson): Rewrite to use new query engine. 
+ return e.TSDBStore.ExecuteShowFieldKeysStatement(stmt, database) +} + +func (e *QueryExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) (models.Rows, error) { + priv, err := e.MetaClient.UserPrivileges(q.Name) + if err != nil { + return nil, err + } + + row := &models.Row{Columns: []string{"database", "privilege"}} + for d, p := range priv { + row.Values = append(row.Values, []interface{}{d, p.String()}) + } + return []*models.Row{row}, nil +} + +func (e *QueryExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) (models.Rows, error) { + di, err := e.MetaClient.Database(q.Database) + if err != nil { + return nil, err + } else if di == nil { + return nil, influxdb.ErrDatabaseNotFound(q.Database) + } + + row := &models.Row{Columns: []string{"name", "duration", "replicaN", "default"}} + for _, rpi := range di.RetentionPolicies { + row.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name}) + } + return []*models.Row{row}, nil +} + +func (e *QueryExecutor) executeShowSeriesStatement(stmt *influxql.ShowSeriesStatement, database string) (models.Rows, error) { + return e.TSDBStore.ExecuteShowSeriesStatement(stmt, database) +} + +func (e *QueryExecutor) executeShowServersStatement(q *influxql.ShowServersStatement) (models.Rows, error) { + nis, err := e.MetaClient.DataNodes() + if err != nil { + return nil, err + } + + dataNodes := &models.Row{Columns: []string{"id", "http_addr", "tcp_addr"}} + dataNodes.Name = "data_nodes" + for _, ni := range nis { + dataNodes.Values = append(dataNodes.Values, []interface{}{ni.ID, ni.Host, ni.TCPHost}) + } + + nis, err = e.MetaClient.MetaNodes() + if err != nil { + return nil, err + } + + metaNodes := &models.Row{Columns: []string{"id", "http_addr", "tcp_addr"}} + metaNodes.Name = "meta_nodes" + for _, ni := range nis { + metaNodes.Values = append(metaNodes.Values, []interface{}{ni.ID, ni.Host, ni.TCPHost}) + } + + return []*models.Row{dataNodes, metaNodes}, nil +} + +func (e *QueryExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) (models.Rows, error) { + dis, err := e.MetaClient.Databases() + if err != nil { + return nil, err + } + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. 
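Each shard's expiry column below is derived rather than stored: it is the shard group's end time plus the retention policy's duration. A one-function sketch of that computation:

package sketch

import "time"

// shardExpiry returns the RFC3339 time at which a shard's data ages out,
// e.g. shardExpiry(groupEnd, 7*24*time.Hour).
func shardExpiry(groupEnd time.Time, retention time.Duration) string {
	return groupEnd.Add(retention).UTC().Format(time.RFC3339)
}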
+ if sgi.Deleted() { + continue + } + + for _, si := range sgi.Shards { + ownerIDs := make([]uint64, len(si.Owners)) + for i, owner := range si.Owners { + ownerIDs[i] = owner.NodeID + } + + row.Values = append(row.Values, []interface{}{ + si.ID, + di.Name, + rpi.Name, + sgi.ID, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + joinUint64(ownerIDs), + }) + } + } + } + rows = append(rows, row) + } + return rows, nil +} + +func (e *QueryExecutor) executeShowShardGroupsStatement(stmt *influxql.ShowShardGroupsStatement) (models.Rows, error) { + dis, err := e.MetaClient.Databases() + if err != nil { + return nil, err + } + + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "start_time", "end_time", "expiry_time"}, Name: "shard groups"} + for _, di := range dis { + for _, rpi := range di.RetentionPolicies { + for _, sgi := range rpi.ShardGroups { + // Shards associated with deleted shard groups are effectively deleted. + // Don't list them. + if sgi.Deleted() { + continue + } + + row.Values = append(row.Values, []interface{}{ + sgi.ID, + di.Name, + rpi.Name, + sgi.StartTime.UTC().Format(time.RFC3339), + sgi.EndTime.UTC().Format(time.RFC3339), + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339), + }) + } + } + } + + return []*models.Row{row}, nil +} + +func (e *QueryExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) (models.Rows, error) { + stats, err := e.Monitor.Statistics(nil) + if err != nil { + return nil, err + } + + var rows []*models.Row + for _, stat := range stats { + if stmt.Module != "" && stat.Name != stmt.Module { + continue + } + row := &models.Row{Name: stat.Name, Tags: stat.Tags} + + values := make([]interface{}, 0, len(stat.Values)) + for _, k := range stat.ValueNames() { + row.Columns = append(row.Columns, k) + values = append(values, stat.Values[k]) + } + row.Values = [][]interface{}{values} + rows = append(rows, row) + } + return rows, nil +} + +func (e *QueryExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) (models.Rows, error) { + dis, err := e.MetaClient.Databases() + if err != nil { + return nil, err + } + + rows := []*models.Row{} + for _, di := range dis { + row := &models.Row{Columns: []string{"retention_policy", "name", "mode", "destinations"}, Name: di.Name} + for _, rpi := range di.RetentionPolicies { + for _, si := range rpi.Subscriptions { + row.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations}) + } + } + if len(row.Values) > 0 { + rows = append(rows, row) + } + } + return rows, nil +} + +func (e *QueryExecutor) executeShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement, database string) (models.Rows, error) { + return e.TSDBStore.ExecuteShowTagValuesStatement(stmt, database) +} + +func (e *QueryExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) (models.Rows, error) { + row := &models.Row{Columns: []string{"user", "admin"}} + for _, ui := range e.MetaClient.Users() { + row.Values = append(row.Values, []interface{}{ui.Name, ui.Admin}) + } + return []*models.Row{row}, nil +} + +func (e *QueryExecutor) logger() *log.Logger { + return log.New(e.LogOutput, "[query] ", log.LstdFlags) +} + +func (e *QueryExecutor) writeInto(stmt *influxql.SelectStatement, row *models.Row) error { + if stmt.Target.Measurement.Database == "" { + return errNoDatabaseInTarget + } + + // It might seem a bit weird that this is where we do this, 
since we will have to
+	// convert rows back to points. The executors (both aggregate and raw) are complex
+	// enough that teaching them to write back to the DB would be clumsy, and the
+	// interweaving of limitedRowWriter and ExecuteAggregate/Raw makes it hard to
+	// guarantee that written results match what a normal query would return, so the
+	// write-back happens here in the QueryExecutor instead.
+	name := stmt.Target.Measurement.Name
+	if name == "" {
+		name = row.Name
+	}
+
+	points, err := convertRowToPoints(name, row)
+	if err != nil {
+		return err
+	}
+
+	if err := e.PointsWriter.WritePointsInto(&IntoWriteRequest{
+		Database:        stmt.Target.Measurement.Database,
+		RetentionPolicy: stmt.Target.Measurement.RetentionPolicy,
+		Points:          points,
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var errNoDatabaseInTarget = errors.New("no database in target")
+
+// convertRowToPoints will convert a query result Row into Points that can be written back in.
+func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) {
+	// Figure out which parts of the result are the time and which are the fields.
+	timeIndex := -1
+	fieldIndexes := make(map[string]int)
+	for i, c := range row.Columns {
+		if c == "time" {
+			timeIndex = i
+		} else {
+			fieldIndexes[c] = i
+		}
+	}
+
+	if timeIndex == -1 {
+		return nil, errors.New("error finding time index in result")
+	}
+
+	points := make([]models.Point, 0, len(row.Values))
+	for _, v := range row.Values {
+		vals := make(map[string]interface{})
+		for fieldName, fieldIndex := range fieldIndexes {
+			val := v[fieldIndex]
+			if val != nil {
+				vals[fieldName] = v[fieldIndex]
+			}
+		}
+
+		p, err := models.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))
+		if err != nil {
+			// Drop points that can't be stored.
+			continue
+		}
+
+		points = append(points, p)
+	}
+
+	return points, nil
+}
+
+// normalizeStatement adds a default database and policy to the measurements in statement.
+func (e *QueryExecutor) normalizeStatement(stmt influxql.Statement, defaultDatabase string) (err error) {
+	influxql.WalkFunc(stmt, func(node influxql.Node) {
+		if err != nil {
+			return
+		}
+		switch node := node.(type) {
+		case *influxql.Measurement:
+			e := e.normalizeMeasurement(node, defaultDatabase)
+			if e != nil {
+				err = e
+				return
+			}
+		}
+	})
+	return
+}
+
+func (e *QueryExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase string) error {
+	// Targets (measurements in an INTO clause) can have blank names, which means the
+	// name will be the same as the measurement it came from in the FROM clause.
+	if !m.IsTarget && m.Name == "" && m.Regex == nil {
+		return errors.New("invalid measurement")
+	}
+
+	// Measurement does not have an explicit database? Insert default.
+	if m.Database == "" {
+		m.Database = defaultDatabase
+	}
+
+	// The database must be specified by this point.
+	if m.Database == "" {
+		return errors.New("database name required")
+	}
+
+	// Find database.
+	di, err := e.MetaClient.Database(m.Database)
+	if err != nil {
+		return err
+	} else if di == nil {
+		return influxdb.ErrDatabaseNotFound(m.Database)
+	}
+
+	// If no retention policy was specified, use the default.
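normalizeMeasurement resolves every measurement to a fully qualified database and retention policy: a missing database falls back to the request default, and a missing retention policy falls back to the database's default (checked just below). A reduced sketch of those rules, using a stand-in struct instead of *influxql.Measurement:

package sketch

import "errors"

// measurement is a reduced stand-in for *influxql.Measurement.
type measurement struct {
	Database        string
	RetentionPolicy string
	Name            string
}

// normalize fills a missing database from the request default and a missing
// retention policy from the database's default.
func normalize(m *measurement, defaultDB, defaultRP string) error {
	if m.Database == "" {
		m.Database = defaultDB
	}
	if m.Database == "" {
		return errors.New("database name required")
	}
	if m.RetentionPolicy == "" {
		if defaultRP == "" {
			return errors.New("default retention policy not set for: " + m.Database)
		}
		m.RetentionPolicy = defaultRP
	}
	return nil
}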
+ if m.RetentionPolicy == "" { + if di.DefaultRetentionPolicy == "" { + return fmt.Errorf("default retention policy not set for: %s", di.Name) + } + m.RetentionPolicy = di.DefaultRetentionPolicy + } + + return nil +} + +// IntoWriteRequest is a partial copy of cluster.WriteRequest +type IntoWriteRequest struct { + Database string + RetentionPolicy string + Points []models.Point +} + +// remoteIteratorCreator creates iterators for remote shards. +type remoteIteratorCreator struct { + dialer *NodeDialer + nodeID uint64 + shardIDs []uint64 +} + +// newRemoteIteratorCreator returns a new instance of remoteIteratorCreator for a remote shard. +func newRemoteIteratorCreator(dialer *NodeDialer, nodeID uint64, shardIDs []uint64) *remoteIteratorCreator { + return &remoteIteratorCreator{ + dialer: dialer, + nodeID: nodeID, + shardIDs: shardIDs, + } +} + +// CreateIterator creates a remote streaming iterator. +func (ic *remoteIteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { + conn, err := ic.dialer.DialNode(ic.nodeID) + if err != nil { + return nil, err + } + + if err := func() error { + // Write request. + if err := EncodeTLV(conn, createIteratorRequestMessage, &CreateIteratorRequest{ + ShardIDs: ic.shardIDs, + Opt: opt, + }); err != nil { + return err + } + + // Read the response. + var resp CreateIteratorResponse + if _, err := DecodeTLV(conn, &resp); err != nil { + return err + } else if resp.Err != nil { + return err + } + + return nil + }(); err != nil { + conn.Close() + return nil, err + } + + return influxql.NewReaderIterator(conn) +} + +// FieldDimensions returns the unique fields and dimensions across a list of sources. +func (ic *remoteIteratorCreator) FieldDimensions(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) { + conn, err := ic.dialer.DialNode(ic.nodeID) + if err != nil { + return nil, nil, err + } + defer conn.Close() + + // Write request. + if err := EncodeTLV(conn, fieldDimensionsRequestMessage, &FieldDimensionsRequest{ + ShardIDs: ic.shardIDs, + Sources: sources, + }); err != nil { + return nil, nil, err + } + + // Read the response. + var resp FieldDimensionsResponse + if _, err := DecodeTLV(conn, &resp); err != nil { + return nil, nil, err + } + return resp.Fields, resp.Dimensions, resp.Err +} + +// SeriesKeys returns a list of series keys from the underlying shard. +func (ic *remoteIteratorCreator) SeriesKeys(opt influxql.IteratorOptions) (influxql.SeriesList, error) { + conn, err := ic.dialer.DialNode(ic.nodeID) + if err != nil { + return nil, err + } + defer conn.Close() + + // Write request. + if err := EncodeTLV(conn, seriesKeysRequestMessage, &SeriesKeysRequest{ + ShardIDs: ic.shardIDs, + Opt: opt, + }); err != nil { + return nil, err + } + + // Read the response. + var resp SeriesKeysResponse + if _, err := DecodeTLV(conn, &resp); err != nil { + return nil, err + } + return resp.SeriesList, resp.Err +} + +// NodeDialer dials connections to a given node. +type NodeDialer struct { + MetaClient MetaClient + Timeout time.Duration +} + +// DialNode returns a connection to a node. 
+func (d *NodeDialer) DialNode(nodeID uint64) (net.Conn, error) { + ni, err := d.MetaClient.DataNode(nodeID) + if err != nil { + return nil, err + } + + conn, err := net.Dial("tcp", ni.TCPHost) + if err != nil { + return nil, err + } + conn.SetDeadline(time.Now().Add(d.Timeout)) + + // Write the cluster multiplexing header byte + if _, err := conn.Write([]byte{MuxHeader}); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +// TSDBStore is an interface for accessing the time series data store. +type TSDBStore interface { + CreateShard(database, policy string, shardID uint64) error + WriteToShard(shardID uint64, points []models.Point) error + + DeleteDatabase(name string) error + DeleteMeasurement(database, name string) error + DeleteRetentionPolicy(database, name string) error + DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error + ExecuteShowFieldKeysStatement(stmt *influxql.ShowFieldKeysStatement, database string) (models.Rows, error) + ExecuteShowSeriesStatement(stmt *influxql.ShowSeriesStatement, database string) (models.Rows, error) + ExecuteShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement, database string) (models.Rows, error) + ExpandSources(sources influxql.Sources) (influxql.Sources, error) + ShardIteratorCreator(id uint64) influxql.IteratorCreator +} + +// joinUint64 returns a comma-delimited string of uint64 numbers. +func joinUint64(a []uint64) string { + var buf bytes.Buffer + for i, x := range a { + buf.WriteString(strconv.FormatUint(x, 10)) + if i < len(a)-1 { + buf.WriteRune(',') + } + } + return buf.String() +} + +// stringSet represents a set of strings. +type stringSet map[string]struct{} + +// newStringSet returns an empty stringSet. +func newStringSet() stringSet { + return make(map[string]struct{}) +} + +// add adds strings to the set. +func (s stringSet) add(ss ...string) { + for _, n := range ss { + s[n] = struct{}{} + } +} + +// contains returns whether the set contains the given string. +func (s stringSet) contains(ss string) bool { + _, ok := s[ss] + return ok +} + +// list returns the current elements in the set, in sorted order. +func (s stringSet) list() []string { + l := make([]string, 0, len(s)) + for k := range s { + l = append(l, k) + } + sort.Strings(l) + return l +} + +// union returns the union of this set and another. +func (s stringSet) union(o stringSet) stringSet { + ns := newStringSet() + for k := range s { + ns[k] = struct{}{} + } + for k := range o { + ns[k] = struct{}{} + } + return ns +} + +// intersect returns the intersection of this set and another. 
+func (s stringSet) intersect(o stringSet) stringSet { + shorter, longer := s, o + if len(longer) < len(shorter) { + shorter, longer = longer, shorter + } + + ns := newStringSet() + for k := range shorter { + if _, ok := longer[k]; ok { + ns[k] = struct{}{} + } + } + return ns +} + +type uint64Slice []uint64 + +func (a uint64Slice) Len() int { return len(a) } +func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } diff --git a/vendor/github.com/influxdata/influxdb/cluster/query_executor_test.go b/vendor/github.com/influxdata/influxdb/cluster/query_executor_test.go new file mode 100644 index 0000000000..b7347696dd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/query_executor_test.go @@ -0,0 +1,314 @@ +package cluster_test + +import ( + "bytes" + "io" + "os" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" +) + +const ( + // DefaultDatabase is the default database name used in tests. + DefaultDatabase = "db0" + + // DefaultRetentionPolicy is the default retention policy name used in tests. + DefaultRetentionPolicy = "rp0" +) + +// Ensure query executor can execute a simple SELECT statement. +func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) { + e := DefaultQueryExecutor() + + // The meta client should return a single shard owned by the local node. + e.MetaClient.ShardsByTimeRangeFn = func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return []meta.ShardInfo{{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}}, nil + } + + // The TSDB store should return an IteratorCreator for shard. + // This IteratorCreator returns a single iterator with "value" in the aux fields. + e.TSDBStore.ShardIteratorCreatorFn = func(id uint64) influxql.IteratorCreator { + if id != 100 { + t.Fatalf("unexpected shard id: %d", id) + } + + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, + {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, + }}, nil + } + ic.FieldDimensionsFn = func(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) { + return map[string]struct{}{"value": struct{}{}}, nil, nil + } + ic.SeriesKeysFn = func(opt influxql.IteratorOptions) (influxql.SeriesList, error) { + return influxql.SeriesList{ + {Name: "cpu", Aux: []influxql.DataType{influxql.Float}}, + }, nil + } + return &ic + } + + // Verify all results from the query. + if a := ReadAllResults(e.ExecuteQuery(`SELECT * FROM cpu`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "cpu", + Columns: []string{"time", "value"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(100)}, + {time.Unix(1, 0).UTC(), float64(200)}, + }, + }}, + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +// Ensure query executor can execute a distributed SELECT statement. +func TestQueryExecutor_ExecuteQuery_SelectStatement_Remote(t *testing.T) { + // Local executor. + e := DefaultQueryExecutor() + + // Start a second service. 
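The mocks used throughout these tests are structs of function fields, so each test overrides only the behavior it exercises. A minimal sketch of that pattern, with a hypothetical single-method store:

package sketch

// fnStore illustrates the function-field mock style: a test assigns
// DeleteDatabaseFn to control the method's behavior for that test only.
type fnStore struct {
	DeleteDatabaseFn func(name string) error
}

func (s *fnStore) DeleteDatabase(name string) error {
	if s.DeleteDatabaseFn == nil {
		return nil // no-op when the test does not set a behavior
	}
	return s.DeleteDatabaseFn(name)
}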
+ s := MustOpenService() + defer s.Close() + + // Mock the remote service to create an iterator. + s.TSDBStore.ShardIteratorCreatorFn = func(shardID uint64) influxql.IteratorCreator { + if shardID != 200 { + t.Fatalf("unexpected remote shard id: %d", shardID) + } + + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: int64(0 * time.Second), Value: 20}, + }}, nil + } + return &ic + } + + // Two shards are returned. One local and one remote. + e.MetaClient.ShardsByTimeRangeFn = func(sources influxql.Sources, tmin, tmax time.Time) (a []meta.ShardInfo, err error) { + return []meta.ShardInfo{ + {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, + {ID: 200, Owners: []meta.ShardOwner{{NodeID: 1}}}, + }, nil + } + + // The meta client should return node data for the remote node. + e.MetaClient.DataNodeFn = func(id uint64) (*meta.NodeInfo, error) { + return &meta.NodeInfo{ID: 1, TCPHost: s.Addr().String()}, nil + } + + // The local node should return a single iterator. + e.TSDBStore.ShardIteratorCreatorFn = func(id uint64) influxql.IteratorCreator { + if id != 100 { + t.Fatalf("unexpected shard id: %d", id) + } + + var ic IteratorCreator + ic.CreateIteratorFn = func(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: int64(0 * time.Second), Value: 10}, + }}, nil + } + return &ic + } + + // Verify all results from the query. + if a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu`, "db0", 0)); !reflect.DeepEqual(a, []*influxql.Result{ + { + StatementID: 0, + Series: []*models.Row{{ + Name: "cpu", + Columns: []string{"time", "count"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(30)}, + }, + }}, + }, + }) { + t.Fatalf("unexpected results: %s", spew.Sdump(a)) + } +} + +// QueryExecutor is a test wrapper for cluster.QueryExecutor. +type QueryExecutor struct { + *cluster.QueryExecutor + + MetaClient MetaClient + TSDBStore TSDBStore + LogOutput bytes.Buffer +} + +// NewQueryExecutor returns a new instance of QueryExecutor. +// This query executor always has a node id of 0. +func NewQueryExecutor() *QueryExecutor { + e := &QueryExecutor{ + QueryExecutor: cluster.NewQueryExecutor(), + } + e.Node = &influxdb.Node{ID: 0} + e.QueryExecutor.MetaClient = &e.MetaClient + e.QueryExecutor.TSDBStore = &e.TSDBStore + + e.QueryExecutor.LogOutput = &e.LogOutput + if testing.Verbose() { + e.QueryExecutor.LogOutput = io.MultiWriter(e.QueryExecutor.LogOutput, os.Stderr) + } + + return e +} + +// DefaultQueryExecutor returns a QueryExecutor with a database (db0) and retention policy (rp0). +func DefaultQueryExecutor() *QueryExecutor { + e := NewQueryExecutor() + e.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn + e.TSDBStore.ExpandSourcesFn = DefaultTSDBStoreExpandSourcesFn + return e +} + +// ExecuteQuery parses query and executes against the database. +func (e *QueryExecutor) ExecuteQuery(query, database string, chunkSize int) <-chan *influxql.Result { + return e.QueryExecutor.ExecuteQuery(MustParseQuery(query), database, chunkSize, make(chan struct{})) +} + +// TSDBStore is a mockable implementation of cluster.TSDBStore. 
+type TSDBStore struct { + CreateShardFn func(database, policy string, shardID uint64) error + WriteToShardFn func(shardID uint64, points []models.Point) error + + DeleteDatabaseFn func(name string) error + DeleteMeasurementFn func(database, name string) error + DeleteRetentionPolicyFn func(database, name string) error + DeleteSeriesFn func(database string, sources []influxql.Source, condition influxql.Expr) error + ExecuteShowFieldKeysStatementFn func(stmt *influxql.ShowFieldKeysStatement, database string) (models.Rows, error) + ExecuteShowSeriesStatementFn func(stmt *influxql.ShowSeriesStatement, database string) (models.Rows, error) + ExecuteShowTagValuesStatementFn func(stmt *influxql.ShowTagValuesStatement, database string) (models.Rows, error) + ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) + ShardIteratorCreatorFn func(id uint64) influxql.IteratorCreator +} + +func (s *TSDBStore) CreateShard(database, policy string, shardID uint64) error { + if s.CreateShardFn == nil { + return nil + } + return s.CreateShardFn(database, policy, shardID) +} + +func (s *TSDBStore) WriteToShard(shardID uint64, points []models.Point) error { + return s.WriteToShardFn(shardID, points) +} + +func (s *TSDBStore) DeleteDatabase(name string) error { + return s.DeleteDatabaseFn(name) +} + +func (s *TSDBStore) DeleteMeasurement(database, name string) error { + return s.DeleteMeasurementFn(database, name) +} + +func (s *TSDBStore) DeleteRetentionPolicy(database, name string) error { + return s.DeleteRetentionPolicyFn(database, name) +} + +func (s *TSDBStore) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error { + return s.DeleteSeriesFn(database, sources, condition) +} + +func (s *TSDBStore) ExecuteShowFieldKeysStatement(stmt *influxql.ShowFieldKeysStatement, database string) (models.Rows, error) { + return s.ExecuteShowFieldKeysStatementFn(stmt, database) +} + +func (s *TSDBStore) ExecuteShowSeriesStatement(stmt *influxql.ShowSeriesStatement, database string) (models.Rows, error) { + return s.ExecuteShowSeriesStatementFn(stmt, database) +} + +func (s *TSDBStore) ExecuteShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement, database string) (models.Rows, error) { + return s.ExecuteShowTagValuesStatementFn(stmt, database) +} + +func (s *TSDBStore) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { + return s.ExpandSourcesFn(sources) +} + +func (s *TSDBStore) ShardIteratorCreator(id uint64) influxql.IteratorCreator { + return s.ShardIteratorCreatorFn(id) +} + +// DefaultTSDBStoreExpandSourcesFn expands a single source using the default database & retention policy. +func DefaultTSDBStoreExpandSourcesFn(sources influxql.Sources) (influxql.Sources, error) { + return influxql.Sources{&influxql.Measurement{ + Database: DefaultDatabase, + RetentionPolicy: DefaultRetentionPolicy, + Name: sources[0].(*influxql.Measurement).Name}, + }, nil +} + +// MustParseQuery parses s into a query. Panic on error. +func MustParseQuery(s string) *influxql.Query { + q, err := influxql.ParseQuery(s) + if err != nil { + panic(err) + } + return q +} + +// ReadAllResults reads all results from c and returns as a slice. +func ReadAllResults(c <-chan *influxql.Result) []*influxql.Result { + var a []*influxql.Result + for result := range c { + a = append(a, result) + } + return a +} + +// IteratorCreator is a mockable implementation of IteratorCreator. 
+type IteratorCreator struct { + CreateIteratorFn func(opt influxql.IteratorOptions) (influxql.Iterator, error) + FieldDimensionsFn func(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) + SeriesKeysFn func(opt influxql.IteratorOptions) (influxql.SeriesList, error) +} + +func (ic *IteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return ic.CreateIteratorFn(opt) +} + +func (ic *IteratorCreator) FieldDimensions(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) { + return ic.FieldDimensionsFn(sources) +} + +func (ic *IteratorCreator) SeriesKeys(opt influxql.IteratorOptions) (influxql.SeriesList, error) { + return ic.SeriesKeysFn(opt) +} + +// FloatIterator is a represents an iterator that reads from a slice. +type FloatIterator struct { + Points []influxql.FloatPoint +} + +// Close is a no-op. +func (itr *FloatIterator) Close() error { return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() *influxql.FloatPoint { + if len(itr.Points) == 0 { + return nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/rpc.go b/vendor/github.com/influxdata/influxdb/cluster/rpc.go new file mode 100644 index 0000000000..8710e89980 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/rpc.go @@ -0,0 +1,413 @@ +package cluster + +import ( + "errors" + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/cluster/internal" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" +) + +//go:generate protoc --gogo_out=. internal/data.proto + +// WritePointsRequest represents a request to write point data to the cluster +type WritePointsRequest struct { + Database string + RetentionPolicy string + ConsistencyLevel ConsistencyLevel + Points []models.Point +} + +// AddPoint adds a point to the WritePointRequest with field key 'value' +func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + pt, err := models.NewPoint( + name, tags, map[string]interface{}{"value": value}, timestamp, + ) + if err != nil { + return + } + w.Points = append(w.Points, pt) +} + +// WriteShardRequest represents the a request to write a slice of points to a shard +type WriteShardRequest struct { + pb internal.WriteShardRequest +} + +// WriteShardResponse represents the response returned from a remote WriteShardRequest call +type WriteShardResponse struct { + pb internal.WriteShardResponse +} + +// SetShardID sets the ShardID +func (w *WriteShardRequest) SetShardID(id uint64) { w.pb.ShardID = &id } + +// ShardID gets the ShardID +func (w *WriteShardRequest) ShardID() uint64 { return w.pb.GetShardID() } + +func (w *WriteShardRequest) SetDatabase(db string) { w.pb.Database = &db } + +func (w *WriteShardRequest) SetRetentionPolicy(rp string) { w.pb.RetentionPolicy = &rp } + +func (w *WriteShardRequest) Database() string { return w.pb.GetDatabase() } + +func (w *WriteShardRequest) RetentionPolicy() string { return w.pb.GetRetentionPolicy() } + +// Points returns the time series Points +func (w *WriteShardRequest) Points() []models.Point { return w.unmarshalPoints() } + +// AddPoint adds a new time series point +func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { + pt, err := 
models.NewPoint( + name, tags, map[string]interface{}{"value": value}, timestamp, + ) + if err != nil { + return + } + w.AddPoints([]models.Point{pt}) +} + +// AddPoints adds a new time series point +func (w *WriteShardRequest) AddPoints(points []models.Point) { + for _, p := range points { + b, err := p.MarshalBinary() + if err != nil { + // A error here means that we create a point higher in the stack that we could + // not marshal to a byte slice. If that happens, the endpoint that created that + // point needs to be fixed. + panic(fmt.Sprintf("failed to marshal point: `%v`: %v", p, err)) + } + w.pb.Points = append(w.pb.Points, b) + } +} + +// MarshalBinary encodes the object to a binary format. +func (w *WriteShardRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(&w.pb) +} + +// UnmarshalBinary populates WritePointRequest from a binary format. +func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &w.pb); err != nil { + return err + } + return nil +} + +func (w *WriteShardRequest) unmarshalPoints() []models.Point { + points := make([]models.Point, len(w.pb.GetPoints())) + for i, p := range w.pb.GetPoints() { + pt, err := models.NewPointFromBytes(p) + if err != nil { + // A error here means that one node created a valid point and sent us an + // unparseable version. We could log and drop the point and allow + // anti-entropy to resolve the discrepancy, but this shouldn't ever happen. + panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err)) + } + + points[i] = pt + } + return points +} + +// SetCode sets the Code +func (w *WriteShardResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) } + +// SetMessage sets the Message +func (w *WriteShardResponse) SetMessage(message string) { w.pb.Message = &message } + +// Code returns the Code +func (w *WriteShardResponse) Code() int { return int(w.pb.GetCode()) } + +// Message returns the Message +func (w *WriteShardResponse) Message() string { return w.pb.GetMessage() } + +// MarshalBinary encodes the object to a binary format. +func (w *WriteShardResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(&w.pb) +} + +// UnmarshalBinary populates WritePointRequest from a binary format. +func (w *WriteShardResponse) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &w.pb); err != nil { + return err + } + return nil +} + +// ExecuteStatementRequest represents the a request to execute a statement on a node. +type ExecuteStatementRequest struct { + pb internal.ExecuteStatementRequest +} + +// Statement returns the InfluxQL statement. +func (r *ExecuteStatementRequest) Statement() string { return r.pb.GetStatement() } + +// SetStatement sets the InfluxQL statement. +func (r *ExecuteStatementRequest) SetStatement(statement string) { + r.pb.Statement = proto.String(statement) +} + +// Database returns the database name. +func (r *ExecuteStatementRequest) Database() string { return r.pb.GetDatabase() } + +// SetDatabase sets the database name. +func (r *ExecuteStatementRequest) SetDatabase(database string) { r.pb.Database = proto.String(database) } + +// MarshalBinary encodes the object to a binary format. +func (r *ExecuteStatementRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(&r.pb) +} + +// UnmarshalBinary populates ExecuteStatementRequest from a binary format. 
+func (r *ExecuteStatementRequest) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &r.pb); err != nil { + return err + } + return nil +} + +// ExecuteStatementResponse represents the response returned from a remote ExecuteStatementRequest call. +type ExecuteStatementResponse struct { + pb internal.WriteShardResponse +} + +// Code returns the response code. +func (w *ExecuteStatementResponse) Code() int { return int(w.pb.GetCode()) } + +// SetCode sets the Code +func (w *ExecuteStatementResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) } + +// Message returns the repsonse message. +func (w *ExecuteStatementResponse) Message() string { return w.pb.GetMessage() } + +// SetMessage sets the Message +func (w *ExecuteStatementResponse) SetMessage(message string) { w.pb.Message = &message } + +// MarshalBinary encodes the object to a binary format. +func (w *ExecuteStatementResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(&w.pb) +} + +// UnmarshalBinary populates ExecuteStatementResponse from a binary format. +func (w *ExecuteStatementResponse) UnmarshalBinary(buf []byte) error { + if err := proto.Unmarshal(buf, &w.pb); err != nil { + return err + } + return nil +} + +// CreateIteratorRequest represents a request to create a remote iterator. +type CreateIteratorRequest struct { + ShardIDs []uint64 + Opt influxql.IteratorOptions +} + +// MarshalBinary encodes r to a binary format. +func (r *CreateIteratorRequest) MarshalBinary() ([]byte, error) { + buf, err := r.Opt.MarshalBinary() + if err != nil { + return nil, err + } + return proto.Marshal(&internal.CreateIteratorRequest{ + ShardIDs: r.ShardIDs, + Opt: buf, + }) +} + +// UnmarshalBinary decodes data into r. +func (r *CreateIteratorRequest) UnmarshalBinary(data []byte) error { + var pb internal.CreateIteratorRequest + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + r.ShardIDs = pb.GetShardIDs() + if err := r.Opt.UnmarshalBinary(pb.GetOpt()); err != nil { + return err + } + return nil +} + +// CreateIteratorResponse represents a response from remote iterator creation. +type CreateIteratorResponse struct { + Err error +} + +// MarshalBinary encodes r to a binary format. +func (r *CreateIteratorResponse) MarshalBinary() ([]byte, error) { + var pb internal.CreateIteratorResponse + if r.Err != nil { + pb.Err = proto.String(r.Err.Error()) + } + return proto.Marshal(&pb) +} + +// UnmarshalBinary decodes data into r. +func (r *CreateIteratorResponse) UnmarshalBinary(data []byte) error { + var pb internal.CreateIteratorResponse + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + if pb.Err != nil { + r.Err = errors.New(pb.GetErr()) + } + return nil +} + +// FieldDimensionsRequest represents a request to retrieve unique fields & dimensions. +type FieldDimensionsRequest struct { + ShardIDs []uint64 + Sources influxql.Sources +} + +// MarshalBinary encodes r to a binary format. +func (r *FieldDimensionsRequest) MarshalBinary() ([]byte, error) { + buf, err := r.Sources.MarshalBinary() + if err != nil { + return nil, err + } + return proto.Marshal(&internal.FieldDimensionsRequest{ + ShardIDs: r.ShardIDs, + Sources: buf, + }) +} + +// UnmarshalBinary decodes data into r. 
+func (r *FieldDimensionsRequest) UnmarshalBinary(data []byte) error { + var pb internal.FieldDimensionsRequest + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + r.ShardIDs = pb.GetShardIDs() + if err := r.Sources.UnmarshalBinary(pb.GetSources()); err != nil { + return err + } + return nil +} + +// FieldDimensionsResponse represents a response from remote iterator creation. +type FieldDimensionsResponse struct { + Fields map[string]struct{} + Dimensions map[string]struct{} + Err error +} + +// MarshalBinary encodes r to a binary format. +func (r *FieldDimensionsResponse) MarshalBinary() ([]byte, error) { + var pb internal.FieldDimensionsResponse + + pb.Fields = make([]string, 0, len(r.Fields)) + for k := range r.Fields { + pb.Fields = append(pb.Fields, k) + } + + pb.Dimensions = make([]string, 0, len(r.Dimensions)) + for k := range r.Dimensions { + pb.Dimensions = append(pb.Dimensions, k) + } + + if r.Err != nil { + pb.Err = proto.String(r.Err.Error()) + } + return proto.Marshal(&pb) +} + +// UnmarshalBinary decodes data into r. +func (r *FieldDimensionsResponse) UnmarshalBinary(data []byte) error { + var pb internal.FieldDimensionsResponse + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + r.Fields = make(map[string]struct{}, len(pb.GetFields())) + for _, s := range pb.GetFields() { + r.Fields[s] = struct{}{} + } + + r.Dimensions = make(map[string]struct{}, len(pb.GetDimensions())) + for _, s := range pb.GetDimensions() { + r.Dimensions[s] = struct{}{} + } + + if pb.Err != nil { + r.Err = errors.New(pb.GetErr()) + } + return nil +} + +// SeriesKeysRequest represents a request to retrieve a list of series keys. +type SeriesKeysRequest struct { + ShardIDs []uint64 + Opt influxql.IteratorOptions +} + +// MarshalBinary encodes r to a binary format. +func (r *SeriesKeysRequest) MarshalBinary() ([]byte, error) { + buf, err := r.Opt.MarshalBinary() + if err != nil { + return nil, err + } + return proto.Marshal(&internal.SeriesKeysRequest{ + ShardIDs: r.ShardIDs, + Opt: buf, + }) +} + +// UnmarshalBinary decodes data into r. +func (r *SeriesKeysRequest) UnmarshalBinary(data []byte) error { + var pb internal.SeriesKeysRequest + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + r.ShardIDs = pb.GetShardIDs() + if err := r.Opt.UnmarshalBinary(pb.GetOpt()); err != nil { + return err + } + return nil +} + +// SeriesKeysResponse represents a response from retrieving series keys. +type SeriesKeysResponse struct { + SeriesList influxql.SeriesList + Err error +} + +// MarshalBinary encodes r to a binary format. +func (r *SeriesKeysResponse) MarshalBinary() ([]byte, error) { + var pb internal.SeriesKeysResponse + + buf, err := r.SeriesList.MarshalBinary() + if err != nil { + return nil, err + } + pb.SeriesList = buf + + if r.Err != nil { + pb.Err = proto.String(r.Err.Error()) + } + return proto.Marshal(&pb) +} + +// UnmarshalBinary decodes data into r. 
+func (r *SeriesKeysResponse) UnmarshalBinary(data []byte) error { + var pb internal.SeriesKeysResponse + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + if err := r.SeriesList.UnmarshalBinary(pb.GetSeriesList()); err != nil { + return err + } + + if pb.Err != nil { + r.Err = errors.New(pb.GetErr()) + } + + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/rpc_test.go b/vendor/github.com/influxdata/influxdb/cluster/rpc_test.go new file mode 100644 index 0000000000..54393edb46 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/rpc_test.go @@ -0,0 +1,139 @@ +package cluster + +import ( + "errors" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" +) + +func TestWriteShardRequestBinary(t *testing.T) { + sr := &WriteShardRequest{} + + sr.SetShardID(uint64(1)) + if exp := uint64(1); sr.ShardID() != exp { + t.Fatalf("ShardID mismatch: got %v, exp %v", sr.ShardID(), exp) + } + + sr.AddPoint("cpu", 1.0, time.Unix(0, 0), map[string]string{"host": "serverA"}) + sr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) + sr.AddPoint("cpu_load", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) + + b, err := sr.MarshalBinary() + if err != nil { + t.Fatalf("WritePointsRequest.MarshalBinary() failed: %v", err) + } + if len(b) == 0 { + t.Fatalf("WritePointsRequest.MarshalBinary() returned 0 bytes") + } + + got := &WriteShardRequest{} + if err := got.UnmarshalBinary(b); err != nil { + t.Fatalf("WritePointsRequest.UnmarshalMarshalBinary() failed: %v", err) + } + + if got.ShardID() != sr.ShardID() { + t.Errorf("ShardID mismatch: got %v, exp %v", got.ShardID(), sr.ShardID()) + } + + if len(got.Points()) != len(sr.Points()) { + t.Errorf("Points count mismatch: got %v, exp %v", len(got.Points()), len(sr.Points())) + } + + srPoints := sr.Points() + gotPoints := got.Points() + for i, p := range srPoints { + g := gotPoints[i] + + if g.Name() != p.Name() { + t.Errorf("Point %d name mismatch: got %v, exp %v", i, g.Name(), p.Name()) + } + + if !g.Time().Equal(p.Time()) { + t.Errorf("Point %d time mismatch: got %v, exp %v", i, g.Time(), p.Time()) + } + + if g.HashID() != p.HashID() { + t.Errorf("Point #%d HashID() mismatch: got %v, exp %v", i, g.HashID(), p.HashID()) + } + + for k, v := range p.Tags() { + if g.Tags()[k] != v { + t.Errorf("Point #%d tag mismatch: got %v, exp %v", i, k, v) + } + } + + if len(p.Fields()) != len(g.Fields()) { + t.Errorf("Point %d field count mismatch: got %v, exp %v", i, len(g.Fields()), len(p.Fields())) + } + + for j, f := range p.Fields() { + if g.Fields()[j] != f { + t.Errorf("Point %d field mismatch: got %v, exp %v", i, g.Fields()[j], f) + } + } + } +} + +func TestWriteShardResponseBinary(t *testing.T) { + sr := &WriteShardResponse{} + sr.SetCode(10) + sr.SetMessage("foo") + b, err := sr.MarshalBinary() + + if exp := 10; sr.Code() != exp { + t.Fatalf("Code mismatch: got %v, exp %v", sr.Code(), exp) + } + + if exp := "foo"; sr.Message() != exp { + t.Fatalf("Message mismatch: got %v, exp %v", sr.Message(), exp) + } + + if err != nil { + t.Fatalf("WritePointsResponse.MarshalBinary() failed: %v", err) + } + if len(b) == 0 { + t.Fatalf("WritePointsResponse.MarshalBinary() returned 0 bytes") + } + + got := &WriteShardResponse{} + if err := got.UnmarshalBinary(b); err != nil { + t.Fatalf("WritePointsResponse.UnmarshalMarshalBinary() failed: %v", err) + } + + if got.Code() != sr.Code() { + t.Errorf("Code mismatch: got %v, exp %v", got.Code(), 
sr.Code()) + } + + if got.Message() != sr.Message() { + t.Errorf("Message mismatch: got %v, exp %v", got.Message(), sr.Message()) + } + +} + +// Ensure series list response can be marshaled into and out of a binary format. +func TestSeriesKeysResponse_MarshalBinary(t *testing.T) { + resp := &SeriesKeysResponse{ + SeriesList: []influxql.Series{ + {Name: "cpu", Aux: []influxql.DataType{influxql.Float}}, + }, + Err: errors.New("marker"), + } + + // Marshal to binary. + buf, err := resp.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other SeriesKeysResponse + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(&other, resp) { + t.Fatalf("unexpected response: %s", spew.Sdump(other)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/service.go b/vendor/github.com/influxdata/influxdb/cluster/service.go new file mode 100644 index 0000000000..a24d569a1f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/service.go @@ -0,0 +1,566 @@ +package cluster + +import ( + "encoding" + "encoding/binary" + "expvar" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "sync" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tsdb" +) + +// MaxMessageSize defines how large a message can be before we reject it +const MaxMessageSize = 1024 * 1024 * 1024 // 1GB + +// MuxHeader is the header byte used in the TCP mux. +const MuxHeader = 2 + +// Statistics maintained by the cluster package +const ( + writeShardReq = "writeShardReq" + writeShardPointsReq = "writeShardPointsReq" + writeShardFail = "writeShardFail" + + createIteratorReq = "createIteratorReq" + createIteratorResp = "createIteratorResp" + + fieldDimensionsReq = "fieldDimensionsReq" + fieldDimensionsResp = "fieldDimensionsResp" + + seriesKeysReq = "seriesKeysReq" + seriesKeysResp = "seriesKeysResp" +) + +// Service processes data received over raw TCP connections. +type Service struct { + mu sync.RWMutex + + wg sync.WaitGroup + closing chan struct{} + + Listener net.Listener + + MetaClient interface { + ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) + } + + TSDBStore TSDBStore + + Logger *log.Logger + statMap *expvar.Map +} + +// NewService returns a new instance of Service. +func NewService(c Config) *Service { + return &Service{ + closing: make(chan struct{}), + Logger: log.New(os.Stderr, "[cluster] ", log.LstdFlags), + statMap: influxdb.NewStatistics("cluster", "cluster", nil), + } +} + +// Open opens the network listener and begins serving requests. +func (s *Service) Open() error { + + s.Logger.Println("Starting cluster service") + // Begin serving conections. + s.wg.Add(1) + go s.serve() + + return nil +} + +// SetLogger sets the internal logger to the logger passed in. +func (s *Service) SetLogger(l *log.Logger) { + s.Logger = l +} + +// serve accepts connections from the listener and handles them. +func (s *Service) serve() { + defer s.wg.Done() + + for { + // Check if the service is shutting down. + select { + case <-s.closing: + return + default: + } + + // Accept the next connection. 
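The serve loop polls the closing channel, accepts the next connection, and hands it to a goroutine tracked by the WaitGroup so Close can wait for in-flight handlers. A reduced sketch of that loop, with simplified error handling:

package sketch

import (
	"net"
	"sync"
)

// acceptLoop accepts connections until the closing channel fires and delegates
// each connection to its own goroutine, tracked by wg.
func acceptLoop(ln net.Listener, closing <-chan struct{}, wg *sync.WaitGroup, handle func(net.Conn)) {
	for {
		select {
		case <-closing:
			return
		default:
		}

		conn, err := ln.Accept()
		if err != nil {
			// The real service distinguishes "connection closed" from transient
			// errors; this sketch simply retries.
			continue
		}

		wg.Add(1)
		go func() {
			defer wg.Done()
			handle(conn)
		}()
	}
}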
+ conn, err := s.Listener.Accept() + if err != nil { + if strings.Contains(err.Error(), "connection closed") { + s.Logger.Printf("cluster service accept error: %s", err) + return + } + s.Logger.Printf("accept error: %s", err) + continue + } + + // Delegate connection handling to a separate goroutine. + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.handleConn(conn) + }() + } +} + +// Close shuts down the listener and waits for all connections to finish. +func (s *Service) Close() error { + if s.Listener != nil { + s.Listener.Close() + } + + // Shut down all handlers. + close(s.closing) + s.wg.Wait() + + return nil +} + +// handleConn services an individual TCP connection. +func (s *Service) handleConn(conn net.Conn) { + // Ensure connection is closed when service is closed. + closing := make(chan struct{}) + defer close(closing) + go func() { + select { + case <-closing: + case <-s.closing: + } + conn.Close() + }() + + s.Logger.Printf("accept remote connection from %v\n", conn.RemoteAddr()) + defer func() { + s.Logger.Printf("close remote connection from %v\n", conn.RemoteAddr()) + }() + for { + // Read type-length-value. + typ, err := ReadType(conn) + if err != nil { + if strings.HasSuffix(err.Error(), "EOF") { + return + } + s.Logger.Printf("unable to read type: %s", err) + return + } + + // Delegate message processing by type. + switch typ { + case writeShardRequestMessage: + buf, err := ReadLV(conn) + if err != nil { + s.Logger.Printf("unable to read length-value: %s", err) + return + } + + s.statMap.Add(writeShardReq, 1) + err = s.processWriteShardRequest(buf) + if err != nil { + s.Logger.Printf("process write shard error: %s", err) + } + s.writeShardResponse(conn, err) + case executeStatementRequestMessage: + buf, err := ReadLV(conn) + if err != nil { + s.Logger.Printf("unable to read length-value: %s", err) + return + } + + err = s.processExecuteStatementRequest(buf) + if err != nil { + s.Logger.Printf("process execute statement error: %s", err) + } + s.writeShardResponse(conn, err) + case createIteratorRequestMessage: + s.statMap.Add(createIteratorReq, 1) + s.processCreateIteratorRequest(conn) + return + case fieldDimensionsRequestMessage: + s.statMap.Add(fieldDimensionsReq, 1) + s.processFieldDimensionsRequest(conn) + return + case seriesKeysRequestMessage: + s.statMap.Add(seriesKeysReq, 1) + s.processSeriesKeysRequest(conn) + return + default: + s.Logger.Printf("cluster service message type not found: %d", typ) + } + } +} + +func (s *Service) processExecuteStatementRequest(buf []byte) error { + // Unmarshal the request. + var req ExecuteStatementRequest + if err := req.UnmarshalBinary(buf); err != nil { + return err + } + + // Parse the InfluxQL statement. 
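Only destructive statements forwarded by MetaExecutor are allowed to execute on behalf of another node; the type switch just below rejects everything else. A small sketch of that whitelist idea, using plain strings instead of influxql AST types:

package sketch

import "fmt"

// allowedRemoteStatement accepts only the statement kinds a remote node may
// forward; anything else is refused.
func allowedRemoteStatement(kind string) error {
	switch kind {
	case "DROP DATABASE", "DROP MEASUREMENT", "DROP SERIES", "DROP RETENTION POLICY":
		return nil
	default:
		return fmt.Errorf("%q should not be executed across a cluster", kind)
	}
}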
+ stmt, err := influxql.ParseStatement(req.Statement()) + if err != nil { + return err + } + + return s.executeStatement(stmt, req.Database()) +} + +func (s *Service) executeStatement(stmt influxql.Statement, database string) error { + switch t := stmt.(type) { + case *influxql.DropDatabaseStatement: + return s.TSDBStore.DeleteDatabase(t.Name) + case *influxql.DropMeasurementStatement: + return s.TSDBStore.DeleteMeasurement(database, t.Name) + case *influxql.DropSeriesStatement: + return s.TSDBStore.DeleteSeries(database, t.Sources, t.Condition) + case *influxql.DropRetentionPolicyStatement: + return s.TSDBStore.DeleteRetentionPolicy(database, t.Name) + default: + return fmt.Errorf("%q should not be executed across a cluster", stmt.String()) + } +} + +func (s *Service) processWriteShardRequest(buf []byte) error { + // Build request + var req WriteShardRequest + if err := req.UnmarshalBinary(buf); err != nil { + return err + } + + points := req.Points() + s.statMap.Add(writeShardPointsReq, int64(len(points))) + err := s.TSDBStore.WriteToShard(req.ShardID(), points) + + // We may have received a write for a shard that we don't have locally because the + // sending node may have just created the shard (via the metastore) and the write + // arrived before the local store could create the shard. In this case, we need + // to check the metastore to determine what database and retention policy this + // shard should reside within. + if err == tsdb.ErrShardNotFound { + db, rp := req.Database(), req.RetentionPolicy() + if db == "" || rp == "" { + s.Logger.Printf("drop write request: shard=%d. no database or rentention policy received", req.ShardID()) + return nil + } + + err = s.TSDBStore.CreateShard(req.Database(), req.RetentionPolicy(), req.ShardID()) + if err != nil { + s.statMap.Add(writeShardFail, 1) + return fmt.Errorf("create shard %d: %s", req.ShardID(), err) + } + + err = s.TSDBStore.WriteToShard(req.ShardID(), points) + if err != nil { + s.statMap.Add(writeShardFail, 1) + return fmt.Errorf("write shard %d: %s", req.ShardID(), err) + } + } + + if err != nil { + s.statMap.Add(writeShardFail, 1) + return fmt.Errorf("write shard %d: %s", req.ShardID(), err) + } + + return nil +} + +func (s *Service) writeShardResponse(w io.Writer, e error) { + // Build response. + var resp WriteShardResponse + if e != nil { + resp.SetCode(1) + resp.SetMessage(e.Error()) + } else { + resp.SetCode(0) + } + + // Marshal response to binary. + buf, err := resp.MarshalBinary() + if err != nil { + s.Logger.Printf("error marshalling shard response: %s", err) + return + } + + // Write to connection. + if err := WriteTLV(w, writeShardResponseMessage, buf); err != nil { + s.Logger.Printf("write shard response error: %s", err) + } +} + +func (s *Service) processCreateIteratorRequest(conn net.Conn) { + defer conn.Close() + + var itr influxql.Iterator + if err := func() error { + // Parse request. + var req CreateIteratorRequest + if err := DecodeLV(conn, &req); err != nil { + return err + } + + // Collect iterator creators for each shard. + ics := make([]influxql.IteratorCreator, 0, len(req.ShardIDs)) + for _, shardID := range req.ShardIDs { + ic := s.TSDBStore.ShardIteratorCreator(shardID) + if ic == nil { + return nil + } + ics = append(ics, ic) + } + + // Generate a single iterator from all shards. 
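processWriteShardRequest above tolerates writes that race ahead of local shard creation: if the first write reports an unknown shard, it creates the shard from the request's database and retention policy and retries once. A reduced sketch of that retry, with a stand-in store interface:

package sketch

import "errors"

// errShardNotFound stands in for tsdb.ErrShardNotFound.
var errShardNotFound = errors.New("shard not found")

type shardStore interface {
	WriteToShard(id uint64, points [][]byte) error
	CreateShard(db, rp string, id uint64) error
}

// writeWithRetry writes once, creates the missing shard if needed, then retries.
func writeWithRetry(s shardStore, db, rp string, id uint64, points [][]byte) error {
	err := s.WriteToShard(id, points)
	if err != errShardNotFound {
		return err
	}
	if db == "" || rp == "" {
		// Without a database and retention policy the shard cannot be created;
		// the real service drops the write and logs it.
		return nil
	}
	if err := s.CreateShard(db, rp, id); err != nil {
		return err
	}
	return s.WriteToShard(id, points)
}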
+ i, err := influxql.IteratorCreators(ics).CreateIterator(req.Opt) + if err != nil { + return err + } + itr = i + + return nil + }(); err != nil { + itr.Close() + s.Logger.Printf("error reading CreateIterator request: %s", err) + EncodeTLV(conn, createIteratorResponseMessage, &CreateIteratorResponse{Err: err}) + return + } + + // Encode success response. + if err := EncodeTLV(conn, createIteratorResponseMessage, &CreateIteratorResponse{}); err != nil { + s.Logger.Printf("error writing CreateIterator response: %s", err) + return + } + + // Exit if no iterator was produced. + if itr == nil { + return + } + + // Stream iterator to connection. + if err := influxql.NewIteratorEncoder(conn).EncodeIterator(itr); err != nil { + s.Logger.Printf("error encoding CreateIterator iterator: %s", err) + return + } +} + +func (s *Service) processFieldDimensionsRequest(conn net.Conn) { + var fields, dimensions map[string]struct{} + if err := func() error { + // Parse request. + var req FieldDimensionsRequest + if err := DecodeLV(conn, &req); err != nil { + return err + } + + // Collect iterator creators for each shard. + ics := make([]influxql.IteratorCreator, 0, len(req.ShardIDs)) + for _, shardID := range req.ShardIDs { + ic := s.TSDBStore.ShardIteratorCreator(shardID) + if ic == nil { + return nil + } + ics = append(ics, ic) + } + + // Generate a single iterator from all shards. + f, d, err := influxql.IteratorCreators(ics).FieldDimensions(req.Sources) + if err != nil { + return err + } + fields, dimensions = f, d + + return nil + }(); err != nil { + s.Logger.Printf("error reading FieldDimensions request: %s", err) + EncodeTLV(conn, fieldDimensionsResponseMessage, &FieldDimensionsResponse{Err: err}) + return + } + + // Encode success response. + if err := EncodeTLV(conn, fieldDimensionsResponseMessage, &FieldDimensionsResponse{ + Fields: fields, + Dimensions: dimensions, + }); err != nil { + s.Logger.Printf("error writing FieldDimensions response: %s", err) + return + } +} + +func (s *Service) processSeriesKeysRequest(conn net.Conn) { + var seriesList influxql.SeriesList + if err := func() error { + // Parse request. + var req SeriesKeysRequest + if err := DecodeLV(conn, &req); err != nil { + return err + } + + // Collect iterator creators for each shard. + ics := make([]influxql.IteratorCreator, 0, len(req.ShardIDs)) + for _, shardID := range req.ShardIDs { + ic := s.TSDBStore.ShardIteratorCreator(shardID) + if ic == nil { + return nil + } + ics = append(ics, ic) + } + + // Generate a single iterator from all shards. + a, err := influxql.IteratorCreators(ics).SeriesKeys(req.Opt) + if err != nil { + return err + } + seriesList = a + + return nil + }(); err != nil { + s.Logger.Printf("error reading SeriesKeys request: %s", err) + EncodeTLV(conn, seriesKeysResponseMessage, &SeriesKeysResponse{Err: err}) + return + } + + // Encode success response. + if err := EncodeTLV(conn, seriesKeysResponseMessage, &SeriesKeysResponse{ + SeriesList: seriesList, + }); err != nil { + s.Logger.Printf("error writing SeriesKeys response: %s", err) + return + } +} + +// ReadTLV reads a type-length-value record from r. +func ReadTLV(r io.Reader) (byte, []byte, error) { + typ, err := ReadType(r) + if err != nil { + return 0, nil, err + } + + buf, err := ReadLV(r) + if err != nil { + return 0, nil, err + } + return typ, buf, err +} + +// ReadType reads the type from a TLV record. 
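+// The framing is a single type byte followed by an 8-byte big-endian length
+// and the value bytes (see ReadLV and WriteLV below). As a rough sketch, a
+// hypothetical caller holding a payload []byte could round-trip one frame as:
+//
+//	if err := WriteTLV(w, writeShardRequestMessage, payload); err != nil {
+//		return err
+//	}
+//	typ, buf, err := ReadTLV(r)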
+func ReadType(r io.Reader) (byte, error) { + var typ [1]byte + if _, err := io.ReadFull(r, typ[:]); err != nil { + return 0, fmt.Errorf("read message type: %s", err) + } + return typ[0], nil +} + +// ReadLV reads the length-value from a TLV record. +func ReadLV(r io.Reader) ([]byte, error) { + // Read the size of the message. + var sz int64 + if err := binary.Read(r, binary.BigEndian, &sz); err != nil { + return nil, fmt.Errorf("read message size: %s", err) + } + + if sz >= MaxMessageSize { + return nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz) + } + + // Read the value. + buf := make([]byte, sz) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("read message value: %s", err) + } + + return buf, nil +} + +// WriteTLV writes a type-length-value record to w. +func WriteTLV(w io.Writer, typ byte, buf []byte) error { + if err := WriteType(w, typ); err != nil { + return err + } + if err := WriteLV(w, buf); err != nil { + return err + } + return nil +} + +// WriteType writes the type in a TLV record to w. +func WriteType(w io.Writer, typ byte) error { + if _, err := w.Write([]byte{typ}); err != nil { + return fmt.Errorf("write message type: %s", err) + } + return nil +} + +// WriteLV writes the length-value in a TLV record to w. +func WriteLV(w io.Writer, buf []byte) error { + // Write the size of the message. + if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil { + return fmt.Errorf("write message size: %s", err) + } + + // Write the value. + if _, err := w.Write(buf); err != nil { + return fmt.Errorf("write message value: %s", err) + } + return nil +} + +// EncodeTLV encodes v to a binary format and writes the record-length-value record to w. +func EncodeTLV(w io.Writer, typ byte, v encoding.BinaryMarshaler) error { + if err := WriteType(w, typ); err != nil { + return err + } + if err := EncodeLV(w, v); err != nil { + return err + } + return nil +} + +// EncodeLV encodes v to a binary format and writes the length-value record to w. +func EncodeLV(w io.Writer, v encoding.BinaryMarshaler) error { + buf, err := v.MarshalBinary() + if err != nil { + return err + } + + if err := WriteLV(w, buf); err != nil { + return err + } + return nil +} + +// DecodeTLV reads the type-length-value record from r and unmarshals it into v. +func DecodeTLV(r io.Reader, v encoding.BinaryUnmarshaler) (typ byte, err error) { + typ, err = ReadType(r) + if err != nil { + return 0, err + } + if err := DecodeLV(r, v); err != nil { + return 0, err + } + return typ, nil +} + +// DecodeLV reads the length-value record from r and unmarshals it into v. 
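+// Because the payload is read through ReadLV, the MaxMessageSize limit also
+// applies here.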
+func DecodeLV(r io.Reader, v encoding.BinaryUnmarshaler) error { + buf, err := ReadLV(r) + if err != nil { + return err + } + + if err := v.UnmarshalBinary(buf); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/service_test.go b/vendor/github.com/influxdata/influxdb/cluster/service_test.go new file mode 100644 index 0000000000..743632ba17 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/service_test.go @@ -0,0 +1,174 @@ +package cluster_test + +import ( + "fmt" + "io" + "net" + "time" + + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/tcp" +) + +type metaClient struct { + host string +} + +func (m *metaClient) DataNode(nodeID uint64) (*meta.NodeInfo, error) { + return &meta.NodeInfo{ + ID: nodeID, + TCPHost: m.host, + }, nil +} + +func (m *metaClient) ShardOwner(shardID uint64) (db, rp string, sgi *meta.ShardGroupInfo) { + return "db", "rp", &meta.ShardGroupInfo{} +} + +type testService struct { + nodeID uint64 + ln net.Listener + muxln net.Listener + responses chan *serviceResponse + + TSDBStore TSDBStore +} + +func newTestWriteService(f func(shardID uint64, points []models.Point) error) testService { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(err) + } + + mux := tcp.NewMux() + muxln := mux.Listen(cluster.MuxHeader) + go mux.Serve(ln) + + s := testService{ + ln: ln, + muxln: muxln, + } + s.TSDBStore.WriteToShardFn = f + s.responses = make(chan *serviceResponse, 1024) + return s +} + +func (ts *testService) Close() { + if ts.ln != nil { + ts.ln.Close() + } +} + +type serviceResponses []serviceResponse +type serviceResponse struct { + shardID uint64 + ownerID uint64 + points []models.Point +} + +func (ts *testService) writeShardSuccess(shardID uint64, points []models.Point) error { + ts.responses <- &serviceResponse{ + shardID: shardID, + points: points, + } + return nil +} + +func writeShardFail(shardID uint64, points []models.Point) error { + return fmt.Errorf("failed to write") +} + +func writeShardSlow(shardID uint64, points []models.Point) error { + time.Sleep(1 * time.Second) + return nil +} + +func (ts *testService) ResponseN(n int) ([]*serviceResponse, error) { + var a []*serviceResponse + for { + select { + case r := <-ts.responses: + a = append(a, r) + if len(a) == n { + return a, nil + } + case <-time.After(time.Second): + return a, fmt.Errorf("unexpected response count: expected: %d, actual: %d", n, len(a)) + } + } +} + +// Service is a test wrapper for cluster.Service. +type Service struct { + *cluster.Service + + ln net.Listener + TSDBStore TSDBStore +} + +// NewService returns a new instance of Service. +func NewService() *Service { + s := &Service{ + Service: cluster.NewService(cluster.Config{}), + } + s.Service.TSDBStore = &s.TSDBStore + return s +} + +// MustOpenService returns a new, open service on a random port. Panic on error. +func MustOpenService() *Service { + s := NewService() + s.ln = MustListen("tcp", "127.0.0.1:0") + s.Listener = &muxListener{s.ln} + if err := s.Open(); err != nil { + panic(err) + } + return s +} + +// Close closes the listener and waits for the service to close. +func (s *Service) Close() error { + if s.ln != nil { + s.ln.Close() + } + return s.Service.Close() +} + +// Addr returns the network address of the service. 
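+// It reports the address of the raw listener opened by MustOpenService, not
+// the mux-wrapped Listener handed to the embedded cluster.Service.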
+func (s *Service) Addr() net.Addr { return s.ln.Addr() } + +// muxListener is a net.Listener implementation that strips off the first byte. +// This is used to simulate the listener from pkg/mux. +type muxListener struct { + net.Listener +} + +// Accept accepts the next connection and removes the first byte. +func (ln *muxListener) Accept() (net.Conn, error) { + conn, err := ln.Listener.Accept() + if err != nil { + return nil, err + } + + var buf [1]byte + if _, err := io.ReadFull(conn, buf[:]); err != nil { + conn.Close() + return nil, err + } else if buf[0] != cluster.MuxHeader { + conn.Close() + panic(fmt.Sprintf("unexpected mux header byte: %d", buf[0])) + } + + return conn, nil +} + +// MustListen opens a listener. Panic on error. +func MustListen(network, laddr string) net.Listener { + ln, err := net.Listen(network, laddr) + if err != nil { + panic(err) + } + return ln +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/shard_writer.go b/vendor/github.com/influxdata/influxdb/cluster/shard_writer.go new file mode 100644 index 0000000000..9a44137a8e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/shard_writer.go @@ -0,0 +1,188 @@ +package cluster + +import ( + "fmt" + "net" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/services/meta" +) + +const ( + writeShardRequestMessage byte = iota + 1 + writeShardResponseMessage + + executeStatementRequestMessage + executeStatementResponseMessage + + createIteratorRequestMessage + createIteratorResponseMessage + + fieldDimensionsRequestMessage + fieldDimensionsResponseMessage + + seriesKeysRequestMessage + seriesKeysResponseMessage +) + +// ShardWriter writes a set of points to a shard. +type ShardWriter struct { + pool *clientPool + timeout time.Duration + maxConnections int + + MetaClient interface { + DataNode(id uint64) (ni *meta.NodeInfo, err error) + ShardOwner(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) + } +} + +// NewShardWriter returns a new instance of ShardWriter. +func NewShardWriter(timeout time.Duration, maxConnections int) *ShardWriter { + return &ShardWriter{ + pool: newClientPool(), + timeout: timeout, + maxConnections: maxConnections, + } +} + +// WriteShard writes time series points to a shard +func (w *ShardWriter) WriteShard(shardID, ownerID uint64, points []models.Point) error { + c, err := w.dial(ownerID) + if err != nil { + return err + } + + conn, ok := c.(*pooledConn) + if !ok { + panic("wrong connection type") + } + defer func(conn net.Conn) { + conn.Close() // return to pool + }(conn) + + // Determine the location of this shard and whether it still exists + db, rp, sgi := w.MetaClient.ShardOwner(shardID) + if sgi == nil { + // If we can't get the shard group for this shard, then we need to drop this request + // as it is no longer valid. This could happen if writes were queued via + // hinted handoff and we're processing the queue after a shard group was deleted. + return nil + } + + // Build write request. + var request WriteShardRequest + request.SetShardID(shardID) + request.SetDatabase(db) + request.SetRetentionPolicy(rp) + request.AddPoints(points) + + // Marshal into protocol buffers. + buf, err := request.MarshalBinary() + if err != nil { + return err + } + + // Write request. + conn.SetWriteDeadline(time.Now().Add(w.timeout)) + if err := WriteTLV(conn, writeShardRequestMessage, buf); err != nil { + conn.MarkUnusable() + return err + } + + // Read the response. 
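+	// The remote cluster service answers with a writeShardResponseMessage
+	// frame; a non-zero response code is converted into an error below.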
+ conn.SetReadDeadline(time.Now().Add(w.timeout)) + _, buf, err = ReadTLV(conn) + if err != nil { + conn.MarkUnusable() + return err + } + + // Unmarshal response. + var response WriteShardResponse + if err := response.UnmarshalBinary(buf); err != nil { + return err + } + + if response.Code() != 0 { + return fmt.Errorf("error code %d: %s", response.Code(), response.Message()) + } + + return nil +} + +func (w *ShardWriter) dial(nodeID uint64) (net.Conn, error) { + // If we don't have a connection pool for that addr yet, create one + _, ok := w.pool.getPool(nodeID) + if !ok { + factory := &connFactory{nodeID: nodeID, clientPool: w.pool, timeout: w.timeout} + factory.metaClient = w.MetaClient + + p, err := NewBoundedPool(1, w.maxConnections, w.timeout, factory.dial) + if err != nil { + return nil, err + } + w.pool.setPool(nodeID, p) + } + return w.pool.conn(nodeID) +} + +// Close closes ShardWriter's pool +func (w *ShardWriter) Close() error { + if w.pool == nil { + return fmt.Errorf("client already closed") + } + w.pool.close() + w.pool = nil + return nil +} + +const ( + maxConnections = 500 + maxRetries = 3 +) + +var errMaxConnectionsExceeded = fmt.Errorf("can not exceed max connections of %d", maxConnections) + +type connFactory struct { + nodeID uint64 + timeout time.Duration + + clientPool interface { + size() int + } + + metaClient interface { + DataNode(id uint64) (ni *meta.NodeInfo, err error) + } +} + +func (c *connFactory) dial() (net.Conn, error) { + if c.clientPool.size() > maxConnections { + return nil, errMaxConnectionsExceeded + } + + ni, err := c.metaClient.DataNode(c.nodeID) + if err != nil { + return nil, err + } + + if ni == nil { + return nil, fmt.Errorf("node %d does not exist", c.nodeID) + } + + conn, err := net.DialTimeout("tcp", ni.TCPHost, c.timeout) + if err != nil { + return nil, err + } + + // Write a marker byte for cluster messages. + _, err = conn.Write([]byte{MuxHeader}) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} diff --git a/vendor/github.com/influxdata/influxdb/cluster/shard_writer_test.go b/vendor/github.com/influxdata/influxdb/cluster/shard_writer_test.go new file mode 100644 index 0000000000..ffbcbebaa1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cluster/shard_writer_test.go @@ -0,0 +1,224 @@ +package cluster_test + +import ( + "net" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/toml" +) + +// Ensure the shard writer can successfully write a single request. +func TestShardWriter_WriteShard_Success(t *testing.T) { + ts := newTestWriteService(nil) + ts.TSDBStore.WriteToShardFn = ts.writeShardSuccess + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = &ts.TSDBStore + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Minute, 1) + w.MetaClient = &metaClient{host: ts.ln.Addr().String()} + + // Build a single point. + now := time.Now() + var points []models.Point + points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) + + // Write to shard and close. + if err := w.WriteShard(1, 2, points); err != nil { + t.Fatal(err) + } else if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Validate response. 
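+	// ResponseN blocks until one response has been collected or a one-second
+	// timeout elapses, so a missing write fails the test quickly.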
+ responses, err := ts.ResponseN(1) + if err != nil { + t.Fatal(err) + } else if responses[0].shardID != 1 { + t.Fatalf("unexpected shard id: %d", responses[0].shardID) + } + + // Validate point. + if p := responses[0].points[0]; p.Name() != "cpu" { + t.Fatalf("unexpected name: %s", p.Name()) + } else if p.Fields()["value"] != int64(100) { + t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) + } else if p.Tags()["host"] != "server01" { + t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) + } else if p.Time().UnixNano() != now.UnixNano() { + t.Fatalf("unexpected time: %s", p.Time()) + } +} + +// Ensure the shard writer can successful write a multiple requests. +func TestShardWriter_WriteShard_Multiple(t *testing.T) { + ts := newTestWriteService(nil) + ts.TSDBStore.WriteToShardFn = ts.writeShardSuccess + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = &ts.TSDBStore + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Minute, 1) + w.MetaClient = &metaClient{host: ts.ln.Addr().String()} + + // Build a single point. + now := time.Now() + var points []models.Point + points = append(points, models.MustNewPoint("cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) + + // Write to shard twice and close. + if err := w.WriteShard(1, 2, points); err != nil { + t.Fatal(err) + } else if err := w.WriteShard(1, 2, points); err != nil { + t.Fatal(err) + } else if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Validate response. + responses, err := ts.ResponseN(1) + if err != nil { + t.Fatal(err) + } else if responses[0].shardID != 1 { + t.Fatalf("unexpected shard id: %d", responses[0].shardID) + } + + // Validate point. + if p := responses[0].points[0]; p.Name() != "cpu" { + t.Fatalf("unexpected name: %s", p.Name()) + } else if p.Fields()["value"] != int64(100) { + t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) + } else if p.Tags()["host"] != "server01" { + t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) + } else if p.Time().UnixNano() != now.UnixNano() { + t.Fatalf("unexpected time: %s", p.Time()) + } +} + +// Ensure the shard writer returns an error when the server fails to accept the write. +func TestShardWriter_WriteShard_Error(t *testing.T) { + ts := newTestWriteService(writeShardFail) + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = &ts.TSDBStore + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(time.Minute, 1) + w.MetaClient = &metaClient{host: ts.ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []models.Point + points = append(points, models.MustNewPoint( + "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" { + t.Fatalf("unexpected error: %v", err) + } +} + +// Ensure the shard writer returns an error when dialing times out. 
+func TestShardWriter_Write_ErrDialTimeout(t *testing.T) { + ts := newTestWriteService(nil) + ts.TSDBStore.WriteToShardFn = ts.writeShardSuccess + s := cluster.NewService(cluster.Config{}) + s.Listener = ts.muxln + s.TSDBStore = &ts.TSDBStore + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + // Zero timeout set to support all platforms. + w := cluster.NewShardWriter(0, 1) + w.MetaClient = &metaClient{host: ts.ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []models.Point + + points = append(points, models.MustNewPoint( + "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) { + t.Fatalf("expected error %v, to contain %s", err, exp) + } +} + +// Ensure the shard writer returns an error when reading times out. +func TestShardWriter_Write_ErrReadTimeout(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + w := cluster.NewShardWriter(time.Millisecond, 1) + w.MetaClient = &metaClient{host: ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []models.Point + points = append(points, models.MustNewPoint( + "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure the shard writer returns an error when we can't get a connection. +func TestShardWriter_Write_PoolMax(t *testing.T) { + ts := newTestWriteService(writeShardSlow) + s := cluster.NewService(cluster.Config{ + ShardWriterTimeout: toml.Duration(100 * time.Millisecond), + }) + s.Listener = ts.muxln + s.TSDBStore = &ts.TSDBStore + if err := s.Open(); err != nil { + t.Fatal(err) + } + defer s.Close() + defer ts.Close() + + w := cluster.NewShardWriter(100*time.Millisecond, 1) + w.MetaClient = &metaClient{host: ts.ln.Addr().String()} + now := time.Now() + + shardID := uint64(1) + ownerID := uint64(2) + var points []models.Point + points = append(points, models.MustNewPoint( + "cpu", models.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, + )) + + go w.WriteShard(shardID, ownerID, points) + time.Sleep(time.Millisecond) + if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "timed out waiting for free connection" { + t.Fatalf("unexpected error: %v", err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go new file mode 100644 index 0000000000..f6929bcf64 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go @@ -0,0 +1,838 @@ +package cli // import "github.com/influxdata/influxdb/cmd/influx/cli" + +import ( + "bytes" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/url" + "os" + "os/signal" + "os/user" + "path/filepath" + "sort" + "strconv" + "strings" + "syscall" + "text/tabwriter" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/importer/v8" + "github.com/peterh/liner" +) + +const ( + noTokenMsg = "Visit https://enterprise.influxdata.com to register for updates, InfluxDB server management, and 
monitoring.\n" +) + +// ErrBlankCommand is returned when a parsed command is empty. +var ErrBlankCommand = errors.New("empty input") + +// CommandLine holds CLI configuration and state +type CommandLine struct { + Client *client.Client + Line *liner.State + Host string + Port int + Username string + Password string + Database string + Ssl bool + UnsafeSsl bool + RetentionPolicy string + ClientVersion string + ServerVersion string + Pretty bool // controls pretty print for json + Format string // controls the output format. Valid values are json, csv, or column + Precision string + WriteConsistency string + Execute string + ShowVersion bool + Import bool + PPS int // Controls how many points per second the import will allow via throttling + Path string + Compressed bool + Quit chan struct{} + IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing) + osSignals chan os.Signal + historyFilePath string +} + +// New returns an instance of CommandLine +func New(version string) *CommandLine { + return &CommandLine{ + ClientVersion: version, + Quit: make(chan struct{}, 1), + osSignals: make(chan os.Signal, 1), + } +} + +// Run executes the CLI +func (c *CommandLine) Run() error { + // register OS signals for graceful termination + if !c.IgnoreSignals { + signal.Notify(c.osSignals, os.Kill, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP) + } + + var promptForPassword bool + // determine if they set the password flag but provided no value + for _, v := range os.Args { + v = strings.ToLower(v) + if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" { + promptForPassword = true + break + } + } + + c.Line = liner.NewLiner() + defer c.Line.Close() + + c.Line.SetMultiLineMode(true) + + if promptForPassword { + p, e := c.Line.PasswordPrompt("password: ") + if e != nil { + fmt.Println("Unable to parse password.") + } else { + c.Password = p + } + } + + if err := c.Connect(""); err != nil { + return fmt.Errorf( + "Failed to connect to %s\nPlease check your connection settings and ensure 'influxd' is running.", + c.Client.Addr()) + } + + // Modify precision. 
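+	// SetPrecision accepts rfc3339, h, m, s, ms, u, or ns; anything else is
+	// rejected with a message and the current precision is kept.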
+ c.SetPrecision(c.Precision) + + if c.Execute == "" && !c.Import { + token, err := c.DatabaseToken() + if err != nil { + return fmt.Errorf("Failed to check token: %s", err.Error()) + } + if token == "" { + fmt.Printf(noTokenMsg) + } + fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) + } + + if c.Execute != "" { + // Make the non-interactive mode send everything through the CLI's parser + // the same way the interactive mode works + lines := strings.Split(c.Execute, "\n") + for _, line := range lines { + if err := c.ParseCommand(line); err != nil { + return err + } + } + + c.Line.Close() + return nil + } + + if c.Import { + path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + u, e := client.ParseConnectionString(path, c.Ssl) + if e != nil { + return e + } + + config := v8.NewConfig() + config.Username = c.Username + config.Password = c.Password + config.Precision = "ns" + config.WriteConsistency = "any" + config.Path = c.Path + config.Version = c.ClientVersion + config.URL = u + config.Compressed = c.Compressed + config.PPS = c.PPS + config.Precision = c.Precision + + i := v8.NewImporter(config) + if err := i.Import(); err != nil { + err = fmt.Errorf("ERROR: %s\n", err) + c.Line.Close() + return err + } + c.Line.Close() + return nil + } + + c.Version() + + usr, err := user.Current() + // Only load/write history if we can get the user + if err == nil { + c.historyFilePath = filepath.Join(usr.HomeDir, ".influx_history") + if historyFile, err := os.Open(c.historyFilePath); err == nil { + c.Line.ReadHistory(historyFile) + historyFile.Close() + } + } + + // read from prompt until exit is run + for { + select { + case <-c.osSignals: + close(c.Quit) + case <-c.Quit: + c.exit() + return nil + default: + l, e := c.Line.Prompt("> ") + if e == io.EOF { + // Instead of die, register that someone exited the program gracefully + l = "exit" + } else if e != nil { + break + } + if err := c.ParseCommand(l); err != ErrBlankCommand { + c.Line.AppendHistory(l) + c.saveHistory() + } + } + } +} + +// ParseCommand parses an instruction and calls related method, if any +func (c *CommandLine) ParseCommand(cmd string) error { + lcmd := strings.TrimSpace(strings.ToLower(cmd)) + tokens := strings.Fields(lcmd) + + if len(tokens) > 0 { + switch tokens[0] { + case "exit", "quit": + // signal the program to exit + close(c.Quit) + case "gopher": + c.gopher() + case "connect": + return c.Connect(cmd) + case "auth": + c.SetAuth(cmd) + case "help": + c.help() + case "history": + c.history() + case "format": + c.SetFormat(cmd) + case "precision": + c.SetPrecision(cmd) + case "consistency": + c.SetWriteConsistency(cmd) + case "settings": + c.Settings() + case "pretty": + c.Pretty = !c.Pretty + if c.Pretty { + fmt.Println("Pretty print enabled") + } else { + fmt.Println("Pretty print disabled") + } + case "use": + c.use(cmd) + case "insert": + return c.Insert(cmd) + default: + return c.ExecuteQuery(cmd) + } + + return nil + } + return ErrBlankCommand +} + +// Connect connects client to a server +func (c *CommandLine) Connect(cmd string) error { + var cl *client.Client + var u url.URL + + // Remove the "connect" keyword if it exists + path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1)) + + // If they didn't provide a connection string, use the current settings + if path == "" { + path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) + } + + var e error + u, e = client.ParseConnectionString(path, c.Ssl) + if e != nil { + return e + } + + config := client.NewConfig() + config.URL = u 
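+	// Carry the CLI's current credentials, user agent, precision, and SSL
+	// settings over to the new client so "connect" keeps the session state.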
+ config.Username = c.Username + config.Password = c.Password + config.UserAgent = "InfluxDBShell/" + c.ClientVersion + config.Precision = c.Precision + config.UnsafeSsl = c.UnsafeSsl + cl, err := client.NewClient(config) + if err != nil { + return fmt.Errorf("Could not create client %s", err) + } + c.Client = cl + + var v string + if _, v, e = c.Client.Ping(); e != nil { + return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr()) + } + c.ServerVersion = v + + return nil +} + +// SetAuth sets client authentication credentials +func (c *CommandLine) SetAuth(cmd string) { + // If they pass in the entire command, we should parse it + // auth + args := strings.Fields(cmd) + if len(args) == 3 { + args = args[1:] + } else { + args = []string{} + } + + if len(args) == 2 { + c.Username = args[0] + c.Password = args[1] + } else { + u, e := c.Line.Prompt("username: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.Username = strings.TrimSpace(u) + p, e := c.Line.PasswordPrompt("password: ") + if e != nil { + fmt.Printf("Unable to process input: %s", e) + return + } + c.Password = p + } + + // Update the client as well + c.Client.SetAuth(c.Username, c.Password) +} + +func (c *CommandLine) use(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + if len(args) != 2 { + fmt.Printf("Could not parse database name from %q.\n", cmd) + return + } + d := args[1] + + // validate if specified database exists + response, err := c.Client.Query(client.Query{Command: "SHOW DATABASES"}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return + } + + if err := response.Error(); err != nil { + fmt.Printf("ERR: %s\n", err) + return + } + + // verify the provided database exists + databaseExists := func() bool { + for _, result := range response.Results { + for _, row := range result.Series { + if row.Name == "databases" { + for _, values := range row.Values { + for _, database := range values { + if database == d { + return true + } + } + } + } + } + } + return false + }() + if databaseExists { + c.Database = d + fmt.Printf("Using database %s\n", d) + } else { + fmt.Printf("ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\n", d) + } +} + +// SetPrecision sets client precision +func (c *CommandLine) SetPrecision(cmd string) { + // Remove the "precision" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + switch cmd { + case "h", "m", "s", "ms", "u", "ns": + c.Precision = cmd + c.Client.SetPrecision(c.Precision) + case "rfc3339": + c.Precision = "" + c.Client.SetPrecision(c.Precision) + default: + fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd) + } +} + +// SetFormat sets output format +func (c *CommandLine) SetFormat(cmd string) { + // Remove the "format" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + switch cmd { + case "json", "csv", "column": + c.Format = cmd + default: + fmt.Printf("Unknown format %q. 
Please use json, csv, or column.\n", cmd) + } +} + +// SetWriteConsistency sets cluster consistency level +func (c *CommandLine) SetWriteConsistency(cmd string) { + // Remove the "consistency" keyword if it exists + cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1)) + // normalize cmd + cmd = strings.ToLower(cmd) + + _, err := cluster.ParseConsistencyLevel(cmd) + if err != nil { + fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd) + return + } + c.WriteConsistency = cmd +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } + +// isLetter returns true if the rune is a letter. +func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } + +// isDigit returns true if the rune is a digit. +func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } + +// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer. +func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } + +// isIdentChar returns true if the rune can be used in an unquoted identifier. +func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') } + +func parseUnquotedIdentifier(stmt string) (string, string) { + if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, fields[0]) + } + return "", stmt +} + +func parseDoubleQuotedIdentifier(stmt string) (string, string) { + escapeNext := false + fields := strings.FieldsFunc(stmt, func(ch rune) bool { + if ch == '\\' { + escapeNext = true + } else if ch == '"' { + if !escapeNext { + return true + } + escapeNext = false + } + return false + }) + if len(fields) > 0 { + return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"") + } + return "", stmt +} + +func parseNextIdentifier(stmt string) (ident, remainder string) { + if len(stmt) > 0 { + switch { + case isWhitespace(rune(stmt[0])): + return parseNextIdentifier(stmt[1:]) + case isIdentFirstChar(rune(stmt[0])): + return parseUnquotedIdentifier(stmt) + case stmt[0] == '"': + return parseDoubleQuotedIdentifier(stmt) + } + } + return "", stmt +} + +func (c *CommandLine) parseInto(stmt string) string { + ident, stmt := parseNextIdentifier(stmt) + if strings.HasPrefix(stmt, ".") { + c.Database = ident + fmt.Printf("Using database %s\n", c.Database) + ident, stmt = parseNextIdentifier(stmt[1:]) + } + if strings.HasPrefix(stmt, " ") { + c.RetentionPolicy = ident + fmt.Printf("Using retention policy %s\n", c.RetentionPolicy) + return stmt[1:] + } + return stmt +} + +// Insert runs an INSERT statement +func (c *CommandLine) Insert(stmt string) error { + i, point := parseNextIdentifier(stmt) + if !strings.EqualFold(i, "insert") { + fmt.Printf("ERR: found %s, expected INSERT\n", i) + return nil + } + if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") { + point = c.parseInto(r) + } + _, err := c.Client.Write(client.BatchPoints{ + Points: []client.Point{ + client.Point{Raw: point}, + }, + Database: c.Database, + RetentionPolicy: c.RetentionPolicy, + Precision: "n", + WriteConsistency: c.WriteConsistency, + }) + if err != nil { + fmt.Printf("ERR: %s\n", err) + if c.Database == "" { + fmt.Println("Note: error may be due to not setting a database or retention policy.") + fmt.Println(`Please set a database with the command "use " or`) + fmt.Println("INSERT INTO . 
") + } + return err + } + return nil +} + +// ExecuteQuery runs any query statement +func (c *CommandLine) ExecuteQuery(query string) error { + response, err := c.Client.Query(client.Query{Command: query, Database: c.Database}) + if err != nil { + fmt.Printf("ERR: %s\n", err) + return err + } + c.FormatResponse(response, os.Stdout) + if err := response.Error(); err != nil { + fmt.Printf("ERR: %s\n", response.Error()) + if c.Database == "" { + fmt.Println("Warning: It is possible this error is due to not setting a database.") + fmt.Println(`Please set a database with the command "use ".`) + } + return err + } + return nil +} + +// DatabaseToken retrieves database token +func (c *CommandLine) DatabaseToken() (string, error) { + response, err := c.Client.Query(client.Query{Command: "SHOW DIAGNOSTICS for 'registration'"}) + if err != nil { + return "", err + } + if response.Error() != nil || len((*response).Results[0].Series) == 0 { + return "", nil + } + + // Look for position of "token" column. + for i, s := range (*response).Results[0].Series[0].Columns { + if s == "token" { + return (*response).Results[0].Series[0].Values[0][i].(string), nil + } + } + return "", nil +} + +// FormatResponse formats output to previsouly chosen format +func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) { + switch c.Format { + case "json": + c.writeJSON(response, w) + case "csv": + c.writeCSV(response, w) + case "column": + c.writeColumns(response, w) + default: + fmt.Fprintf(w, "Unknown output format %q.\n", c.Format) + } +} + +func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) { + var data []byte + var err error + if c.Pretty { + data, err = json.MarshalIndent(response, "", " ") + } else { + data, err = json.Marshal(response) + } + if err != nil { + fmt.Fprintf(w, "Unable to parse json: %s\n", err) + return + } + fmt.Fprintln(w, string(data)) +} + +func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) { + csvw := csv.NewWriter(w) + for _, result := range response.Results { + // Create a tabbed writer for each result as they won't always line up + rows := c.formatResults(result, "\t") + for _, r := range rows { + csvw.Write(strings.Split(r, "\t")) + } + csvw.Flush() + } +} + +func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { + for _, result := range response.Results { + // Create a tabbed writer for each result a they won't always line up + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + csv := c.formatResults(result, "\t") + for _, r := range csv { + fmt.Fprintln(w, r) + } + w.Flush() + } +} + +// formatResults will behave differently if you are formatting for columns or csv +func (c *CommandLine) formatResults(result client.Result, separator string) []string { + rows := []string{} + // Create a tabbed writer for each result a they won't always line up + for i, row := range result.Series { + // gather tags + tags := []string{} + for k, v := range row.Tags { + tags = append(tags, fmt.Sprintf("%s=%s", k, v)) + sort.Strings(tags) + } + + columnNames := []string{} + + // Only put name/tags in a column if format is csv + if c.Format == "csv" { + if len(tags) > 0 { + columnNames = append([]string{"tags"}, columnNames...) + } + + if row.Name != "" { + columnNames = append([]string{"name"}, columnNames...) 
+ } + } + + for _, column := range row.Columns { + columnNames = append(columnNames, column) + } + + // Output a line separator if we have more than one set or results and format is column + if i > 0 && c.Format == "column" { + rows = append(rows, "") + } + + // If we are column format, we break out the name/tag to seperate lines + if c.Format == "column" { + if row.Name != "" { + n := fmt.Sprintf("name: %s", row.Name) + rows = append(rows, n) + if len(tags) == 0 { + l := strings.Repeat("-", len(n)) + rows = append(rows, l) + } + } + if len(tags) > 0 { + t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) + rows = append(rows, t) + } + } + + rows = append(rows, strings.Join(columnNames, separator)) + + // if format is column, break tags to their own line/format + if c.Format == "column" && len(tags) > 0 { + lines := []string{} + for _, columnName := range columnNames { + lines = append(lines, strings.Repeat("-", len(columnName))) + } + rows = append(rows, strings.Join(lines, separator)) + } + + for _, v := range row.Values { + var values []string + if c.Format == "csv" { + if row.Name != "" { + values = append(values, row.Name) + } + if len(tags) > 0 { + values = append(values, strings.Join(tags, ",")) + } + } + + for _, vv := range v { + values = append(values, interfaceToString(vv)) + } + rows = append(rows, strings.Join(values, separator)) + } + // Outout a line separator if in column format + if c.Format == "column" { + rows = append(rows, "") + } + } + return rows +} + +func interfaceToString(v interface{}) string { + switch t := v.(type) { + case nil: + return "" + case bool: + return fmt.Sprintf("%v", v) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: + return fmt.Sprintf("%d", t) + case float32, float64: + return fmt.Sprintf("%v", t) + default: + return fmt.Sprintf("%v", t) + } +} + +// Settings prints current settings +func (c *CommandLine) Settings() { + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + if c.Port > 0 { + fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) + } else { + fmt.Fprintf(w, "Host\t%s\n", c.Host) + } + fmt.Fprintf(w, "Username\t%s\n", c.Username) + fmt.Fprintf(w, "Database\t%s\n", c.Database) + fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty) + fmt.Fprintf(w, "Format\t%s\n", c.Format) + fmt.Fprintf(w, "Write Consistency\t%s\n", c.WriteConsistency) + fmt.Fprintln(w) + w.Flush() +} + +func (c *CommandLine) help() { + fmt.Println(`Usage: + connect connects to another node specified by host:port + auth prompts for username and password + pretty toggles pretty print for the json format + use sets current database + format specifies the format of the server responses: json, csv, or column + precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns + consistency sets write consistency level: any, one, quorum, or all + history displays command history + settings outputs the current settings for the shell + exit/quit/ctrl+d quits the influx shell + + show databases show database names + show series show series information + show measurements show measurement information + show tag keys show tag key information + show field keys show field key information + + A full list of influxql commands can be found at: + https://docs.influxdata.com/influxdb/v0.10/query_language/spec +`) +} + +func (c *CommandLine) history() { + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + fmt.Print(buf.String()) +} + +func (c *CommandLine) saveHistory() { + if historyFile, err := os.Create(c.historyFilePath); err != nil 
{ + fmt.Printf("There was an error writing history file: %s\n", err) + } else { + c.Line.WriteHistory(historyFile) + historyFile.Close() + } +} + +func (c *CommandLine) gopher() { + fmt.Println(` + .-::-::://:-::- .:/++/' + '://:-''/oo+//++o+/.://o- ./+: + .:-. '++- .o/ '+yydhy' o- + .:/. .h: :osoys .smMN- :/ + -/:.' s- /MMMymh. '/y/ s' + -+s:'''' d -mMMms// '-/o: + -/++/++/////:. o: '... s- :s. + :+-+s-' ':/' 's- /+ 'o: + '+-'o: /ydhsh. '//. '-o- o- + .y. o: .MMMdm+y ':+++:::/+:.' s: + .-h/ y- 'sdmds'h -+ydds:::-.' 'h. + .//-.d' o: '.' 'dsNMMMNh:.:++' :y + +y. 'd 's. .s:mddds: ++ o/ + 'N- odd 'o/. './o-s-' .---+++' o- + 'N' yNd .://:/:::::. -s -+/s/./s' 'o/' + so' .h '''' ////s: '+. .s +y' + os/-.y' 's' 'y::+ +d' + '.:o/ -+:-:.' so.---.' + o' 'd-.''/s' + .s' :y.''.y + -s mo:::' + :: yh + // '''' /M' + o+ .s///:/. 'N: + :+ /: -s' ho + 's- -/s/:+/.+h' +h + ys' ':' '-. -d + oh .h + /o .s + s. .h + -y .d + m/ -h + +d /o + 'N- y: + h: m. + s- -d + o- s+ + +- 'm' + s/ oo--. + y- /s ':+' + s' 'od--' .d: + -+ ':o: ':+-/+ + y- .:+- ' + //o- '.:+/. + .-:+/' ''-/+/. + ./:' ''.:o+/-' + .+o:/:/+-' ''.-+ooo/-' + o: -h///++////-. + /: .o/ + //+ 'y + ./sooy. + +`) +} + +// Version prints CLI version +func (c *CommandLine) Version() { + fmt.Println("InfluxDB shell " + c.ClientVersion) +} + +func (c *CommandLine) exit() { + // write to history file + c.saveHistory() + // release line resources + c.Line.Close() + c.Line = nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go new file mode 100644 index 0000000000..bb5f393429 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go @@ -0,0 +1,514 @@ +package cli_test + +import ( + "bufio" + "bytes" + "io" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" + "github.com/influxdata/influxdb/influxql" + "github.com/peterh/liner" +) + +const ( + CLIENT_VERSION = "y.y" + SERVER_VERSION = "x.x" +) + +func TestNewCLI(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + + if c == nil { + t.Fatal("CommandLine shouldn't be nil.") + } + + if c.ClientVersion != CLIENT_VERSION { + t.Fatalf("CommandLine version is %s but should be %s", c.ClientVersion, CLIENT_VERSION) + } +} + +func TestRunCLI(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.IgnoreSignals = true + go func() { + close(c.Quit) + }() + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestRunCLI_ExecuteInsert(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + h, p, _ := net.SplitHostPort(u.Host) + c := cli.New(CLIENT_VERSION) + c.Host = h + c.Port, _ = strconv.Atoi(p) + c.Precision = "ms" + c.Execute = "INSERT sensor,floor=1 value=2" + c.IgnoreSignals = true + if err := c.Run(); err != nil { + t.Fatalf("Run failed with error: %s", err) + } +} + +func TestSetAuth(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + u := "userx" + p := "pwdy" + c.SetAuth("auth " + u + " " + p) + + // validate CLI configuration + if c.Username != u { + t.Fatalf("Username is 
%s but should be %s", c.Username, u) + } + if c.Password != p { + t.Fatalf("Password is %s but should be %s", c.Password, p) + } +} + +func TestSetPrecision(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // validate set non-default precision + p := "ns" + c.SetPrecision("precision " + p) + if c.Precision != p { + t.Fatalf("Precision is %s but should be %s", c.Precision, p) + } + + // validate set default precision which equals empty string + p = "rfc3339" + c.SetPrecision("precision " + p) + if c.Precision != "" { + t.Fatalf("Precision is %s but should be empty", c.Precision) + } +} + +func TestSetFormat(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // validate set non-default format + f := "json" + c.SetFormat("format " + f) + if c.Format != f { + t.Fatalf("Format is %s but should be %s", c.Format, f) + } +} + +func TestSetWriteConsistency(t *testing.T) { + t.Parallel() + c := cli.New(CLIENT_VERSION) + config := client.NewConfig() + client, _ := client.NewClient(config) + c.Client = client + + // set valid write consistency + consistency := "all" + c.SetWriteConsistency("consistency " + consistency) + if c.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.WriteConsistency, consistency) + } + + // set different valid write consistency and validate change + consistency = "quorum" + c.SetWriteConsistency("consistency " + consistency) + if c.WriteConsistency != consistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.WriteConsistency, consistency) + } + + // set invalid write consistency and verify there was no change + invalidConsistency := "invalid_consistency" + c.SetWriteConsistency("consistency " + invalidConsistency) + if c.WriteConsistency == invalidConsistency { + t.Fatalf("WriteConsistency is %s but should be %s", c.WriteConsistency, consistency) + } +} + +func TestParseCommand_CommandsExist(t *testing.T) { + t.Parallel() + c, err := client.NewClient(client.Config{}) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + m := cli.CommandLine{Client: c, Line: liner.NewLiner()} + tests := []struct { + cmd string + }{ + {cmd: "gopher"}, + {cmd: "auth"}, + {cmd: "help"}, + {cmd: "format"}, + {cmd: "precision"}, + {cmd: "settings"}, + } + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil`, err, test.cmd) + } + } +} + +func TestParseCommand_Connect(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + cmd := "connect " + u.Host + c := cli.CommandLine{} + + // assert connection is established + if err := c.ParseCommand(cmd); err != nil { + t.Fatalf("There was an error while connecting to %v: %v", u.Path, err) + } + + // assert server version is populated + if c.ServerVersion != SERVER_VERSION { + t.Fatalf("Server version is %s but should be %s.", c.ServerVersion, SERVER_VERSION) + } +} + +func TestParseCommand_TogglePretty(t *testing.T) { + t.Parallel() + c := cli.CommandLine{} + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } + c.ParseCommand("pretty") + if !c.Pretty { + t.Fatalf(`Pretty should be true.`) + } + c.ParseCommand("pretty") + if c.Pretty { + t.Fatalf(`Pretty should be false.`) + } +} + +func TestParseCommand_Exit(t *testing.T) { + t.Parallel() + tests := 
[]struct { + cmd string + }{ + {cmd: "exit"}, + {cmd: " exit"}, + {cmd: "exit "}, + {cmd: "Exit "}, + } + + for _, test := range tests { + c := cli.CommandLine{Quit: make(chan struct{}, 1)} + c.ParseCommand(test.cmd) + // channel should be closed + if _, ok := <-c.Quit; ok { + t.Fatalf(`Command "exit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Quit(t *testing.T) { + t.Parallel() + tests := []struct { + cmd string + }{ + {cmd: "quit"}, + {cmd: " quit"}, + {cmd: "quit "}, + {cmd: "Quit "}, + } + + for _, test := range tests { + c := cli.CommandLine{Quit: make(chan struct{}, 1)} + c.ParseCommand(test.cmd) + // channel should be closed + if _, ok := <-c.Quit; ok { + t.Fatalf(`Command "quit" failed for %q.`, test.cmd) + } + } +} + +func TestParseCommand_Use(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + m := cli.CommandLine{Client: c} + + tests := []struct { + cmd string + }{ + {cmd: "use db"}, + {cmd: " use db"}, + {cmd: "use db "}, + {cmd: "use db;"}, + {cmd: "use db; "}, + {cmd: "Use db"}, + } + + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + + if m.Database != "db" { + t.Fatalf(`Command "use" changed database to %q. Expected db`, m.Database) + } + } +} + +func TestParseCommand_Consistency(t *testing.T) { + t.Parallel() + c := cli.CommandLine{} + tests := []struct { + cmd string + }{ + {cmd: "consistency one"}, + {cmd: " consistency one"}, + {cmd: "consistency one "}, + {cmd: "consistency one;"}, + {cmd: "consistency one; "}, + {cmd: "Consistency one"}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + + if c.WriteConsistency != "one" { + t.Fatalf(`Command "consistency" changed consistency to %q. Expected one`, c.WriteConsistency) + } + } +} + +func TestParseCommand_Insert(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. expected %v, actual %v", nil, err) + } + m := cli.CommandLine{Client: c} + + tests := []struct { + cmd string + }{ + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, + {cmd: "insert cpu,host=serverA,region=us-west value=1.0 "}, + {cmd: "insert"}, + {cmd: "Insert "}, + {cmd: "insert c"}, + {cmd: "insert int"}, + } + + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + } +} + +func TestParseCommand_InsertInto(t *testing.T) { + t.Parallel() + ts := emptyTestServer() + defer ts.Close() + + u, _ := url.Parse(ts.URL) + config := client.Config{URL: *u} + c, err := client.NewClient(config) + if err != nil { + t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) + } + m := cli.CommandLine{Client: c} + + tests := []struct { + cmd, db, rp string + }{ + { + cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test", + }, + { + cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "", + rp: "test test", + }, + { + cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, + db: "test", + rp: "test", + }, + { + cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, + db: "test", + rp: "test test", + }, + { + cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, + db: "d b", + rp: "test test", + }, + } + + for _, test := range tests { + if err := m.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + if m.Database != test.db { + t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, m.Database) + } + if m.RetentionPolicy != test.rp { + t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, m.RetentionPolicy) + } + } +} + +func TestParseCommand_History(t *testing.T) { + t.Parallel() + c := cli.CommandLine{Line: liner.NewLiner()} + defer c.Line.Close() + + // append one entry to history + c.Line.AppendHistory("abc") + + tests := []struct { + cmd string + }{ + {cmd: "history"}, + {cmd: " history"}, + {cmd: "history "}, + {cmd: "History "}, + } + + for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != nil { + t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) + } + } + + // buf size should be at least 1 + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + if buf.Len() < 1 { + t.Fatal("History is borked") + } +} + +func TestParseCommand_HistoryWithBlankCommand(t *testing.T) { + t.Parallel() + c := cli.CommandLine{Line: liner.NewLiner()} + defer c.Line.Close() + + // append one entry to history + c.Line.AppendHistory("x") + + tests := []struct { + cmd string + err error + }{ + {cmd: "history"}, + {cmd: " history"}, + {cmd: "history "}, + {cmd: "", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + {cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + {cmd: " ", err: cli.ErrBlankCommand}, // shouldn't be persisted in history + } + + // a blank command will return cli.ErrBlankCommand. 
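+	// ParseCommand must return that exact sentinel for blank input, and such
+	// input must never be appended to the history written out below.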
+ for _, test := range tests { + if err := c.ParseCommand(test.cmd); err != test.err { + t.Errorf(`Got error %v for command %q, expected %v`, err, test.cmd, test.err) + } + } + + // buf shall not contain empty commands + var buf bytes.Buffer + c.Line.WriteHistory(&buf) + scanner := bufio.NewScanner(&buf) + for scanner.Scan() { + if strings.TrimSpace(scanner.Text()) == "" { + t.Fatal("Empty commands should not be persisted in history.") + } + } +} + +// helper methods + +func emptyTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Influxdb-Version", SERVER_VERSION) + + switch r.URL.Path { + case "/query": + values := r.URL.Query() + parser := influxql.NewParser(bytes.NewBufferString(values.Get("q"))) + q, err := parser.ParseQuery() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + stmt := q.Statements[0] + + switch stmt.(type) { + case *influxql.ShowDatabasesStatement: + io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`) + case *influxql.ShowDiagnosticsStatement: + io.WriteString(w, `{"results":[{}]}`) + } + case "/write": + w.WriteHeader(http.StatusOK) + } + })) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go new file mode 100644 index 0000000000..43eba196ac --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go @@ -0,0 +1,109 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/influxdata/influxdb/client" + "github.com/influxdata/influxdb/cmd/influx/cli" +) + +// These variables are populated via the Go linker. +var ( + version = "0.9" +) + +const ( + // defaultFormat is the default format of the results when issuing queries + defaultFormat = "column" + + // defaultPrecision is the default timestamp format of the results when issuing queries + defaultPrecision = "ns" + + // defaultPPS is the default points per second that the import will throttle at + // by default it's 0, which means it will not throttle + defaultPPS = 0 +) + +func main() { + c := cli.New(version) + + fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) + fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") + fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") + fs.StringVar(&c.Username, "username", c.Username, "Username to connect to the server.") + fs.StringVar(&c.Password, "password", c.Password, `Password to connect to the server. 
Leaving blank will prompt for password (--password="").`) + fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") + fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") + fs.BoolVar(&c.UnsafeSsl, "unsafeSsl", false, "Set this when connecting to the cluster using https and not use SSL verification.") + fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") + fs.StringVar(&c.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") + fs.StringVar(&c.WriteConsistency, "consistency", "any", "Set write consistency level: any, one, quorum, or all.") + fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") + fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") + fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") + fs.BoolVar(&c.Import, "import", false, "Import a previous database.") + fs.IntVar(&c.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.") + fs.StringVar(&c.Path, "path", "", "path to the file to import") + fs.BoolVar(&c.Compressed, "compressed", false, "set to true if the import file is compressed") + + // Define our own custom usage to print + fs.Usage = func() { + fmt.Println(`Usage of influx: + -version + Display the version and exit. + -host 'host name' + Host to connect to. + -port 'port #' + Port to connect to. + -database 'database name' + Database to connect to the server. + -password 'password' + Password to connect to the server. Leaving blank will prompt for password (--password ''). + -username 'username' + Username to connect to the server. + -ssl + Use https for requests. + -unsafeSsl + Set this when connecting to the cluster using https and not use SSL verification. + -execute 'command' + Execute command and quit. + -format 'json|csv|column' + Format specifies the format of the server responses: json, csv, or column. + -precision 'rfc3339|h|m|s|ms|u|ns' + Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns. + -consistency 'any|one|quorum|all' + Set write consistency level: any, one, quorum, or all + -pretty + Turns on pretty print for the json format. + -import + Import a previous database export from file + -pps + How many points per second the import will allow. By default it is zero and will not throttle importing. 
+ -path + Path to file to import + -compressed + Set to true if the import file is compressed + +Examples: + + # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: + $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty + + # Connect to a specific database on startup and set database context: + $ influx -database 'metrics' -host 'localhost' -port '8086' +`) + } + fs.Parse(os.Args[1:]) + + if c.ShowVersion { + c.Version() + os.Exit(0) + } + + if err := c.Run(); err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/info.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/info.go new file mode 100644 index 0000000000..5cf5514fc0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/info.go @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + "text/tabwriter" + + "github.com/influxdata/influxdb/tsdb" +) + +func cmdInfo(path string) { + tstore := tsdb.NewStore(filepath.Join(path, "data")) + tstore.Logger = log.New(ioutil.Discard, "", log.LstdFlags) + tstore.EngineOptions.Config.Dir = filepath.Join(path, "data") + tstore.EngineOptions.Config.WALLoggingEnabled = false + tstore.EngineOptions.Config.WALDir = filepath.Join(path, "wal") + if err := tstore.Open(); err != nil { + fmt.Printf("Failed to open dir: %v\n", err) + os.Exit(1) + } + + size, err := tstore.DiskSize() + if err != nil { + fmt.Printf("Failed to determine disk usage: %v\n", err) + } + + // Summary stats + fmt.Printf("Shards: %d, Indexes: %d, Databases: %d, Disk Size: %d, Series: %d\n\n", + tstore.ShardN(), tstore.DatabaseIndexN(), len(tstore.Databases()), size, countSeries(tstore)) + + tw := tabwriter.NewWriter(os.Stdout, 16, 8, 0, '\t', 0) + + fmt.Fprintln(tw, strings.Join([]string{"Shard", "DB", "Measurement", "Tags [#K/#V]", "Fields [Name:Type]", "Series"}, "\t")) + + shardIDs := tstore.ShardIDs() + + databases := tstore.Databases() + sort.Strings(databases) + + for _, db := range databases { + index := tstore.DatabaseIndex(db) + measurements := index.Measurements() + sort.Sort(measurements) + for _, m := range measurements { + tags := m.TagKeys() + tagValues := 0 + for _, tag := range tags { + tagValues += len(m.TagValues(tag)) + } + fields := m.FieldNames() + sort.Strings(fields) + series := m.SeriesKeys() + sort.Strings(series) + sort.Sort(ShardIDs(shardIDs)) + + // Sample a point from each measurement to determine the field types + for _, shardID := range shardIDs { + shard := tstore.Shard(shardID) + codec := shard.FieldCodec(m.Name) + for _, field := range codec.Fields() { + ft := fmt.Sprintf("%s:%s", field.Name, field.Type) + fmt.Fprintf(tw, "%d\t%s\t%s\t%d/%d\t%d [%s]\t%d\n", shardID, db, m.Name, len(tags), tagValues, + len(fields), ft, len(series)) + + } + + } + } + } + tw.Flush() +} + +func countSeries(tstore *tsdb.Store) int { + var count int + for _, shardID := range tstore.ShardIDs() { + shard := tstore.Shard(shardID) + cnt, err := shard.SeriesCount() + if err != nil { + fmt.Printf("series count failed: %v\n", err) + continue + } + count += cnt + } + return count +} + +// ShardIDs is a collection of UINT 64 that represent shard ids. 
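+// ShardIDs implements sort.Interface so cmdInfo can sort the shard list in ascending ID order before sampling field types per shard.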
+type ShardIDs []uint64 + +func (a ShardIDs) Len() int { return len(a) } +func (a ShardIDs) Less(i, j int) bool { return a[i] < a[j] } +func (a ShardIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go new file mode 100644 index 0000000000..bac8fd20f9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go @@ -0,0 +1,120 @@ +package main + +import ( + "flag" + "fmt" + "os" + + _ "github.com/influxdata/influxdb/tsdb/engine" +) + +func usage() { + println(`Usage: influx_inspect [options] + +Displays detailed information about InfluxDB data files. +`) + + println(`Commands: + info - displays series meta-data for all shards. Default location [$HOME/.influxdb] + dumptsm - dumps low-level details about tsm1 files. + dumptsmdev - dumps low-level details about tsm1dev files.`) + println() +} + +func main() { + + flag.Usage = usage + flag.Parse() + + if len(flag.Args()) == 0 { + flag.Usage() + os.Exit(0) + } + + switch flag.Args()[0] { + case "info": + var path string + fs := flag.NewFlagSet("info", flag.ExitOnError) + fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") + + fs.Usage = func() { + println("Usage: influx_inspect info [options]\n\n Displays series meta-data for all shards..") + println() + println("Options:") + fs.PrintDefaults() + } + + if err := fs.Parse(flag.Args()[1:]); err != nil { + fmt.Printf("%v", err) + os.Exit(1) + } + cmdInfo(path) + case "dumptsm": + var dumpAll bool + opts := &tsdmDumpOpts{} + fs := flag.NewFlagSet("file", flag.ExitOnError) + fs.BoolVar(&opts.dumpIndex, "index", false, "Dump raw index data") + fs.BoolVar(&opts.dumpBlocks, "blocks", false, "Dump raw block data") + fs.BoolVar(&dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information") + fs.StringVar(&opts.filterKey, "filter-key", "", "Only display index and block data match this key substring") + + fs.Usage = func() { + println("Usage: influx_inspect dumptsm [options] \n\n Dumps low-level details about tsm1 files.") + println() + println("Options:") + fs.PrintDefaults() + os.Exit(0) + } + + if err := fs.Parse(flag.Args()[1:]); err != nil { + fmt.Printf("%v", err) + os.Exit(1) + } + + if len(fs.Args()) == 0 || fs.Args()[0] == "" { + fmt.Printf("TSM file not specified\n\n") + fs.Usage() + fs.PrintDefaults() + os.Exit(1) + } + opts.path = fs.Args()[0] + opts.dumpBlocks = opts.dumpBlocks || dumpAll || opts.filterKey != "" + opts.dumpIndex = opts.dumpIndex || dumpAll || opts.filterKey != "" + cmdDumpTsm1(opts) + case "dumptsmdev": + var dumpAll bool + opts := &tsdmDumpOpts{} + fs := flag.NewFlagSet("file", flag.ExitOnError) + fs.BoolVar(&opts.dumpIndex, "index", false, "Dump raw index data") + fs.BoolVar(&opts.dumpBlocks, "blocks", false, "Dump raw block data") + fs.BoolVar(&dumpAll, "all", false, "Dump all data. 
Caution: This may print a lot of information") + fs.StringVar(&opts.filterKey, "filter-key", "", "Only display index and block data match this key substring") + + fs.Usage = func() { + println("Usage: influx_inspect dumptsm [options] \n\n Dumps low-level details about tsm1 files.") + println() + println("Options:") + fs.PrintDefaults() + os.Exit(0) + } + + if err := fs.Parse(flag.Args()[1:]); err != nil { + fmt.Printf("%v", err) + os.Exit(1) + } + + if len(fs.Args()) == 0 || fs.Args()[0] == "" { + fmt.Printf("TSM file not specified\n\n") + fs.Usage() + fs.PrintDefaults() + os.Exit(1) + } + opts.path = fs.Args()[0] + opts.dumpBlocks = opts.dumpBlocks || dumpAll || opts.filterKey != "" + opts.dumpIndex = opts.dumpIndex || dumpAll || opts.filterKey != "" + cmdDumpTsm1dev(opts) + default: + flag.Usage() + os.Exit(1) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/tsm.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/tsm.go new file mode 100644 index 0000000000..d488797ffc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/tsm.go @@ -0,0 +1,653 @@ +package main + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/golang/snappy" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// these consts are for the old tsm format. They can be removed once we remove +// the inspection for the original tsm1 files. +const ( + //IDsFileExtension is the extension for the file that keeps the compressed map + // of keys to uint64 IDs. + IDsFileExtension = "ids" + + // FieldsFileExtension is the extension for the file that stores compressed field + // encoding data for this db + FieldsFileExtension = "fields" + + // SeriesFileExtension is the extension for the file that stores the compressed + // series metadata for series in this db + SeriesFileExtension = "series" +) + +type tsdmDumpOpts struct { + dumpIndex bool + dumpBlocks bool + filterKey string + path string +} + +type tsmIndex struct { + series int + offset int64 + minTime int64 + maxTime int64 + blocks []*block +} + +type block struct { + id uint64 + offset int64 +} + +type blockStats struct { + min, max int + counts [][]int +} + +func (b *blockStats) inc(typ int, enc byte) { + for len(b.counts) <= typ { + b.counts = append(b.counts, []int{}) + } + for len(b.counts[typ]) <= int(enc) { + b.counts[typ] = append(b.counts[typ], 0) + } + b.counts[typ][enc]++ +} + +func (b *blockStats) size(sz int) { + if b.min == 0 || sz < b.min { + b.min = sz + } + if b.min == 0 || sz > b.max { + b.max = sz + } +} + +var ( + fieldType = []string{ + "timestamp", "float", "int", "bool", "string", + } + blockTypes = []string{ + "float64", "int64", "bool", "string", + } + timeEnc = []string{ + "none", "s8b", "rle", + } + floatEnc = []string{ + "none", "gor", + } + intEnc = []string{ + "none", "s8b", "rle", + } + boolEnc = []string{ + "none", "bp", + } + stringEnc = []string{ + "none", "snpy", + } + encDescs = [][]string{ + timeEnc, floatEnc, intEnc, boolEnc, stringEnc, + } +) + +func readFields(path string) (map[string]*tsdb.MeasurementFields, error) { + fields := make(map[string]*tsdb.MeasurementFields) + + f, err := os.OpenFile(filepath.Join(path, FieldsFileExtension), os.O_RDONLY, 0666) + if os.IsNotExist(err) { + return fields, nil + } else if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + if err != nil { + return 
nil, err + } + + data, err := snappy.Decode(nil, b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(data, &fields); err != nil { + return nil, err + } + return fields, nil +} + +func readSeries(path string) (map[string]*tsdb.Series, error) { + series := make(map[string]*tsdb.Series) + + f, err := os.OpenFile(filepath.Join(path, SeriesFileExtension), os.O_RDONLY, 0666) + if os.IsNotExist(err) { + return series, nil + } else if err != nil { + return nil, err + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + data, err := snappy.Decode(nil, b) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(data, &series); err != nil { + return nil, err + } + + return series, nil +} + +func readIds(path string) (map[string]uint64, error) { + f, err := os.OpenFile(filepath.Join(path, IDsFileExtension), os.O_RDONLY, 0666) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, err + } + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + b, err = snappy.Decode(nil, b) + if err != nil { + return nil, err + } + + ids := make(map[string]uint64) + if b != nil { + if err := json.Unmarshal(b, &ids); err != nil { + return nil, err + } + } + return ids, err +} + +func readIndex(f *os.File) (*tsmIndex, error) { + // Get the file size + stat, err := f.Stat() + if err != nil { + return nil, err + } + + // Seek to the series count + f.Seek(-4, os.SEEK_END) + b := make([]byte, 8) + _, err = f.Read(b[:4]) + if err != nil { + return nil, err + } + + seriesCount := binary.BigEndian.Uint32(b) + + // Get the min time + f.Seek(-20, os.SEEK_END) + f.Read(b) + minTime := int64(binary.BigEndian.Uint64(b)) + + // Get max time + f.Seek(-12, os.SEEK_END) + f.Read(b) + maxTime := int64(binary.BigEndian.Uint64(b)) + + // Figure out where the index starts + indexStart := stat.Size() - int64(seriesCount*12+20) + + // Seek to the start of the index + f.Seek(indexStart, os.SEEK_SET) + count := int(seriesCount) + index := &tsmIndex{ + offset: indexStart, + minTime: minTime, + maxTime: maxTime, + series: count, + } + + if indexStart < 0 { + return nil, fmt.Errorf("index corrupt: offset=%d", indexStart) + } + + // Read the index entries + for i := 0; i < count; i++ { + f.Read(b) + id := binary.BigEndian.Uint64(b) + f.Read(b[:4]) + pos := binary.BigEndian.Uint32(b[:4]) + index.blocks = append(index.blocks, &block{id: id, offset: int64(pos)}) + } + + return index, nil +} + +func cmdDumpTsm1(opts *tsdmDumpOpts) { + var errors []error + + f, err := os.Open(opts.path) + if err != nil { + println(err.Error()) + os.Exit(1) + } + + // Get the file size + stat, err := f.Stat() + if err != nil { + println(err.Error()) + os.Exit(1) + } + + b := make([]byte, 8) + f.Read(b[:4]) + + // Verify magic number + if binary.BigEndian.Uint32(b[:4]) != 0x16D116D1 { + println("Not a tsm1 file.") + os.Exit(1) + } + + ids, err := readIds(filepath.Dir(opts.path)) + if err != nil { + println("Failed to read series:", err.Error()) + os.Exit(1) + } + + invIds := map[uint64]string{} + for k, v := range ids { + invIds[v] = k + } + + index, err := readIndex(f) + if err != nil { + println("Failed to readIndex:", err.Error()) + + // Create a stubbed out index so we can still try and read the block data directly + // w/o panicing ourselves. 
+ index = &tsmIndex{ + minTime: 0, + maxTime: 0, + offset: stat.Size(), + } + } + + blockStats := &blockStats{} + + println("Summary:") + fmt.Printf(" File: %s\n", opts.path) + fmt.Printf(" Time Range: %s - %s\n", + time.Unix(0, index.minTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, index.maxTime).UTC().Format(time.RFC3339Nano), + ) + fmt.Printf(" Duration: %s ", time.Unix(0, index.maxTime).Sub(time.Unix(0, index.minTime))) + fmt.Printf(" Series: %d ", index.series) + fmt.Printf(" File Size: %d\n", stat.Size()) + println() + + tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "ID", "Ofs", "Key", "Field"}, "\t")) + for i, block := range index.blocks { + key := invIds[block.id] + split := strings.Split(key, "#!~#") + + // We dont' know know if we have fields so use an informative default + var measurement, field string = "UNKNOWN", "UNKNOWN" + + // We read some IDs from the ids file + if len(invIds) > 0 { + // Change the default to error until we know we have a valid key + measurement = "ERR" + field = "ERR" + + // Possible corruption? Try to read as much as we can and point to the problem. + if key == "" { + errors = append(errors, fmt.Errorf("index pos %d, field id: %d, missing key for id", i, block.id)) + } else if len(split) < 2 { + errors = append(errors, fmt.Errorf("index pos %d, field id: %d, key corrupt: got '%v'", i, block.id, key)) + } else { + measurement = split[0] + field = split[1] + } + } + + if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { + continue + } + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(int64(i), 10), + strconv.FormatUint(block.id, 10), + strconv.FormatInt(int64(block.offset), 10), + measurement, + field, + }, "\t")) + } + + if opts.dumpIndex { + println("Index:") + tw.Flush() + println() + } + + tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Ofs", "Len", "ID", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) + + // Staring at 4 because the magic number is 4 bytes + i := int64(4) + var blockCount, pointCount, blockSize int64 + indexSize := stat.Size() - index.offset + + // Start at the beginning and read every block + for i < index.offset { + f.Seek(int64(i), 0) + + f.Read(b) + id := binary.BigEndian.Uint64(b) + f.Read(b[:4]) + length := binary.BigEndian.Uint32(b[:4]) + buf := make([]byte, length) + f.Read(buf) + + blockSize += int64(len(buf)) + 12 + + startTime := time.Unix(0, int64(binary.BigEndian.Uint64(buf[:8]))) + blockType := buf[8] + + encoded := buf[9:] + + cnt := tsm1.BlockCount(buf) + pointCount += int64(cnt) + + // Length of the timestamp block + tsLen, j := binary.Uvarint(encoded) + + // Unpack the timestamp bytes + ts := encoded[int(j) : int(j)+int(tsLen)] + + // Unpack the value bytes + values := encoded[int(j)+int(tsLen):] + + tsEncoding := timeEnc[int(ts[0]>>4)] + vEncoding := encDescs[int(blockType+1)][values[0]>>4] + + typeDesc := blockTypes[blockType] + + blockStats.inc(0, ts[0]>>4) + blockStats.inc(int(blockType+1), values[0]>>4) + blockStats.size(len(buf)) + + if opts.filterKey != "" && !strings.Contains(invIds[id], opts.filterKey) { + i += (12 + int64(length)) + blockCount++ + continue + } + + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(blockCount, 10), + strconv.FormatInt(i, 10), + strconv.FormatInt(int64(len(buf)), 10), + strconv.FormatUint(id, 10), + typeDesc, + startTime.UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(cnt), 10), + 
fmt.Sprintf("%s/%s", tsEncoding, vEncoding), + fmt.Sprintf("%d/%d", len(ts), len(values)), + }, "\t")) + + i += (12 + int64(length)) + blockCount++ + } + if opts.dumpBlocks { + println("Blocks:") + tw.Flush() + println() + } + + fmt.Printf("Statistics\n") + fmt.Printf(" Blocks:\n") + fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", + blockCount, blockSize, blockStats.min, blockStats.max, blockSize/blockCount) + fmt.Printf(" Index:\n") + fmt.Printf(" Total: %d Size: %d\n", len(index.blocks), indexSize) + fmt.Printf(" Points:\n") + fmt.Printf(" Total: %d", pointCount) + println() + + println(" Encoding:") + for i, counts := range blockStats.counts { + if len(counts) == 0 { + continue + } + fmt.Printf(" %s: ", strings.Title(fieldType[i])) + for j, v := range counts { + fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) + } + println() + } + fmt.Printf(" Compression:\n") + fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) + fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) + + if len(errors) > 0 { + println() + fmt.Printf("Errors (%d):\n", len(errors)) + for _, err := range errors { + fmt.Printf(" * %v\n", err) + } + println() + } +} + +func cmdDumpTsm1dev(opts *tsdmDumpOpts) { + var errors []error + + f, err := os.Open(opts.path) + if err != nil { + println(err.Error()) + os.Exit(1) + } + + // Get the file size + stat, err := f.Stat() + if err != nil { + println(err.Error()) + os.Exit(1) + } + b := make([]byte, 8) + + r, err := tsm1.NewTSMReaderWithOptions(tsm1.TSMReaderOptions{ + MMAPFile: f, + }) + if err != nil { + println("Error opening TSM files: ", err.Error()) + } + defer r.Close() + + minTime, maxTime := r.TimeRange() + keys := r.Keys() + + blockStats := &blockStats{} + + println("Summary:") + fmt.Printf(" File: %s\n", opts.path) + fmt.Printf(" Time Range: %s - %s\n", + time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), + ) + fmt.Printf(" Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime))) + fmt.Printf(" Series: %d ", len(keys)) + fmt.Printf(" File Size: %d\n", stat.Size()) + println() + + tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t")) + var pos int + for _, key := range keys { + for _, e := range r.Entries(key) { + pos++ + split := strings.Split(key, "#!~#") + + // We dont' know know if we have fields so use an informative default + var measurement, field string = "UNKNOWN", "UNKNOWN" + + // Possible corruption? Try to read as much as we can and point to the problem. 
+ measurement = split[0] + field = split[1] + + if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { + continue + } + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(int64(pos), 10), + time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano), + time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(e.Offset), 10), + strconv.FormatInt(int64(e.Size), 10), + measurement, + field, + }, "\t")) + } + } + + if opts.dumpIndex { + println("Index:") + tw.Flush() + println() + } + + tw = tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0) + fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) + + // Starting at 5 because the magic number is 4 bytes + 1 byte version + i := int64(5) + var blockCount, pointCount, blockSize int64 + indexSize := r.IndexSize() + + // Start at the beginning and read every block + for _, key := range keys { + for _, e := range r.Entries(key) { + + f.Seek(int64(e.Offset), 0) + f.Read(b[:4]) + + chksum := binary.BigEndian.Uint32(b[:4]) + + buf := make([]byte, e.Size-4) + f.Read(buf) + + blockSize += int64(e.Size) + + blockType := buf[0] + + encoded := buf[1:] + + var v []tsm1.Value + v, err := tsm1.DecodeBlock(buf, v) + if err != nil { + fmt.Printf("error: %v\n", err.Error()) + os.Exit(1) + } + startTime := time.Unix(0, v[0].UnixNano()) + + pointCount += int64(len(v)) + + // Length of the timestamp block + tsLen, j := binary.Uvarint(encoded) + + // Unpack the timestamp bytes + ts := encoded[int(j) : int(j)+int(tsLen)] + + // Unpack the value bytes + values := encoded[int(j)+int(tsLen):] + + tsEncoding := timeEnc[int(ts[0]>>4)] + vEncoding := encDescs[int(blockType+1)][values[0]>>4] + + typeDesc := blockTypes[blockType] + + blockStats.inc(0, ts[0]>>4) + blockStats.inc(int(blockType+1), values[0]>>4) + blockStats.size(len(buf)) + + if opts.filterKey != "" && !strings.Contains(key, opts.filterKey) { + i += blockSize + blockCount++ + continue + } + + fmt.Fprintln(tw, " "+strings.Join([]string{ + strconv.FormatInt(blockCount, 10), + strconv.FormatUint(uint64(chksum), 10), + strconv.FormatInt(i, 10), + strconv.FormatInt(int64(len(buf)), 10), + typeDesc, + startTime.UTC().Format(time.RFC3339Nano), + strconv.FormatInt(int64(len(v)), 10), + fmt.Sprintf("%s/%s", tsEncoding, vEncoding), + fmt.Sprintf("%d/%d", len(ts), len(values)), + }, "\t")) + + i += blockSize + blockCount++ + } + } + + if opts.dumpBlocks { + println("Blocks:") + tw.Flush() + println() + } + + var blockSizeAvg int64 + if blockCount > 0 { + blockSizeAvg = blockSize / blockCount + } + fmt.Printf("Statistics\n") + fmt.Printf(" Blocks:\n") + fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", + blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) + fmt.Printf(" Index:\n") + fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize) + fmt.Printf(" Points:\n") + fmt.Printf(" Total: %d", pointCount) + println() + + println(" Encoding:") + for i, counts := range blockStats.counts { + if len(counts) == 0 { + continue + } + fmt.Printf(" %s: ", strings.Title(fieldType[i])) + for j, v := range counts { + fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) + } + println() + } + fmt.Printf(" Compression:\n") + fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) + fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) + + if len(errors) > 0 { + println() + fmt.Printf("Errors 
(%d):\n", len(errors)) + for _, err := range errors { + fmt.Printf(" * %v\n", err) + } + println() + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md new file mode 100644 index 0000000000..0c910db747 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md @@ -0,0 +1,38 @@ +# `influx_stress` + +## Ways to run + +### `influx_stress` +This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) For more information on the configuration file please see the default. For additional questions please contact @mjdesa + +### `influx_stress -config someConfig.toml` +This runs the stress test with a valid configuration file located at `someConfig.tom` + +## Flags + +If flags are defined they overwrite the config from any file passed in. + +### `-addr` string +IP address and port of database where response times will persist (e.g., localhost:8086) + +`default` = "http://localhost:8086" + +### `-config` string +The relative path to the stress test configuration file. + +`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) + +### `-cpuprofile` filename +Writes the result of Go's cpu profile to filename + +`default` = no profiling + +### `-database` string +Name of database on `-addr` that `influx_stress` will persist write and query response times + +`default` = "stress" + +### `-tags` value +A comma separated list of tags to add to write and query response times. + +`default` = "" diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml new file mode 100644 index 0000000000..08be339950 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml @@ -0,0 +1,92 @@ +# This section can be removed +[provision] + # The basic provisioner simply deletes and creates database. 
+ # If `reset_database` is false, it will not attempt to delete the database + [provision.basic] + # If enabled, the provisioner will actually run + enabled = true + # Address of the instance that is to be provisioned + address = "localhost:8086" + # Database that will be created/deleted + database = "stress" + # Attempt to delete the database + reset_database = true + +# This section cannot be commented out +# To prevent writes, set `enabled=false` +# in [write.influx_client.basic] +[write] + [write.point_generator] + # The basic point generator will generate points of the form + # `cpu,host=server-%v,location=us-west value=234 123456` + [write.point_generator.basic] + # Number of points that will be written for each of the series + point_count = 100 + # Number of series + series_count = 100000 + # How much time between each timestamp + tick = "10s" + # Randomize timestamp a bit (not functional) + jitter = true + # Precision of points that are being written + precision = "s" + # Name of the measurement that will be written + measurement = "cpu" + # The date for the first point that is written into InfluxDB + start_date = "2006-Jan-02" + # Defines a tag for a series + [[write.point_generator.basic.tag]] + key = "host" + value = "server" + [[write.point_generator.basic.tag]] + key = "location" + value = "us-west" + # Defines a field for a series + [[write.point_generator.basic.field]] + key = "value" + value = "float64" # supported types: float64, int, bool + + + [write.influx_client] + [write.influx_client.basic] + # If enabled, the writer will actually write + enabled = true + # Addresses is an array of the InfluxDB instances + addresses = ["localhost:8086"] # stress_test_server runs on port 1234 + # Database that is being written to + database = "stress" + # Precision of points that are being written + precision = "s" + # Size of batches that are sent to the db + batch_size = 10000 + # Interval between each batch + batch_interval = "0s" + # How many concurrent writers to the db + concurrency = 10 + # SSL enabled?
+ ssl = false + # Format of points that are written to InfluxDB + format = "line_http" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet) + +# This section can be removed +[read] + [read.query_generator] + [read.query_generator.basic] + # Template of the query that will be run against the instance + template = "SELECT count(value) FROM cpu where host='server-%v'" + # How many times the templated query will be run + query_count = 250 + + [read.query_client] + [read.query_client.basic] + # If enabled, the reader will actually read + enabled = true + # Addresses of the instances that will be queried + addresses = ["localhost:8086"] + # Database that will be queried + database = "stress" + # Interval between queries + query_interval = "100ms" + # Number of concurrent queriers + concurrency = 1 + diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go new file mode 100644 index 0000000000..639fd37a9f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go @@ -0,0 +1,60 @@ +package main + +import ( + "flag" + "fmt" + "os" + "runtime/pprof" + + "github.com/influxdata/influxdb/stress" +) + +var ( + config = flag.String("config", "", "The stress test file") + cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`") + db = flag.String("db", "", "target database within test system for write and query load") +) + +func main() { + o := stress.NewOutputConfig() + flag.Parse() + + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Println(err) + return + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + c, err := stress.NewConfig(*config) + if err != nil { + fmt.Println(err) + return + } + + if *db != "" { + c.Provision.Basic.Database = *db + c.Write.InfluxClients.Basic.Database = *db + c.Read.QueryClients.Basic.Database = *db + } + + w := stress.NewWriter(&c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic) + r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic) + s := stress.NewStressTest(&c.Provision.Basic, w, r) + + bw := stress.NewBroadcastChannel() + bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler) + bw.Register(o.HTTPHandler("write")) + + br := stress.NewBroadcastChannel() + br.Register(c.Read.QueryClients.Basic.BasicReadHandler) + br.Register(o.HTTPHandler("read")) + + s.Start(bw.Handle, br.Handle) + + return + +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md new file mode 100644 index 0000000000..c6fba59ce0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md @@ -0,0 +1,89 @@ +# Converting b1 and bz1 shards to tsm1 +`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1 format. Converting shards to tsm1 format results in a very significant reduction in disk usage and significantly improved write throughput when writing data into those shards. + +Conversion can be controlled on a database-by-database basis. By default, a database is backed up before it is converted, allowing you to roll back any changes. Because of the backup process, ensure the host system has at least as much free disk space as the disk space consumed by the _data_ directory of your InfluxDB system.
+ +The tool automatically ignores tsm1 shards, and can be run idempotently on any database. + +Conversion is an offline process, and the InfluxDB system must be stopped during conversion. However, the conversion process reads and writes shards directly on disk and should be fast. + +## Steps +Follow these steps to perform a conversion. + +* Identify the databases you wish to convert. You can convert one or more databases at a time. By default, all databases are converted. +* Decide on parallel operation. By default, the conversion operation performs each operation in a serial manner. This minimizes load on the host system performing the conversion, but also takes the most time. If you wish to minimize the time conversion takes, enable parallel mode. Conversion will then perform as many operations as possible in parallel, but the process may place significant load on the host system (CPU, disk, and RAM usage will all increase). +* Stop all write traffic to your InfluxDB system. +* Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards. +* Stop the InfluxDB service. It should not be restarted until conversion is complete. +* Run the conversion tool. Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a "screen" session to avoid any interruptions. +* Unless you ran the conversion tool as the same user that runs InfluxDB, you may need to set the correct read-and-write permissions on the new tsm1 directories. +* Restart the node and ensure the data looks correct. +* If everything looks OK, you may then wish to remove or archive the backed-up databases. +* Restart write traffic. + +## Example session +Below is an example session, showing a database being converted. + +``` +$ mkdir ~/influxdb_backup +$ influx_tsm -backup ~/influxdb_backup -parallel ~/.influxdb/data + +b1 and bz1 shard conversion. +----------------------------------- +Data directory is: /home/user/.influxdb/data +Backup directory is: /home/user/influxdb_backup +Databases specified: all +Database backups enabled: yes +Parallel mode enabled (GOMAXPROCS): yes (8) + + +Found 1 shards that will be converted. + +Database Retention Path Engine Size +_internal monitor /home/user/.influxdb/data/_internal/monitor/1 bz1 65536 + +These shards will be converted. Proceed? y/N: y +Conversion starting.... +Backing up 1 databases... +2016/01/28 12:23:43.699266 Backup of databse '_internal' started +2016/01/28 12:23:43.699883 Backing up file /home/user/.influxdb/data/_internal/monitor/1 +2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs) +2016/01/28 12:23:43.700320 Starting conversion of shard: /home/user/.influxdb/data/_internal/monitor/1 +2016/01/28 12:23:43.706276 Conversion of /home/user/.influxdb/data/_internal/monitor/1 successful (6.040148ms) + +Summary statistics +======================================== +Databases converted: 1 +Shards converted: 1 +TSM files created: 1 +Points read: 369 +Points written: 369 +NaN filtered: 0 +Inf filtered: 0 +Points without fields filtered: 0 +Disk usage pre-conversion (bytes): 65536 +Disk usage post-conversion (bytes): 11000 +Reduction factor: 83% +Bytes per TSM point: 29.81 +Total conversion time: 7.330443ms + +$ # restart node, verify data + +$ rm -r ~/influxdb_backup +``` + +Note that the tool first lists the shards that will be converted, before asking for confirmation.
You can abort the conversion process at this step if you just wish to see what would be converted, or if the list of shards does not look correct. + +## Rolling back a conversion +After a successful backup (the message `Database XYZ backed up` was logged), you have a duplicate of that database in the _backup_ directory you provided on the command line. If, when checking your data after a successful conversion, you notice things missing or something just isn't right, you can "undo" the conversion: + +- Shut down your node (this is very important) +- Remove the database's directory from the influxdb `data` directory (default: ~/.influxdb/data/XYZ) +- Copy (to really make sure the shard is preserved) the database's directory from the backup directory you created into the `data` directory. + +Using the same directories as above, and assuming a database named `stats`: +``` +$ rm -r ~/.influxdb/data/stats +$ cp -r ~/influxdb_backup/stats ~/.influxdb/data/ +$ # restart influxd node +``` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go new file mode 100644 index 0000000000..16a46c82d6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go @@ -0,0 +1,275 @@ +package b1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/b1" + +import ( + "encoding/binary" + "math" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +const DefaultChunkSize int = 1000 + +var excludedBuckets = map[string]bool{ + "fields": true, + "meta": true, + "series": true, + "wal": true, +} + +// Reader is used to read all data from a b1 shard. +type Reader struct { + path string + db *bolt.DB + tx *bolt.Tx + + cursors []*cursor + currCursor int + + keyBuf string + tsmValues []tsm1.Value + values []tsdb.Value + valuePos int + + fields map[string]*tsdb.MeasurementFields + codecs map[string]*tsdb.FieldCodec + + stats *stats.Stats +} + +// NewReader returns a reader for the b1 shard at path. +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + // known-sized slice of a known type, in a contiguous chunk + r.values = make([]tsdb.Value, chunkSize) + r.tsmValues = make([]tsm1.Value, len(r.values)) + for i := range r.values { + r.tsmValues[i] = &r.values[i] + } + + return r +} + +// Open opens the reader. +func (r *Reader) Open() error { + // Open underlying storage. + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + r.db = db + + // Load fields. 
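+	// The "fields" bucket maps each measurement name to its serialized MeasurementFields; decode every entry and build a FieldCodec per measurement so field values can be decoded later.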
+ if err := r.db.View(func(tx *bolt.Tx) error { + meta := tx.Bucket([]byte("fields")) + c := meta.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + mf := &tsdb.MeasurementFields{} + if err := mf.UnmarshalBinary(v); err != nil { + return err + } + r.fields[string(k)] = mf + r.codecs[string(k)] = tsdb.NewFieldCodec(mf.Fields) + } + return nil + }); err != nil { + return err + } + + seriesSet := make(map[string]bool) + + // ignore series index and find all series in this shard + if err := r.db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + key := string(name) + if !excludedBuckets[key] { + seriesSet[key] = true + } + return nil + }) + return nil + }); err != nil { + return err + } + + r.tx, err = r.db.Begin(false) + if err != nil { + return err + } + + // Create cursor for each field of each series. + for s := range seriesSet { + measurement := tsdb.MeasurementFromSeriesKey(s) + fields := r.fields[measurement] + if fields == nil { + r.stats.IncrFiltered() + continue + } + for _, f := range fields.Fields { + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) + c.SeekTo(0) + r.cursors = append(r.cursors, c) + } + } + sort.Sort(cursors(r.cursors)) + + return nil +} + +// Next returns whether any data remains to be read. It must be called before +// the next call to Read(). +func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: + for { + if r.currCursor >= len(r.cursors) { + // All cursors drained. No more data remains. + return false + } + + cc := r.cursors[r.currCursor] + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true + } + + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos].T = k + r.values[r.valuePos].Val = v + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } + } + } +} + +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is +// emitted completely for every field, in every series, before the next field is processed. +// Data from Read() adheres to the requirements for writing to tsm1 shards +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.tsmValues[:r.valuePos], nil +} + +// Close closes the reader. +func (r *Reader) Close() error { + r.tx.Rollback() + return r.db.Close() +} + +// cursor provides ordered iteration across a series. +type cursor struct { + // Bolt cursor and readahead buffer. + cursor *bolt.Cursor + keyBuf int64 + valBuf interface{} + + series string + field string + dec *tsdb.FieldCodec +} + +// Cursor returns an iterator for a key over a single field. +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { + cur := &cursor{ + keyBuf: -2, + series: series, + field: field, + dec: dec, + } + + // Retrieve series bucket. + b := tx.Bucket([]byte(series)) + if b != nil { + cur.cursor = b.Cursor() + } + + return cur +} + +// Seek moves the cursor to a position. 
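+// SeekTo encodes the timestamp as a big-endian key and positions the underlying bolt cursor at the first entry at or after that time.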
+func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + k, v := c.cursor.Seek(seekBytes[:]) + c.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v) +} + +// Next returns the next key/value pair from the cursor. +func (c *cursor) Next() (key int64, value interface{}) { + for { + k, v := func() (int64, interface{}) { + if c.keyBuf != -2 { + k, v := c.keyBuf, c.valBuf + c.keyBuf = -2 + return k, v + } + + k, v := c.cursor.Next() + if k == nil { + return -1, nil + } + return tsdb.DecodeKeyValue(c.field, c.dec, k, v) + }() + + if k != -1 && v == nil { + // There is a point in the series at the next timestamp, + // but not for this cursor's field. Go to the next point. + continue + } + return k, v + } +} + +// Sort b1 cursors in correct order for writing to TSM files. + +type cursors []*cursor + +func (a cursors) Len() int { return len(a) } +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a cursors) Less(i, j int) bool { + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go new file mode 100644 index 0000000000..d207a1a07f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go @@ -0,0 +1,377 @@ +package bz1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/golang/snappy" + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// DefaultChunkSize is the size of chunks read from the bz1 shard +const DefaultChunkSize = 1000 + +// Reader is used to read all data from a bz1 shard. +type Reader struct { + path string + db *bolt.DB + tx *bolt.Tx + + cursors []*cursor + currCursor int + + keyBuf string + tsmValues []tsm1.Value + values []tsdb.Value + valuePos int + + fields map[string]*tsdb.MeasurementFields + codecs map[string]*tsdb.FieldCodec + + stats *stats.Stats +} + +// NewReader returns a reader for the bz1 shard at path. +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { + r := &Reader{ + path: path, + fields: make(map[string]*tsdb.MeasurementFields), + codecs: make(map[string]*tsdb.FieldCodec), + stats: stats, + } + + if chunkSize <= 0 { + chunkSize = DefaultChunkSize + } + + // known-sized slice of a known type, in a contiguous chunk + r.values = make([]tsdb.Value, chunkSize) + r.tsmValues = make([]tsm1.Value, len(r.values)) + for i := range r.values { + r.tsmValues[i] = &r.values[i] + } + + return r +} + +// Open opens the reader. +func (r *Reader) Open() error { + // Open underlying storage. + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + r.db = db + + seriesSet := make(map[string]bool) + + if err := r.db.View(func(tx *bolt.Tx) error { + var data []byte + + meta := tx.Bucket([]byte("meta")) + if meta == nil { + // No data in this shard. 
+ return nil + } + + pointsBucket := tx.Bucket([]byte("points")) + if pointsBucket == nil { + return nil + } + + if err := pointsBucket.ForEach(func(key, _ []byte) error { + seriesSet[string(key)] = true + return nil + }); err != nil { + return err + } + + buf := meta.Get([]byte("fields")) + if buf == nil { + // No data in this shard. + return nil + } + + data, err = snappy.Decode(nil, buf) + if err != nil { + return err + } + if err := json.Unmarshal(data, &r.fields); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + // Build the codec for each measurement. + for k, v := range r.fields { + r.codecs[k] = tsdb.NewFieldCodec(v.Fields) + } + + r.tx, err = r.db.Begin(false) + if err != nil { + return err + } + + // Create cursor for each field of each series. + for s := range seriesSet { + measurement := tsdb.MeasurementFromSeriesKey(s) + fields := r.fields[measurement] + if fields == nil { + r.stats.IncrFiltered() + continue + } + for _, f := range fields.Fields { + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) + if c == nil { + continue + } + c.SeekTo(0) + r.cursors = append(r.cursors, c) + } + } + sort.Sort(cursors(r.cursors)) + + return nil +} + +// Next returns whether there is any more data to be read. +func (r *Reader) Next() bool { + r.valuePos = 0 +OUTER: + for { + if r.currCursor >= len(r.cursors) { + // All cursors drained. No more data remains. + return false + } + + cc := r.cursors[r.currCursor] + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) + + for { + k, v := cc.Next() + if k == -1 { + // Go to next cursor and try again. + r.currCursor++ + if r.valuePos == 0 { + // The previous cursor had no data. Instead of returning + // just go immediately to the next cursor. + continue OUTER + } + // There is some data available. Indicate that it should be read. + return true + } + + if f, ok := v.(float64); ok { + if math.IsInf(f, 0) { + r.stats.AddPointsRead(1) + r.stats.IncrInf() + continue + } + + if math.IsNaN(f) { + r.stats.AddPointsRead(1) + r.stats.IncrNaN() + continue + } + } + + r.values[r.valuePos].T = k + r.values[r.valuePos].Val = v + r.valuePos++ + + if r.valuePos >= len(r.values) { + return true + } + } + } +} + +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is +// emitted completely for every field, in every series, before the next field is processed. +// Data from Read() adheres to the requirements for writing to tsm1 shards +func (r *Reader) Read() (string, []tsm1.Value, error) { + return r.keyBuf, r.tsmValues[:r.valuePos], nil +} + +// Close closes the reader. +func (r *Reader) Close() error { + r.tx.Rollback() + return r.db.Close() +} + +// cursor provides ordered iteration across a series. +type cursor struct { + cursor *bolt.Cursor + buf []byte // uncompressed buffer + off int // buffer offset + fieldIndices []int + index int + + series string + field string + dec *tsdb.FieldCodec + + keyBuf int64 + valBuf interface{} +} + +// newCursor returns an instance of a bz1 cursor. +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { + // Retrieve points bucket. Ignore if there is no bucket. + b := tx.Bucket([]byte("points")).Bucket([]byte(series)) + if b == nil { + return nil + } + + return &cursor{ + cursor: b.Cursor(), + series: series, + field: field, + dec: dec, + keyBuf: -2, + } +} + +// Seek moves the cursor to a position. 
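+// SeekTo locates the compressed block that may contain the seek time, decodes it into the buffer, and then advances within the buffer to the first entry at or after that time.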
+func (c *cursor) SeekTo(seek int64) { + var seekBytes [8]byte + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) + + // Move cursor to appropriate block and set to buffer. + k, v := c.cursor.Seek(seekBytes[:]) + if v == nil { // get the last block, it might have this time + _, v = c.cursor.Last() + } else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check + _, v = c.cursor.Prev() + + // if the previous block max time is less than the seek value, reset to where we were originally + if v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) { + _, v = c.cursor.Seek(seekBytes[:]) + } + } + c.setBuf(v) + + // Read current block up to seek position. + c.seekBuf(seekBytes[:]) + + // Return current entry. + c.keyBuf, c.valBuf = c.read() +} + +// seekBuf moves the cursor to a position within the current buffer. +func (c *cursor) seekBuf(seek []byte) (key, value []byte) { + for { + // Slice off the current entry. + buf := c.buf[c.off:] + + // Exit if current entry's timestamp is on or after the seek. + if len(buf) == 0 { + return + } + + if bytes.Compare(buf[0:8], seek) != -1 { + return + } + + c.off += entryHeaderSize + entryDataSize(buf) + } +} + +// Next returns the next key/value pair from the cursor. If there are no values +// remaining, -1 is returned. +func (c *cursor) Next() (int64, interface{}) { + for { + k, v := func() (int64, interface{}) { + if c.keyBuf != -2 { + k, v := c.keyBuf, c.valBuf + c.keyBuf = -2 + return k, v + } + + // Ignore if there is no buffer. + if len(c.buf) == 0 { + return -1, nil + } + + // Move forward to next entry. + c.off += entryHeaderSize + entryDataSize(c.buf[c.off:]) + + // If no items left then read first item from next block. + if c.off >= len(c.buf) { + _, v := c.cursor.Next() + c.setBuf(v) + } + + return c.read() + }() + + if k != -1 && v == nil { + // There is a point in the series at the next timestamp, + // but not for this cursor's field. Go to the next point. + continue + } + return k, v + } +} + +// setBuf saves a compressed block to the buffer. +func (c *cursor) setBuf(block []byte) { + // Clear if the block is empty. + if len(block) == 0 { + c.buf, c.off, c.fieldIndices, c.index = c.buf[0:0], 0, c.fieldIndices[0:0], 0 + return + } + + // Otherwise decode block into buffer. + // Skip over the first 8 bytes since they are the max timestamp. + buf, err := snappy.Decode(nil, block[8:]) + if err != nil { + c.buf = c.buf[0:0] + fmt.Printf("block decode error: %s\n", err) + } + + c.buf, c.off = buf, 0 +} + +// read reads the current key and value from the current block. +func (c *cursor) read() (key int64, value interface{}) { + // Return nil if the offset is at the end of the buffer. + if c.off >= len(c.buf) { + return -1, nil + } + + // Otherwise read the current entry. + buf := c.buf[c.off:] + dataSize := entryDataSize(buf) + + return tsdb.DecodeKeyValue(c.field, c.dec, buf[0:8], buf[entryHeaderSize:entryHeaderSize+dataSize]) +} + +// Sort bz1 cursors in correct order for writing to TSM files. + +type cursors []*cursor + +func (a cursors) Len() int { return len(a) } +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a cursors) Less(i, j int) bool { + if a[i].series == a[j].series { + return a[i].field < a[j].field + } + return a[i].series < a[j].series +} + +// entryHeaderSize is the number of bytes required for the header. +const entryHeaderSize = 8 + 4 + +// entryDataSize returns the size of an entry's data field, in bytes. 
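+// A bz1 entry is an 8-byte timestamp followed by a 4-byte data length (entryHeaderSize = 12), so the length field lives at bytes 8-12.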
+func entryDataSize(v []byte) int { return int(binary.BigEndian.Uint32(v[8:12])) } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go new file mode 100644 index 0000000000..3469af62b1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go @@ -0,0 +1,118 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +const ( + maxBlocksPerKey = 65535 +) + +// KeyIterator is used to iterate over b* keys for conversion to tsm keys +type KeyIterator interface { + Next() bool + Read() (string, []tsm1.Value, error) +} + +// Converter encapsulates the logic for converting b*1 shards to tsm1 shards. +type Converter struct { + path string + maxTSMFileSize uint32 + sequence int + stats *stats.Stats +} + +// NewConverter returns a new instance of the Converter. +func NewConverter(path string, sz uint32, stats *stats.Stats) *Converter { + return &Converter{ + path: path, + maxTSMFileSize: sz, + stats: stats, + } +} + +// Process writes the data provided by iter to a tsm1 shard. +func (c *Converter) Process(iter KeyIterator) error { + // Ensure the tsm1 directory exists. + if err := os.MkdirAll(c.path, 0777); err != nil { + return err + } + + // Iterate until no more data remains. + var w tsm1.TSMWriter + var keyCount map[string]int + + for iter.Next() { + k, v, err := iter.Read() + if err != nil { + return err + } + + if w == nil { + w, err = c.nextTSMWriter() + if err != nil { + return err + } + keyCount = map[string]int{} + } + if err := w.Write(k, v); err != nil { + return err + } + keyCount[k]++ + + c.stats.AddPointsRead(len(v)) + c.stats.AddPointsWritten(len(v)) + + // If we have a max file size configured and we're over it, start a new TSM file. + if w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey { + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + return err + } + + c.stats.AddTSMBytes(w.Size()) + + if err := w.Close(); err != nil { + return err + } + w = nil + } + } + + if w != nil { + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { + return err + } + c.stats.AddTSMBytes(w.Size()) + + if err := w.Close(); err != nil { + return err + } + } + + return nil +} + +// nextTSMWriter returns the next TSMWriter for the Converter. +func (c *Converter) nextTSMWriter() (tsm1.TSMWriter, error) { + c.sequence++ + fileName := filepath.Join(c.path, fmt.Sprintf("%09d-%09d.%s", 1, c.sequence, tsm1.TSMFileExtension)) + + fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + + // Create the writer for the new TSM file. 
+ w, err := tsm1.NewTSMWriter(fd) + if err != nil { + return nil, err + } + + c.stats.IncrTSMFileCount() + return w, nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go new file mode 100644 index 0000000000..ebe884acbc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go @@ -0,0 +1,413 @@ +package main + +import ( + "bufio" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "sort" + "strings" + "text/tabwriter" + "time" + + "net/http" + _ "net/http/pprof" + + "github.com/influxdata/influxdb/cmd/influx_tsm/b1" + "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" +) + +// ShardReader reads b* shards and converts to tsm shards +type ShardReader interface { + KeyIterator + Open() error + Close() error +} + +const ( + tsmExt = "tsm" +) + +var description = ` +Convert a database from b1 or bz1 format to tsm1 format. + +This tool will backup the directories before conversion (if not disabled). +The backed-up files must be removed manually, generally after starting up the +node again to make sure all of data has been converted correctly. + +To restore a backup: + Shut down the node, remove the converted directory, and + copy the backed-up directory to the original location.` + +type options struct { + DataPath string + BackupPath string + DBs []string + DebugAddr string + TSMSize uint64 + Parallel bool + SkipBackup bool + UpdateInterval time.Duration + Yes bool + CpuFile string +} + +func (o *options) Parse() error { + fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + + var dbs string + + fs.StringVar(&dbs, "dbs", "", "Comma-delimited list of databases to convert. Default is to convert all databases.") + fs.Uint64Var(&opts.TSMSize, "sz", maxTSMSz, "Maximum size of individual TSM files.") + fs.BoolVar(&opts.Parallel, "parallel", false, "Perform parallel conversion. (up to GOMAXPROCS shards at once)") + fs.BoolVar(&opts.SkipBackup, "nobackup", false, "Disable database backups. Not recommended.") + fs.StringVar(&opts.BackupPath, "backup", "", "The location to backup up the current databases. Must not be within the data directory.") + fs.StringVar(&opts.DebugAddr, "debug", "", "If set, http debugging endpoints will be enabled on the given address") + fs.DurationVar(&opts.UpdateInterval, "interval", 5*time.Second, "How often status updates are printed.") + fs.BoolVar(&opts.Yes, "y", false, "Don't ask, just convert") + fs.StringVar(&opts.CpuFile, "profile", "", "CPU Profile location") + fs.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage: %v [options] \n", os.Args[0]) + fmt.Fprintf(os.Stderr, "%v\n\nOptions:\n", description) + fs.PrintDefaults() + fmt.Fprintf(os.Stderr, "\n") + } + + if err := fs.Parse(os.Args[1:]); err != nil { + return err + } + + if len(fs.Args()) < 1 { + return errors.New("no data directory specified") + } + var err error + if o.DataPath, err = filepath.Abs(fs.Args()[0]); err != nil { + return err + } + if o.DataPath, err = filepath.EvalSymlinks(filepath.Clean(o.DataPath)); err != nil { + return err + } + + if o.TSMSize > maxTSMSz { + return fmt.Errorf("bad TSM file size, maximum TSM file size is %d", maxTSMSz) + } + + // Check if specific databases were requested. 
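+	// An empty -dbs flag splits into a single empty string; treat that case as "convert all databases".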
+ o.DBs = strings.Split(dbs, ",") + if len(o.DBs) == 1 && o.DBs[0] == "" { + o.DBs = nil + } + + if !o.SkipBackup { + if o.BackupPath == "" { + return errors.New("either -nobackup or -backup DIR must be set") + } + if o.BackupPath, err = filepath.Abs(o.BackupPath); err != nil { + return err + } + if o.BackupPath, err = filepath.EvalSymlinks(filepath.Clean(o.BackupPath)); err != nil { + if os.IsNotExist(err) { + return errors.New("backup directory must already exist") + } + return err + } + + if strings.HasPrefix(o.BackupPath, o.DataPath) { + fmt.Println(o.BackupPath, o.DataPath) + return errors.New("backup directory cannot be contained within data directory") + } + } + + if o.DebugAddr != "" { + log.Printf("Starting debugging server on http://%v", o.DebugAddr) + go func() { + log.Fatal(http.ListenAndServe(o.DebugAddr, nil)) + }() + } + + return nil +} + +var opts options + +const maxTSMSz uint64 = 2 * 1024 * 1024 * 1024 + +func init() { + log.SetOutput(os.Stderr) + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds) +} + +func main() { + if err := opts.Parse(); err != nil { + log.Fatal(err) + } + + // Determine the list of databases + dbs, err := ioutil.ReadDir(opts.DataPath) + if err != nil { + log.Fatalf("failed to access data directory at %v: %v\n", opts.DataPath, err) + } + fmt.Println() // Cleanly separate output from start of program. + + if opts.Parallel { + if !isEnvSet("GOMAXPROCS") { + // Only modify GOMAXPROCS if it wasn't set in the environment + // This means 'GOMAXPROCS=1 influx_tsm -parallel' will not actually + // run in parallel + runtime.GOMAXPROCS(runtime.NumCPU()) + } + } + + var badUser string + if opts.SkipBackup { + badUser = "(NOT RECOMMENDED)" + } + + // Dump summary of what is about to happen. + fmt.Println("b1 and bz1 shard conversion.") + fmt.Println("-----------------------------------") + fmt.Println("Data directory is: ", opts.DataPath) + if !opts.SkipBackup { + fmt.Println("Backup directory is: ", opts.BackupPath) + } + fmt.Println("Databases specified: ", allDBs(opts.DBs)) + fmt.Println("Database backups enabled: ", yesno(!opts.SkipBackup), badUser) + fmt.Printf("Parallel mode enabled (GOMAXPROCS): %s (%d)\n", yesno(opts.Parallel), runtime.GOMAXPROCS(0)) + fmt.Println() + + shards := collectShards(dbs) + + // Anything to convert? + fmt.Printf("\nFound %d shards that will be converted.\n", len(shards)) + if len(shards) == 0 { + fmt.Println("Nothing to do.") + return + } + + // Display list of convertible shards. + fmt.Println() + w := new(tabwriter.Writer) + w.Init(os.Stdout, 0, 8, 1, '\t', 0) + fmt.Fprintln(w, "Database\tRetention\tPath\tEngine\tSize") + for _, si := range shards { + fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%d\n", si.Database, si.RetentionPolicy, si.FullPath(opts.DataPath), si.FormatAsString(), si.Size) + } + w.Flush() + + if !opts.Yes { + // Get confirmation from user. + fmt.Printf("\nThese shards will be converted. Proceed? 
y/N: ") + liner := bufio.NewReader(os.Stdin) + yn, err := liner.ReadString('\n') + if err != nil { + log.Fatalf("failed to read response: %v", err) + } + yn = strings.TrimRight(strings.ToLower(yn), "\n") + if yn != "y" { + log.Fatal("Conversion aborted.") + } + } + fmt.Println("Conversion starting....") + + if opts.CpuFile != "" { + f, err := os.Create(opts.CpuFile) + if err != nil { + log.Fatal(err) + } + if err = pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + defer pprof.StopCPUProfile() + } + + tr := newTracker(shards, opts) + + if err := tr.Run(); err != nil { + log.Fatalf("Error occurred preventing completion: %v\n", err) + } + + tr.PrintStats() +} + +func collectShards(dbs []os.FileInfo) tsdb.ShardInfos { + // Get the list of shards for conversion. + var shards tsdb.ShardInfos + for _, db := range dbs { + d := tsdb.NewDatabase(filepath.Join(opts.DataPath, db.Name())) + shs, err := d.Shards() + if err != nil { + log.Fatalf("Failed to access shards for database %v: %v\n", d.Name(), err) + } + shards = append(shards, shs...) + } + + sort.Sort(shards) + shards = shards.FilterFormat(tsdb.TSM1) + if len(dbs) > 0 { + shards = shards.ExclusiveDatabases(opts.DBs) + } + + return shards +} + +// backupDatabase backs up the database named db +func backupDatabase(db string) error { + copyFile := func(path string, info os.FileInfo, err error) error { + // Strip the DataPath from the path and replace with BackupPath. + toPath := strings.Replace(path, opts.DataPath, opts.BackupPath, 1) + + if info.IsDir() { + return os.MkdirAll(toPath, info.Mode()) + } + + in, err := os.Open(path) + if err != nil { + return err + } + defer in.Close() + + srcInfo, err := os.Stat(path) + if err != nil { + return err + } + + out, err := os.OpenFile(toPath, os.O_CREATE|os.O_WRONLY, info.Mode()) + if err != nil { + return err + } + defer out.Close() + + dstInfo, err := os.Stat(toPath) + if err != nil { + return err + } + + if dstInfo.Size() == srcInfo.Size() { + log.Printf("Backup file already found for %v with correct size, skipping.", path) + return nil + } + + if dstInfo.Size() > srcInfo.Size() { + log.Printf("Invalid backup file found for %v, replacing with good copy.", path) + if err := out.Truncate(0); err != nil { + return err + } + if _, err := out.Seek(0, os.SEEK_SET); err != nil { + return err + } + } + + if dstInfo.Size() > 0 { + log.Printf("Resuming backup of file %v, starting at %v bytes", path, dstInfo.Size()) + } + + off, err := out.Seek(0, os.SEEK_END) + if err != nil { + return err + } + if _, err := in.Seek(off, os.SEEK_SET); err != nil { + return err + } + + log.Printf("Backing up file %v", path) + + _, err = io.Copy(out, in) + + return err + } + + return filepath.Walk(filepath.Join(opts.DataPath, db), copyFile) +} + +// convertShard converts the shard in-place. +func convertShard(si *tsdb.ShardInfo, tr *tracker) error { + src := si.FullPath(opts.DataPath) + dst := fmt.Sprintf("%v.%v", src, tsmExt) + + var reader ShardReader + switch si.Format { + case tsdb.BZ1: + reader = bz1.NewReader(src, &tr.Stats, 0) + case tsdb.B1: + reader = b1.NewReader(src, &tr.Stats, 0) + default: + return fmt.Errorf("Unsupported shard format: %v", si.FormatAsString()) + } + + // Open the shard, and create a converter. + if err := reader.Open(); err != nil { + return fmt.Errorf("Failed to open %v for conversion: %v", src, err) + } + defer reader.Close() + converter := NewConverter(dst, uint32(opts.TSMSize), &tr.Stats) + + // Perform the conversion. 
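A minimal sketch, not part of the patch, of the in-place swap that follows the conversion below: the converted shard is written next to the original with a ".tsm" suffix, and only after the reader is closed is the original directory removed and the new one renamed over it. The helper name swapConverted is invented for the sketch.

package sketch

import (
	"fmt"
	"os"
)

// swapConverted assumes the converted shard has already been written to
// src+".tsm"; it then replaces the original shard directory with it.
func swapConverted(src string) error {
	dst := src + ".tsm"
	if err := os.RemoveAll(src); err != nil {
		return fmt.Errorf("deletion of %v failed: %v", src, err)
	}
	if err := os.Rename(dst, src); err != nil {
		return fmt.Errorf("rename of %v to %v failed: %v", dst, src, err)
	}
	return nil
}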
+ if err := converter.Process(reader); err != nil { + return fmt.Errorf("Conversion of %v failed: %v", src, err) + } + + // Delete source shard, and rename new tsm1 shard. + if err := reader.Close(); err != nil { + return fmt.Errorf("Conversion of %v failed due to close: %v", src, err) + } + + if err := os.RemoveAll(si.FullPath(opts.DataPath)); err != nil { + return fmt.Errorf("Deletion of %v failed: %v", src, err) + } + if err := os.Rename(dst, src); err != nil { + return fmt.Errorf("Rename of %v to %v failed: %v", dst, src, err) + } + + return nil +} + +// ParallelGroup allows the maximum parrallelism of a set of operations to be controlled. +type ParallelGroup chan struct{} + +// NewParallelGroup returns a group which allows n operations to run in parallel. A value of 0 +// means no operations will ever run. +func NewParallelGroup(n int) ParallelGroup { + return make(chan struct{}, n) +} + +// Do executes one operation of the ParallelGroup +func (p ParallelGroup) Do(f func()) { + p <- struct{}{} // acquire working slot + defer func() { <-p }() + + f() +} + +// yesno returns "yes" for true, "no" for false. +func yesno(b bool) string { + if b { + return "yes" + } + return "no" +} + +// allDBs returns "all" if all databases are requested for conversion. +func allDBs(dbs []string) string { + if dbs == nil { + return "all" + } + return fmt.Sprintf("%v", dbs) +} + +// isEnvSet checks to see if a variable was set in the environment +func isEnvSet(name string) bool { + for _, s := range os.Environ() { + if strings.SplitN(s, "=", 2)[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go new file mode 100644 index 0000000000..de1270ca2e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go @@ -0,0 +1,47 @@ +package stats + +import ( + "sync/atomic" + "time" +) + +// Stats are the statistics captured while converting non-TSM shards to TSM +type Stats struct { + NanFiltered uint64 + InfFiltered uint64 + FieldsFiltered uint64 + PointsWritten uint64 + PointsRead uint64 + TsmFilesCreated uint64 + TsmBytesWritten uint64 + CompletedShards uint64 + TotalTime time.Duration +} + +func (s *Stats) AddPointsRead(n int) { + atomic.AddUint64(&s.PointsRead, uint64(n)) +} + +func (s *Stats) AddPointsWritten(n int) { + atomic.AddUint64(&s.PointsWritten, uint64(n)) +} + +func (s *Stats) AddTSMBytes(n uint32) { + atomic.AddUint64(&s.TsmBytesWritten, uint64(n)) +} + +func (s *Stats) IncrTSMFileCount() { + atomic.AddUint64(&s.TsmFilesCreated, 1) +} + +func (s *Stats) IncrNaN() { + atomic.AddUint64(&s.NanFiltered, 1) +} + +func (s *Stats) IncrInf() { + atomic.AddUint64(&s.InfFiltered, 1) +} + +func (s *Stats) IncrFiltered() { + atomic.AddUint64(&s.FieldsFiltered, 1) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go new file mode 100644 index 0000000000..9048aa7647 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go @@ -0,0 +1,130 @@ +package main + +import ( + "fmt" + "log" + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" +) + +// tracker will orchestrate and track the conversions of non-TSM shards to TSM +type tracker struct { + Stats stats.Stats + + shards tsdb.ShardInfos + opts 
options + + pg ParallelGroup + wg sync.WaitGroup +} + +// newTracker will setup and return a clean tracker instance +func newTracker(shards tsdb.ShardInfos, opts options) *tracker { + t := &tracker{ + shards: shards, + opts: opts, + pg: NewParallelGroup(runtime.GOMAXPROCS(0)), + } + + return t +} + +func (t *tracker) Run() error { + conversionStart := time.Now() + + // Backup each directory. + if !opts.SkipBackup { + databases := t.shards.Databases() + fmt.Printf("Backing up %d databases...\n", len(databases)) + t.wg.Add(len(databases)) + for i := range databases { + db := databases[i] + go t.pg.Do(func() { + defer t.wg.Done() + + start := time.Now() + log.Printf("Backup of database '%v' started", db) + err := backupDatabase(db) + if err != nil { + log.Fatalf("Backup of database %v failed: %v\n", db, err) + } + log.Printf("Database %v backed up (%v)\n", db, time.Now().Sub(start)) + }) + } + t.wg.Wait() + } else { + fmt.Println("Database backup disabled.") + } + + t.wg.Add(len(t.shards)) + for i := range t.shards { + si := t.shards[i] + go t.pg.Do(func() { + defer func() { + atomic.AddUint64(&t.Stats.CompletedShards, 1) + t.wg.Done() + }() + + start := time.Now() + log.Printf("Starting conversion of shard: %v", si.FullPath(opts.DataPath)) + if err := convertShard(si, t); err != nil { + log.Fatalf("Failed to convert %v: %v\n", si.FullPath(opts.DataPath), err) + } + log.Printf("Conversion of %v successful (%v)\n", si.FullPath(opts.DataPath), time.Since(start)) + }) + } + + done := make(chan struct{}) + go func() { + t.wg.Wait() + close(done) + }() + +WAIT_LOOP: + for { + select { + case <-done: + break WAIT_LOOP + case <-time.After(opts.UpdateInterval): + t.StatusUpdate() + } + } + + t.Stats.TotalTime = time.Since(conversionStart) + + return nil +} + +func (t *tracker) StatusUpdate() { + shardCount := atomic.LoadUint64(&t.Stats.CompletedShards) + pointCount := atomic.LoadUint64(&t.Stats.PointsRead) + pointWritten := atomic.LoadUint64(&t.Stats.PointsWritten) + + log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", shardCount, len(t.shards), pointCount, pointWritten) +} + +func (t *tracker) PrintStats() { + preSize := t.shards.Size() + postSize := int64(t.Stats.TsmBytesWritten) + + fmt.Printf("\nSummary statistics\n========================================\n") + fmt.Printf("Databases converted: %d\n", len(t.shards.Databases())) + fmt.Printf("Shards converted: %d\n", len(t.shards)) + fmt.Printf("TSM files created: %d\n", t.Stats.TsmFilesCreated) + fmt.Printf("Points read: %d\n", t.Stats.PointsRead) + fmt.Printf("Points written: %d\n", t.Stats.PointsWritten) + fmt.Printf("NaN filtered: %d\n", t.Stats.NanFiltered) + fmt.Printf("Inf filtered: %d\n", t.Stats.InfFiltered) + fmt.Printf("Points without fields filtered: %d\n", t.Stats.FieldsFiltered) + fmt.Printf("Disk usage pre-conversion (bytes): %d\n", preSize) + fmt.Printf("Disk usage post-conversion (bytes): %d\n", postSize) + fmt.Printf("Reduction factor: %d%%\n", 100*(preSize-postSize)/preSize) + fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.Stats.PointsWritten)) + fmt.Printf("Total conversion time: %v\n", t.Stats.TotalTime) + fmt.Println() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go new file mode 100644 index 0000000000..760a33ff72 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go @@ -0,0 +1,105 @@ +package tsdb + +import ( + "encoding/binary" + 
"errors" + "fmt" + "math" +) + +const maxStringLength = 64 * 1024 + +const ( + fieldFloat = 1 + fieldInteger = 2 + fieldBoolean = 3 + fieldString = 4 +) + +var ( + // ErrFieldNotFound is returned when a field cannot be found. + ErrFieldNotFound = errors.New("field not found") + + // ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID + // there is no mapping for. + ErrFieldUnmappedID = errors.New("field ID not mapped") +) + +// FieldCodec provides encoding and decoding functionality for the fields of a given +// Measurement. +type FieldCodec struct { + fieldsByID map[uint8]*Field + fieldsByName map[string]*Field +} + +// NewFieldCodec returns a FieldCodec for the given Measurement. Must be called with +// a RLock that protects the Measurement. +func NewFieldCodec(fields map[string]*Field) *FieldCodec { + fieldsByID := make(map[uint8]*Field, len(fields)) + fieldsByName := make(map[string]*Field, len(fields)) + for _, f := range fields { + fieldsByID[f.ID] = f + fieldsByName[f.Name] = f + } + return &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName} +} + +// FieldIDByName returns the ID for the given field. +func (f *FieldCodec) FieldIDByName(s string) (uint8, error) { + fi := f.fieldsByName[s] + if fi == nil { + return 0, ErrFieldNotFound + } + return fi.ID, nil +} + +// DecodeByID scans a byte slice for a field with the given ID, converts it to its +// expected type, and return that value. +func (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) { + if len(b) == 0 { + // No more bytes. + return nil, ErrFieldNotFound + } + + field := f.fieldsByID[b[0]] + if field == nil { + // This can happen, though is very unlikely. If this node receives encoded data, to be written + // to disk, and is queried for that data before its metastore is updated, there will be no field + // mapping for the data during decode. All this can happen because data is encoded by the node + // that first received the write request, not the node that actually writes the data to disk. + // So if this happens, the read must be aborted. + return nil, ErrFieldUnmappedID + } + + if field.ID != targetID { + return nil, ErrFieldNotFound + } + + switch field.Type { + case fieldFloat: + return math.Float64frombits(binary.BigEndian.Uint64(b[1:9])), nil + case fieldInteger: + return int64(binary.BigEndian.Uint64(b[1:9])), nil + case fieldBoolean: + return b[1] == 1, nil + case fieldString: + return string(b[3 : 3+binary.BigEndian.Uint16(b[1:3])]), nil + default: + panic(fmt.Sprintf("unsupported value type during decode by id: %T", field.Type)) + } +} + +// DecodeByName scans a byte slice for a field with the given name, converts it to its +// expected type, and return that value. +func (f *FieldCodec) DecodeByName(name string, b []byte) (interface{}, error) { + fi := f.FieldByName(name) + if fi == nil { + return 0, ErrFieldNotFound + } + return f.DecodeByID(fi.ID, b) +} + +// FieldByName returns the field by its name. 
It will return a nil if not found +func (f *FieldCodec) FieldByName(name string) *Field { + return f.fieldsByName[name] +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go new file mode 100644 index 0000000000..c276db08a7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go @@ -0,0 +1,239 @@ +package tsdb // import "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "sort" + "time" + + "github.com/boltdb/bolt" + "github.com/influxdata/influxdb/pkg/slices" +) + +// Flags for differentiating between engines +const ( + B1 = iota + BZ1 + TSM1 +) + +// EngineFormat holds the flag for the engine +type EngineFormat int + +// String returns the string format of the engine. +func (e EngineFormat) String() string { + switch e { + case TSM1: + return "tsm1" + case B1: + return "b1" + case BZ1: + return "bz1" + default: + panic("unrecognized shard engine format") + } +} + +// ShardInfo is the description of a shard on disk. +type ShardInfo struct { + Database string + RetentionPolicy string + Path string + Format EngineFormat + Size int64 +} + +// FormatAsString returns the format of the shard as a string. +func (s *ShardInfo) FormatAsString() string { + return s.Format.String() +} + +// FullPath returns the full path to the shard, given the data directory root. +func (s *ShardInfo) FullPath(dataPath string) string { + return filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path) +} + +// ShardInfos is an array of ShardInfo +type ShardInfos []*ShardInfo + +func (s ShardInfos) Len() int { return len(s) } +func (s ShardInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ShardInfos) Less(i, j int) bool { + if s[i].Database == s[j].Database { + if s[i].RetentionPolicy == s[j].RetentionPolicy { + return s[i].Path < s[i].Path + } + + return s[i].RetentionPolicy < s[j].RetentionPolicy + } + + return s[i].Database < s[j].Database +} + +// Databases returns the sorted unique set of databases for the shards. +func (s ShardInfos) Databases() []string { + dbm := make(map[string]bool) + for _, ss := range s { + dbm[ss.Database] = true + } + + var dbs []string + for k := range dbm { + dbs = append(dbs, k) + } + sort.Strings(dbs) + return dbs +} + +// FilterFormat returns a copy of the ShardInfos, with shards of the given +// format removed. +func (s ShardInfos) FilterFormat(fmt EngineFormat) ShardInfos { + var a ShardInfos + for _, si := range s { + if si.Format != fmt { + a = append(a, si) + } + } + return a +} + +// Size returns the space on disk consumed by the shards. +func (s ShardInfos) Size() int64 { + var sz int64 + for _, si := range s { + sz += si.Size + } + return sz +} + +// ExclusiveDatabases returns a copy of the ShardInfo, with shards associated +// with the given databases present. If the given set is empty, all databases +// are returned. +func (s ShardInfos) ExclusiveDatabases(exc []string) ShardInfos { + var a ShardInfos + + // Empty set? Return everything. + if len(exc) == 0 { + a = make(ShardInfos, len(s)) + copy(a, s) + return a + } + + for _, si := range s { + if slices.Exists(exc, si.Database) { + a = append(a, si) + } + } + return a +} + +// Database represents an entire database on disk. +type Database struct { + path string +} + +// NewDatabase creates a database instance using data at path. 
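A minimal sketch, not part of the patch, of the ordering ShardInfos is meant to implement above: database first, then retention policy, then shard path as the final tie-break. The stand-in types shard and byShard are invented for the sketch.

package main

import (
	"fmt"
	"sort"
)

type shard struct{ db, rp, path string }

// byShard sorts by database, then retention policy, then shard path.
type byShard []shard

func (s byShard) Len() int      { return len(s) }
func (s byShard) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byShard) Less(i, j int) bool {
	if s[i].db != s[j].db {
		return s[i].db < s[j].db
	}
	if s[i].rp != s[j].rp {
		return s[i].rp < s[j].rp
	}
	return s[i].path < s[j].path
}

func main() {
	shards := byShard{
		{"telegraf", "default", "2"},
		{"mydb", "forever", "1"},
		{"telegraf", "default", "1"},
	}
	sort.Sort(shards)
	fmt.Println(shards)
	// [{mydb forever 1} {telegraf default 1} {telegraf default 2}]
}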
+func NewDatabase(path string) *Database { + return &Database{path: path} +} + +// Name returns the name of the database. +func (d *Database) Name() string { + return path.Base(d.path) +} + +// Path returns the path to the database. +func (d *Database) Path() string { + return d.path +} + +// Shards returns information for every shard in the database. +func (d *Database) Shards() ([]*ShardInfo, error) { + fd, err := os.Open(d.path) + if err != nil { + return nil, err + } + + // Get each retention policy. + rps, err := fd.Readdirnames(-1) + if err != nil { + return nil, err + } + + // Process each retention policy. + var shardInfos []*ShardInfo + for _, rp := range rps { + rpfd, err := os.Open(filepath.Join(d.path, rp)) + if err != nil { + return nil, err + } + + // Process each shard + shards, err := rpfd.Readdirnames(-1) + for _, sh := range shards { + fmt, sz, err := shardFormat(filepath.Join(d.path, rp, sh)) + if err != nil { + return nil, err + } + + si := &ShardInfo{ + Database: d.Name(), + RetentionPolicy: path.Base(rp), + Path: sh, + Format: fmt, + Size: sz, + } + shardInfos = append(shardInfos, si) + } + } + + sort.Sort(ShardInfos(shardInfos)) + return shardInfos, nil +} + +// shardFormat returns the format and size on disk of the shard at path. +func shardFormat(path string) (EngineFormat, int64, error) { + // If it's a directory then it's a tsm1 engine + fi, err := os.Stat(path) + if err != nil { + return 0, 0, err + } + if fi.Mode().IsDir() { + return TSM1, fi.Size(), nil + } + + // It must be a BoltDB-based engine. + db, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return 0, 0, err + } + defer db.Close() + + var format EngineFormat + err = db.View(func(tx *bolt.Tx) error { + // Retrieve the meta bucket. + b := tx.Bucket([]byte("meta")) + + // If no format is specified then it must be an original b1 database. + if b == nil { + format = B1 + return nil + } + + // There is an actual format indicator. + switch f := string(b.Get([]byte("format"))); f { + case "b1", "v1": + format = B1 + case "bz1": + format = BZ1 + default: + return fmt.Errorf("unrecognized engine format: %s", f) + } + + return nil + }) + + return format, fi.Size(), err +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go new file mode 100644 index 0000000000..c580f4dba6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-gogo. +// source: internal/meta.proto +// DO NOT EDIT! + +/* +Package internal is a generated protocol buffer package. + +It is generated from these files: + internal/meta.proto + +It has these top-level messages: + Series + Tag + MeasurementFields + Field +*/ +package internal + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
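A minimal sketch, not part of the patch, condensing the decision shardFormat (database.go above) makes from a shard file: a directory is tsm1, a BoltDB file with no "meta" bucket is the original b1 layout, and otherwise the bucket's "format" key names the engine. The helper name engineFromFormatKey is invented for the sketch.

package sketch

import "fmt"

// engineFromFormatKey reduces the bolt lookup to a pure function:
// hasMetaBucket reports whether the "meta" bucket exists, and format is the
// value stored under its "format" key.
func engineFromFormatKey(hasMetaBucket bool, format string) (string, error) {
	if !hasMetaBucket {
		return "b1", nil // no format recorded: original b1 database
	}
	switch format {
	case "b1", "v1":
		return "b1", nil
	case "bz1":
		return "bz1", nil
	default:
		return "", fmt.Errorf("unrecognized engine format: %s", format)
	}
}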
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Series struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Tags []*Tag `protobuf:"bytes,2,rep,name=Tags" json:"Tags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Series) Reset() { *m = Series{} } +func (m *Series) String() string { return proto.CompactTextString(m) } +func (*Series) ProtoMessage() {} + +func (m *Series) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Series) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +type Tag struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} + +func (m *Tag) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Tag) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type MeasurementFields struct { + Fields []*Field `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MeasurementFields) Reset() { *m = MeasurementFields{} } +func (m *MeasurementFields) String() string { return proto.CompactTextString(m) } +func (*MeasurementFields) ProtoMessage() {} + +func (m *MeasurementFields) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +type Field struct { + ID *int32 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` + Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` + Type *int32 `protobuf:"varint,3,req,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} + +func (m *Field) GetID() int32 { + if m != nil && m.ID != nil { + return *m.ID + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Field) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return 0 +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go new file mode 100644 index 0000000000..c0d0010f35 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go @@ -0,0 +1,60 @@ +package tsdb + +import ( + "encoding/binary" + "strings" + + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal" + "github.com/influxdata/influxdb/influxql" + + "github.com/gogo/protobuf/proto" +) + +// Field represents an encoded field. +type Field struct { + ID uint8 `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Type influxql.DataType `json:"type,omitempty"` +} + +// MeasurementFields is a mapping from measurements to its fields. +type MeasurementFields struct { + Fields map[string]*Field `json:"fields"` + Codec *FieldCodec +} + +// UnmarshalBinary decodes the object from a binary format. 
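A minimal sketch, not part of the patch, of the per-field byte layout that FieldCodec.DecodeByID (codec.go above) walks: one byte of field ID followed by a type-dependent payload, shown here for float64 and string fields. The encoder names are invented for the sketch; only the decode side appears in the patch.

package sketch

import (
	"encoding/binary"
	"math"
)

// encodeFloatField lays out [id][8-byte big-endian IEEE-754 bits].
func encodeFloatField(id uint8, v float64) []byte {
	b := make([]byte, 9)
	b[0] = id
	binary.BigEndian.PutUint64(b[1:9], math.Float64bits(v))
	return b
}

// encodeStringField lays out [id][2-byte big-endian length][string bytes].
func encodeStringField(id uint8, s string) []byte {
	b := make([]byte, 3+len(s))
	b[0] = id
	binary.BigEndian.PutUint16(b[1:3], uint16(len(s)))
	copy(b[3:], s)
	return b
}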
+func (m *MeasurementFields) UnmarshalBinary(buf []byte) error { + var pb internal.MeasurementFields + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + m.Fields = make(map[string]*Field) + for _, f := range pb.Fields { + m.Fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())} + } + return nil +} + +// Series represents a series in the shard. +type Series struct { + Key string + Tags map[string]string +} + +// MeasurementFromSeriesKey returns the Measurement name for a given series. +func MeasurementFromSeriesKey(key string) string { + return strings.SplitN(key, ",", 2)[0] +} + +// DecodeKeyValue decodes the key and value from bytes. +func DecodeKeyValue(field string, dec *FieldCodec, k, v []byte) (int64, interface{}) { + // Convert key to a timestamp. + key := int64(binary.BigEndian.Uint64(k[0:8])) + + decValue, err := dec.DecodeByName(field, v) + if err != nil { + return key, nil + } + return key, decValue +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/values.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/values.go new file mode 100644 index 0000000000..90ec1d4407 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/values.go @@ -0,0 +1,39 @@ +package tsdb + +import ( + "fmt" + "time" +) + +type Value struct { + T int64 + Val interface{} +} + +func (v *Value) Time() time.Time { + return time.Unix(0, v.T) +} + +func (v *Value) UnixNano() int64 { + return v.T +} + +func (v *Value) Value() interface{} { + return v.Val +} + +func (v *Value) String() string { + return fmt.Sprintf("%v %v", v.Time(), v.Val) +} + +func (v *Value) Size() int { + switch vv := v.Val.(type) { + case int64, float64: + return 16 + case bool: + return 9 + case string: + return 8 + len(vv) + } + return 0 +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go new file mode 100644 index 0000000000..c6f3ddf90e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go @@ -0,0 +1,367 @@ +package backup + +import ( + "encoding/binary" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/tcp" +) + +const ( + // Suffix is a suffix added to the backup while it's in-process. + Suffix = ".pending" + + // Metafile is the base name given to the metastore backups. + Metafile = "meta" + + // BackupFilePattern is the beginning of the pattern for a backup + // file. They follow the scheme ... + BackupFilePattern = "%s.%s.%05d" +) + +// Command represents the program execution for "influxd backup". +type Command struct { + // The logger passed to the ticker during execution. + Logger *log.Logger + + // Standard input/output, overridden for testing. + Stderr io.Writer + Stdout io.Writer + + host string + path string + database string +} + +// NewCommand returns a new instance of Command with default settings. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + // Set up logger. + cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags) + + // Parse command line arguments. 
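A minimal sketch, not part of the patch, of how the -since flag parsed below is interpreted: time.Parse with the RFC3339 layout, which requires an explicit timezone offset, hence the trailing Z in this example value.

package main

import (
	"fmt"
	"time"
)

func main() {
	since, err := time.Parse(time.RFC3339, "2015-12-24T08:12:23Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(since.UTC()) // 2015-12-24 08:12:23 +0000 UTC
}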
+ retentionPolicy, shardID, since, err := cmd.parseFlags(args) + if err != nil { + return err + } + + // based on the arguments passed in we only backup the minimum + if shardID != "" { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupShard(retentionPolicy, shardID, since) + } else if retentionPolicy != "" { + err = cmd.backupRetentionPolicy(retentionPolicy, since) + } else if cmd.database != "" { + err = cmd.backupDatabase(since) + } else { + err = cmd.backupMetastore() + } + + if err != nil { + cmd.Logger.Printf("backup failed: %v", err) + return err + } + + cmd.Logger.Println("backup complete") + + return nil +} + +// parseFlags parses and validates the command line arguments into a request object. +func (cmd *Command) parseFlags(args []string) (retentionPolicy, shardID string, since time.Time, err error) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + + fs.StringVar(&cmd.host, "host", "localhost:8088", "") + fs.StringVar(&cmd.database, "database", "", "") + fs.StringVar(&retentionPolicy, "retention", "", "") + fs.StringVar(&shardID, "shard", "", "") + var sinceArg string + fs.StringVar(&sinceArg, "since", "", "") + + fs.SetOutput(cmd.Stderr) + fs.Usage = cmd.printUsage + + err = fs.Parse(args) + if err != nil { + return + } + if sinceArg != "" { + since, err = time.Parse(time.RFC3339, sinceArg) + if err != nil { + return + } + } + + // Ensure that only one arg is specified. + if fs.NArg() == 0 { + return "", "", time.Unix(0, 0), errors.New("backup destination path required") + } else if fs.NArg() != 1 { + return "", "", time.Unix(0, 0), errors.New("only one backup path allowed") + } + cmd.path = fs.Arg(0) + + err = os.MkdirAll(cmd.path, 0700) + + return +} + +// backupShard will write a tar archive of the passed in shard with any TSM files that have been +// created since the time passed in +func (cmd *Command) backupShard(retentionPolicy string, shardID string, since time.Time) error { + id, err := strconv.ParseUint(shardID, 10, 64) + if err != nil { + return err + } + + shardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(BackupFilePattern, cmd.database, retentionPolicy, id))) + if err != nil { + return err + } + + cmd.Logger.Printf("backing up db=%v rp=%v shard=%v to %s since %s", + cmd.database, retentionPolicy, shardID, shardArchivePath, since) + + req := &snapshotter.Request{ + Type: snapshotter.RequestShardBackup, + Database: cmd.database, + RetentionPolicy: retentionPolicy, + ShardID: id, + Since: since, + } + + // TODO: verify shard backup data + return cmd.downloadAndVerify(req, shardArchivePath, nil) +} + +// backupDatabase will request the database information from the server and then backup the metastore and +// every shard in every retention policy in the database. Each shard will be written to a separate tar. +func (cmd *Command) backupDatabase(since time.Time) error { + cmd.Logger.Printf("backing up db=%s since %s", cmd.database, since) + + req := &snapshotter.Request{ + Type: snapshotter.RequestDatabaseInfo, + Database: cmd.database, + } + + response, err := cmd.requestInfo(req) + if err != nil { + return err + } + + return cmd.backupResponsePaths(response, since) +} + +// backupRetentionPolicy will request the retention policy information from the server and then backup +// the metastore and every shard in the retention policy. Each shard will be written to a separate tar. 
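A minimal sketch, not part of the patch, of the names backupShard produces: BackupFilePattern ("%s.%s.%05d") fills in database, retention policy and shard ID, and nextPath (below) appends an incremental ".%02d" suffix so repeated backups of the same shard never collide. The helper name backupFileName is invented for the sketch.

package main

import "fmt"

// backupFileName combines the two formatting steps used above.
func backupFileName(db, rp string, shardID uint64, increment int) string {
	base := fmt.Sprintf("%s.%s.%05d", db, rp, shardID)
	return fmt.Sprintf("%s.%02d", base, increment)
}

func main() {
	fmt.Println(backupFileName("mydb", "forever", 1, 0)) // mydb.forever.00001.00
}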
+func (cmd *Command) backupRetentionPolicy(retentionPolicy string, since time.Time) error { + cmd.Logger.Printf("backing up rp=%s since %s", retentionPolicy, since) + + req := &snapshotter.Request{ + Type: snapshotter.RequestRetentionPolicyInfo, + Database: cmd.database, + RetentionPolicy: retentionPolicy, + } + + response, err := cmd.requestInfo(req) + if err != nil { + return err + } + + return cmd.backupResponsePaths(response, since) +} + +// backupResponsePaths will backup the metastore and all shard paths in the response struct +func (cmd *Command) backupResponsePaths(response *snapshotter.Response, since time.Time) error { + if err := cmd.backupMetastore(); err != nil { + return err + } + + // loop through the returned paths and back up each shard + for _, path := range response.Paths { + rp, id, err := retentionAndShardFromPath(path) + if err != nil { + return err + } + + if err := cmd.backupShard(rp, id, since); err != nil { + return err + } + } + + return nil +} + +// backupMetastore will backup the metastore on the host to the passed in path. Database and retention policy backups +// will force a backup of the metastore as well as requesting a specific shard backup from the command line +func (cmd *Command) backupMetastore() error { + metastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, Metafile)) + if err != nil { + return err + } + + cmd.Logger.Printf("backing up metastore to %s", metastoreArchivePath) + + req := &snapshotter.Request{ + Type: snapshotter.RequestMetastoreBackup, + } + + return cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error { + binData, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + magic := binary.BigEndian.Uint64(binData[:8]) + if magic != snapshotter.BackupMagicHeader { + cmd.Logger.Println("Invalid metadata blob, ensure the metadata service is running (default port 8088)") + return errors.New("invalid metadata received") + } + + return nil + }) +} + +// nextPath returns the next file to write to. +func (cmd *Command) nextPath(path string) (string, error) { + // Iterate through incremental files until one is available. + for i := 0; ; i++ { + s := fmt.Sprintf(path+".%02d", i) + if _, err := os.Stat(s); os.IsNotExist(err) { + return s, nil + } else if err != nil { + return "", err + } + } +} + +// downloadAndVerify will download either the metastore or shard to a temp file and then +// rename it to a good backup file name after complete +func (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, validator func(string) error) error { + tmppath := path + Suffix + if err := cmd.download(req, tmppath); err != nil { + return err + } + + if validator != nil { + if err := validator(tmppath); err != nil { + if rmErr := os.Remove(tmppath); rmErr != nil { + cmd.Logger.Printf("Error cleaning up temporary file: %v", rmErr) + } + return err + } + } + + // Rename temporary file to final path. + if err := os.Rename(tmppath, path); err != nil { + return fmt.Errorf("rename: %s", err) + } + + return nil +} + +// download downloads a snapshot of either the metastore or a shard from a host to a given path. +func (cmd *Command) download(req *snapshotter.Request, path string) error { + // Create local file to write to. + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("open temp file: %s", err) + } + defer f.Close() + + // Connect to snapshotter service. 
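A minimal sketch, not part of the patch, of the validation backupMetastore applies above: the first eight bytes of the downloaded file must be the big-endian snapshotter magic number, otherwise the blob is rejected. The helper name checkBackupMagic is invented for the sketch; want stands in for snapshotter.BackupMagicHeader.

package sketch

import (
	"encoding/binary"
	"errors"
)

// checkBackupMagic verifies the 8-byte big-endian magic header at the start
// of a metastore backup.
func checkBackupMagic(data []byte, want uint64) error {
	if len(data) < 8 {
		return errors.New("file too short to contain a magic header")
	}
	if binary.BigEndian.Uint64(data[:8]) != want {
		return errors.New("invalid metadata received")
	}
	return nil
}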
+ conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) + if err != nil { + return err + } + defer conn.Close() + + // Write the request + if err := json.NewEncoder(conn).Encode(req); err != nil { + return fmt.Errorf("encode snapshot request: %s", err) + } + + // Read snapshot from the connection + if _, err := io.Copy(f, conn); err != nil { + return fmt.Errorf("copy backup to file: %s", err) + } + + return nil +} + +// requestInfo will request the database or retention policy information from the host +func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Response, error) { + // Connect to snapshotter service. + conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) + if err != nil { + return nil, err + } + defer conn.Close() + + // Write the request + if err := json.NewEncoder(conn).Encode(request); err != nil { + return nil, fmt.Errorf("encode snapshot request: %s", err) + } + + // Read the response + var r snapshotter.Response + if err := json.NewDecoder(conn).Decode(&r); err != nil { + return nil, err + } + + return &r, nil +} + +// printUsage prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stdout, `usage: influxd backup [flags] PATH + +Backup downloads a snapshot of a data node and saves it to disk. + +Options: + -host + The host to connect to snapshot. Defaults to 127.0.0.1:8088. + -database + The database to backup. + -retention + Optional. The retention policy to backup. + -shard + Optional. The shard id to backup. If specified, retention is required. + -since <2015-12-24T08:12:23> + Optional. Do an incremental backup since the passed in RFC3339 + formatted time. + +`) +} + +// retentionAndShardFromPath will take the shard relative path and split it into the +// retention policy name and shard ID. The first part of the path should be the database name. +func retentionAndShardFromPath(path string) (retention, shard string, err error) { + a := strings.Split(path, string(filepath.Separator)) + if len(a) != 3 { + return "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path) + } + + return a[1], a[2], nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go new file mode 100644 index 0000000000..3f6bbfb083 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go @@ -0,0 +1,46 @@ +package help + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Command displays help for command-line sub-commands. +type Command struct { + Stdout io.Writer +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + } +} + +// Run executes the command. +func (cmd *Command) Run(args ...string) error { + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) + return nil +} + +const usage = ` +Configure and start an InfluxDB server. + +Usage: + + influxd [[command] [arguments]] + +The commands are: + + backup downloads a snapshot of a data node and saves it to disk + config display the default configuration + restore uses a snapshot of a data node to rebuild a cluster + run run node with existing configuration + version displays the InfluxDB version + +"run" is the default command. + +Use "influxd help [command]" for more information about a command. 
+` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go new file mode 100644 index 0000000000..eb463f2f25 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go @@ -0,0 +1,203 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "math/rand" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/cmd/influxd/help" + "github.com/influxdata/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/cmd/influxd/run" +) + +// These variables are populated via the Go linker. +var ( + version string + commit string + branch string +) + +func init() { + // If commit, branch, or build time are not set, make that clear. + if version == "" { + version = "unknown" + } + if commit == "" { + commit = "unknown" + } + if branch == "" { + branch = "unknown" + } +} + +func main() { + rand.Seed(time.Now().UnixNano()) + + m := NewMain() + if err := m.Run(os.Args[1:]...); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// Main represents the program execution. +type Main struct { + Logger *log.Logger + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain return a new instance of Main. +func NewMain() *Main { + return &Main{ + Logger: log.New(os.Stderr, "[run] ", log.LstdFlags), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run determines and runs the command specified by the CLI args. +func (m *Main) Run(args ...string) error { + name, args := ParseCommandName(args) + + // Extract name from args. + switch name { + case "", "run": + cmd := run.NewCommand() + + // Tell the server the build details. + cmd.Version = version + cmd.Commit = commit + cmd.Branch = branch + + if err := cmd.Run(args...); err != nil { + return fmt.Errorf("run: %s", err) + } + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) + m.Logger.Println("Listening for signals") + + // Block until one of the signals above is received + select { + case <-signalCh: + m.Logger.Println("Signal received, initializing clean shutdown...") + go func() { + cmd.Close() + }() + } + + // Block again until another signal is received, a shutdown timeout elapses, + // or the Command is gracefully closed + m.Logger.Println("Waiting for clean shutdown...") + select { + case <-signalCh: + m.Logger.Println("second signal received, initializing hard shutdown") + case <-time.After(time.Second * 30): + m.Logger.Println("time limit reached, initializing hard shutdown") + case <-cmd.Closed: + m.Logger.Println("server shutdown completed") + } + + // goodbye. 
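A minimal sketch, not part of the patch, of the two-stage shutdown used by the "run" case above: the first SIGINT or SIGTERM starts a graceful close, and a second signal, a 30-second timeout, or the server's own closed channel ends the wait. The function name waitForShutdown is invented for the sketch.

package sketch

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// waitForShutdown blocks until a signal arrives, starts closeFn, then waits
// for a second signal, a timeout, or closed to fire.
func waitForShutdown(closeFn func(), closed <-chan struct{}) {
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)

	<-signalCh // first signal: begin clean shutdown
	go closeFn()

	select {
	case <-signalCh:
		log.Println("second signal received, initializing hard shutdown")
	case <-time.After(30 * time.Second):
		log.Println("time limit reached, initializing hard shutdown")
	case <-closed:
		log.Println("server shutdown completed")
	}
}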
+ + case "backup": + name := backup.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("backup: %s", err) + } + case "restore": + name := restore.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("restore: %s", err) + } + case "config": + if err := run.NewPrintConfigCommand().Run(args...); err != nil { + return fmt.Errorf("config: %s", err) + } + case "version": + if err := NewVersionCommand().Run(args...); err != nil { + return fmt.Errorf("version: %s", err) + } + case "help": + if err := help.NewCommand().Run(args...); err != nil { + return fmt.Errorf("help: %s", err) + } + default: + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name) + } + + return nil +} + +// ParseCommandName extracts the command name and args from the args list. +func ParseCommandName(args []string) (string, []string) { + // Retrieve command name as first argument. + var name string + if len(args) > 0 && !strings.HasPrefix(args[0], "-") { + name = args[0] + } + + // Special case -h immediately following binary name + if len(args) > 0 && args[0] == "-h" { + name = "help" + } + + // If command is "help" and has an argument then rewrite args to use "-h". + if name == "help" && len(args) > 1 { + args[0], args[1] = args[1], "-h" + name = args[0] + } + + // If a named command is specified then return it with its arguments. + if name != "" { + return name, args[1:] + } + return "", args +} + +// VersionCommand represents the command executed by "influxd version". +type VersionCommand struct { + Stdout io.Writer + Stderr io.Writer +} + +// NewVersionCommand return a new instance of VersionCommand. +func NewVersionCommand() *VersionCommand { + return &VersionCommand{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run prints the current version and commit info. +func (cmd *VersionCommand) Run(args ...string) error { + // Parse flags in case -h is specified. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, strings.TrimSpace(versionUsage)) } + if err := fs.Parse(args); err != nil { + return err + } + + // Print version info. + fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit) + + return nil +} + +var versionUsage = ` +usage: version + + version displays the InfluxDB version, build branch and git commit hash +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go new file mode 100644 index 0000000000..97e37cf99b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go @@ -0,0 +1,403 @@ +package restore + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "path/filepath" + "strconv" + "sync" + + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/snapshotter" +) + +// Command represents the program execution for "influxd restore". +type Command struct { + Stdout io.Writer + Stderr io.Writer + + backupFilesPath string + metadir string + datadir string + database string + retention string + shard string + + // TODO: when the new meta stuff is done this should not be exported or be gone + MetaConfig *meta.Config +} + +// NewCommand returns a new instance of Command with default settings. 
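A minimal sketch, not part of the patch, of the argument rewriting ParseCommandName (main.go above) performs, written as it might appear in a test in the same package; the expected names follow directly from the code, and the config file name is only an example.

package main

import "testing"

func TestParseCommandName(t *testing.T) {
	cases := []struct {
		in   []string
		name string
	}{
		{[]string{"run", "-config", "influxdb.conf"}, "run"},
		{[]string{"-h"}, "help"},                   // bare -h becomes the help command
		{[]string{"help", "backup"}, "backup"},     // rewritten to "backup -h"
		{[]string{"-config", "influxdb.conf"}, ""}, // no command name given
	}
	for _, c := range cases {
		if name, _ := ParseCommandName(c.in); name != c.name {
			t.Fatalf("ParseCommandName(%v): got %q, want %q", c.in, name, c.name)
		}
	}
}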
+func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + Stderr: os.Stderr, + MetaConfig: meta.NewConfig(), + } +} + +// Run executes the program. +func (cmd *Command) Run(args ...string) error { + if err := cmd.parseFlags(args); err != nil { + return err + } + + if err := cmd.ensureStopped(); err != nil { + fmt.Fprintln(cmd.Stderr, "influxd cannot be running during a restore. Please stop any running instances and try again.") + return err + } + + if cmd.metadir != "" { + if err := cmd.unpackMeta(); err != nil { + return err + } + } + + if cmd.shard != "" { + return cmd.unpackShard(cmd.shard) + } else if cmd.retention != "" { + return cmd.unpackRetention() + } else if cmd.datadir != "" { + return cmd.unpackDatabase() + } + return nil +} + +// parseFlags parses and validates the command line arguments. +func (cmd *Command) parseFlags(args []string) error { + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&cmd.metadir, "metadir", "", "") + fs.StringVar(&cmd.datadir, "datadir", "", "") + fs.StringVar(&cmd.database, "database", "", "") + fs.StringVar(&cmd.retention, "retention", "", "") + fs.StringVar(&cmd.shard, "shard", "", "") + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + if err := fs.Parse(args); err != nil { + return err + } + + cmd.MetaConfig = meta.NewConfig() + cmd.MetaConfig.Dir = cmd.metadir + + // Require output path. + cmd.backupFilesPath = fs.Arg(0) + if cmd.backupFilesPath == "" { + return fmt.Errorf("path with backup files required") + } + + // validate the arguments + if cmd.metadir == "" && cmd.database == "" { + return fmt.Errorf("-metadir or -database are required to restore") + } + + if cmd.database != "" && cmd.datadir == "" { + return fmt.Errorf("-datadir is required to restore") + } + + if cmd.shard != "" { + if cmd.database == "" { + return fmt.Errorf("-database is required to restore shard") + } + if cmd.retention == "" { + return fmt.Errorf("-retention is required to restore shard") + } + } else if cmd.retention != "" && cmd.database == "" { + return fmt.Errorf("-database is required to restore retention policy") + } + + return nil +} + +func (cmd *Command) ensureStopped() error { + ln, err := net.Listen("tcp", cmd.MetaConfig.BindAddress) + if err != nil { + return fmt.Errorf("influxd running on %s: aborting", cmd.MetaConfig.BindAddress) + } + defer ln.Close() + return nil +} + +// unpackMeta reads the metadata from the backup directory and initializes a raft +// cluster and replaces the root metadata. 
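A minimal sketch, not part of the patch, of the trick ensureStopped uses above: if the meta service's bind address can still be listened on, nothing else holds that port, so no influxd instance can be running against this metastore; on success the probe listener is closed again immediately.

package sketch

import (
	"fmt"
	"net"
)

// ensureStopped fails if something (presumably influxd) already holds
// bindAddress.
func ensureStopped(bindAddress string) error {
	ln, err := net.Listen("tcp", bindAddress)
	if err != nil {
		return fmt.Errorf("influxd running on %s: aborting", bindAddress)
	}
	return ln.Close()
}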
+func (cmd *Command) unpackMeta() error { + // find the meta file + metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*")) + if err != nil { + return err + } + + if len(metaFiles) == 0 { + return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath) + } + + latest := metaFiles[len(metaFiles)-1] + + fmt.Fprintf(cmd.Stdout, "Using metastore snapshot: %v\n", latest) + // Read the metastore backup + f, err := os.Open(latest) + if err != nil { + return err + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, f); err != nil { + return fmt.Errorf("copy: %s", err) + } + + b := buf.Bytes() + var i int + + // Make sure the file is actually a meta store backup file + magic := binary.BigEndian.Uint64(b[:8]) + if magic != snapshotter.BackupMagicHeader { + return fmt.Errorf("invalid metadata file") + } + i += 8 + + // Size of the meta store bytes + length := int(binary.BigEndian.Uint64(b[i : i+8])) + i += 8 + metaBytes := b[i : i+length] + i += int(length) + + // Size of the node.json bytes + length = int(binary.BigEndian.Uint64(b[i : i+8])) + i += 8 + nodeBytes := b[i:] + + // Unpack into metadata. + var data meta.Data + if err := data.UnmarshalBinary(metaBytes); err != nil { + return fmt.Errorf("unmarshal: %s", err) + } + + // Copy meta config and remove peers so it starts in single mode. + c := cmd.MetaConfig + c.JoinPeers = nil + c.LoggingEnabled = false + + // Create the meta dir + if os.MkdirAll(c.Dir, 0700); err != nil { + return err + } + + // Write node.json back to meta dir + if err := ioutil.WriteFile(filepath.Join(c.Dir, "node.json"), nodeBytes, 0655); err != nil { + return err + } + + // Initialize meta store. + store := meta.NewService(c) + store.RaftListener = newNopListener() + + // Open the meta store. + if err := store.Open(); err != nil { + return fmt.Errorf("open store: %s", err) + } + defer store.Close() + + // Wait for the store to be ready or error. + select { + case err := <-store.Err(): + return err + default: + } + + client := meta.NewClient() + client.SetMetaServers([]string{store.HTTPAddr()}) + client.SetTLS(false) + client.SetLogger(log.New(ioutil.Discard, "", 0)) + if err := client.Open(); err != nil { + return err + } + defer client.Close() + + // Force set the full metadata. 
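A minimal sketch, not part of the patch, of the backup blob layout unpackMeta walks above: an 8-byte magic header, an 8-byte big-endian length followed by the metastore bytes, then an 8-byte length followed by the node.json bytes. The helper name splitMetaBackup is invented for the sketch; want stands in for snapshotter.BackupMagicHeader.

package sketch

import (
	"encoding/binary"
	"errors"
)

// splitMetaBackup returns the metastore blob and the node.json blob.
func splitMetaBackup(b []byte, want uint64) (metaBytes, nodeBytes []byte, err error) {
	if len(b) < 16 || binary.BigEndian.Uint64(b[:8]) != want {
		return nil, nil, errors.New("invalid metadata file")
	}
	n := int(binary.BigEndian.Uint64(b[8:16]))
	if len(b) < 16+n+8 {
		return nil, nil, errors.New("truncated metadata file")
	}
	metaBytes = b[16 : 16+n]
	nodeBytes = b[16+n+8:] // the 8 bytes before node.json hold its length
	return metaBytes, nodeBytes, nil
}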
+ if err := client.SetData(&data); err != nil { + return fmt.Errorf("set data: %s", err) + } + return nil +} + +// unpackShard will look for all backup files in the path matching this shard ID +// and restore them to the data dir +func (cmd *Command) unpackShard(shardID string) error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("shard already present: %s", restorePath) + } + + id, err := strconv.ParseUint(shardID, 10, 64) + if err != nil { + return err + } + + // find the shard backup files + pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup.BackupFilePattern, cmd.database, cmd.retention, id)) + return cmd.unpackFiles(pat + ".*") +} + +// unpackDatabase will look for all backup files in the path matching this database +// and restore them to the data dir +func (cmd *Command) unpackDatabase() error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.database) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("database already present: %s", restorePath) + } + + // find the database backup files + pat := filepath.Join(cmd.backupFilesPath, cmd.database) + return cmd.unpackFiles(pat + ".*") +} + +// unpackRetention will look for all backup files in the path matching this retention +// and restore them to the data dir +func (cmd *Command) unpackRetention() error { + // make sure the shard isn't already there so we don't clobber anything + restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention) + if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("retention already present: %s", restorePath) + } + + // find the retention backup files + pat := filepath.Join(cmd.backupFilesPath, cmd.database) + return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.retention)) +} + +// unpackFiles will look for backup files matching the pattern and restore them to the data dir +func (cmd *Command) unpackFiles(pat string) error { + fmt.Printf("Restoring from backup %s\n", pat) + + backupFiles, err := filepath.Glob(pat) + if err != nil { + return err + } + + if len(backupFiles) == 0 { + return fmt.Errorf("no backup files for %s in %s", pat, cmd.backupFilesPath) + } + + for _, fn := range backupFiles { + if err := cmd.unpackTar(fn); err != nil { + return err + } + } + + return nil +} + +// unpackTar will restore a single tar archive to the data dir +func (cmd *Command) unpackTar(tarFile string) error { + f, err := os.Open(tarFile) + if err != nil { + return err + } + defer f.Close() + + tr := tar.NewReader(f) + + for { + hdr, err := tr.Next() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + if err := cmd.unpackFile(tr, hdr.Name); err != nil { + return err + } + } +} + +// unpackFile will copy the current file from the tar archive to the data dir +func (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error { + fn := filepath.Join(cmd.datadir, fileName) + fmt.Printf("unpacking %s\n", fn) + + if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil { + return fmt.Errorf("error making restore dir: %s", err.Error()) + } + + ff, err := os.Create(fn) + if err != nil { + return err + } + defer ff.Close() + + if _, err := io.Copy(ff, tr); err != nil { + return err + } + + return nil +} + +// printUsage 
prints the usage message to STDERR. +func (cmd *Command) printUsage() { + fmt.Fprintf(cmd.Stdout, `usage: influxd restore [flags] PATH + +Restore uses backups from the PATH to restore the metastore, databases, +retention policies, or specific shards. The InfluxDB process must not be +running during restore. + +Options: + -metadir + Optional. If set the metastore will be recovered to the given path. + -datadir + Optional. If set the restore process will recover the specified + database, retention policy or shard to the given directory. + -database + Optional. Required if no metadir given. Will restore the database + TSM files. + -retention + Optional. If given, database is required. Will restore the retention policy's + TSM files. + -shard + Optional. If given, database and retention are required. Will restore the shard's + TSM files. + +`) +} + +type nopListener struct { + mu sync.Mutex + closing chan struct{} +} + +func newNopListener() *nopListener { + return &nopListener{closing: make(chan struct{})} +} + +func (ln *nopListener) Accept() (net.Conn, error) { + ln.mu.Lock() + defer ln.mu.Unlock() + + <-ln.closing + return nil, errors.New("listener closing") +} + +func (ln *nopListener) Close() error { + if ln.closing != nil { + close(ln.closing) + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.closing = nil + } + return nil +} + +func (ln *nopListener) Addr() net.Addr { return &net.TCPAddr{} } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/backup_restore_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/backup_restore_test.go new file mode 100644 index 0000000000..8ebc56d7eb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/backup_restore_test.go @@ -0,0 +1,106 @@ +package run_test + +import ( + "io/ioutil" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/influxdb/cmd/influxd/backup" + "github.com/influxdata/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/services/meta" +) + +func TestServer_BackupAndRestore(t *testing.T) { + config := NewConfig() + config.Data.Engine = "tsm1" + config.Data.Dir, _ = ioutil.TempDir("", "data_backup") + config.Meta.Dir, _ = ioutil.TempDir("", "meta_backup") + config.Meta.BindAddress = freePort() + config.Meta.HTTPBindAddress = freePort() + + backupDir, _ := ioutil.TempDir("", "backup") + defer os.RemoveAll(backupDir) + + db := "mydb" + rp := "forever" + expected := `{"results":[{"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23]]}]}]}` + + // set the cache snapshot size low so that a single point will cause TSM file creation + config.Data.CacheSnapshotMemorySize = 1 + + func() { + s := OpenServer(config, "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicyInfo(rp, 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(db, rp); err != nil { + t.Fatal(err) + } + + s.MustWrite(db, rp, "myseries,host=A value=23 1000000", nil) + + // wait for the snapshot to write + time.Sleep(time.Second) + + res, err := s.Query(`select * from "mydb"."forever"."myseries"`) + if err != nil { + t.Fatalf("error querying: %s", err.Error()) + } + if res != expected { + t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res) + } + + // now backup + cmd := backup.NewCommand() + hostAddress, _ := meta.DefaultHost(run.DefaultHostname, config.Meta.BindAddress) + if err 
:= cmd.Run("-host", hostAddress, "-database", "mydb", backupDir); err != nil { + t.Fatalf("error backing up: %s", err.Error()) + } + }() + + if _, err := os.Stat(config.Meta.Dir); err == nil || !os.IsNotExist(err) { + t.Fatalf("meta dir should be deleted") + } + + if _, err := os.Stat(config.Data.Dir); err == nil || !os.IsNotExist(err) { + t.Fatalf("meta dir should be deleted") + } + + // restore + cmd := restore.NewCommand() + cmd.MetaConfig.BindAddress = config.Meta.BindAddress + + if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", backupDir); err != nil { + t.Fatalf("error restoring: %s", err.Error()) + } + + // Make sure node.json was restored + nodePath := filepath.Join(config.Meta.Dir, "node.json") + if _, err := os.Stat(nodePath); err != nil || os.IsNotExist(err) { + t.Fatalf("node.json should exist") + } + + // now open it up and verify we're good + s := OpenServer(config, "") + defer s.Close() + + res, err := s.Query(`select * from "mydb"."forever"."myseries"`) + if err != nil { + t.Fatalf("error querying: %s", err.Error()) + } + if res != expected { + t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res) + } +} + +func freePort() string { + l, _ := net.Listen("tcp", "") + defer l.Close() + return l.Addr().String() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go new file mode 100644 index 0000000000..e3a4bff6fd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go @@ -0,0 +1,258 @@ +package run + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml" +) + +const logo = ` + 8888888 .d888 888 8888888b. 888888b. + 888 d88P" 888 888 "Y88b 888 "88b + 888 888 888 888 888 888 .88P + 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. + 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b + 888 888 888 888 888 888 888 X88K 888 888 888 888 + 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P + 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" + +` + +// Command represents the command executed by "influxd run". +type Command struct { + Version string + Branch string + Commit string + BuildTime string + + closing chan struct{} + Closed chan struct{} + + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + + Server *Server +} + +// NewCommand return a new instance of Command. +func NewCommand() *Command { + return &Command{ + closing: make(chan struct{}), + Closed: make(chan struct{}), + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run parses the config from args and runs the server. +func (cmd *Command) Run(args ...string) error { + // Parse the command line flags. + options, err := cmd.ParseFlags(args...) + if err != nil { + return err + } + + // Print sweet InfluxDB logo. + fmt.Print(logo) + + // Set parallelism. + runtime.GOMAXPROCS(runtime.NumCPU()) + + // Mark start-up in log. + log.Printf("InfluxDB starting, version %s, branch %s, commit %s", + cmd.Version, cmd.Branch, cmd.Commit) + log.Printf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0)) + + // Write the PID file. 
+ if err := cmd.writePIDFile(options.PIDFile); err != nil { + return fmt.Errorf("write pid file: %s", err) + } + + // Turn on block profiling to debug stuck databases + runtime.SetBlockProfileRate(int(1 * time.Second)) + + // Parse config + config, err := cmd.ParseConfig(options.ConfigPath) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Propogate the top-level join options down to the meta config + if config.Join != "" { + config.Meta.JoinPeers = strings.Split(config.Join, ",") + } + + // Command-line flags for -join and -hostname override the config + // and env variable + if options.Join != "" { + config.Meta.JoinPeers = strings.Split(options.Join, ",") + } + + if options.Hostname != "" { + config.Hostname = options.Hostname + } + + // Propogate the top-level hostname down to dependendent configs + config.Meta.RemoteHostname = config.Hostname + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) + } + + // Create server from config and start it. + buildInfo := &BuildInfo{ + Version: cmd.Version, + Commit: cmd.Commit, + Branch: cmd.Branch, + Time: cmd.BuildTime, + } + s, err := NewServer(config, buildInfo) + if err != nil { + return fmt.Errorf("create server: %s", err) + } + s.CPUProfile = options.CPUProfile + s.MemProfile = options.MemProfile + if err := s.Open(); err != nil { + return fmt.Errorf("open server: %s", err) + } + cmd.Server = s + + // Begin monitoring the server's error channel. + go cmd.monitorServerErrors() + + return nil +} + +// Close shuts down the server. +func (cmd *Command) Close() error { + defer close(cmd.Closed) + close(cmd.closing) + if cmd.Server != nil { + return cmd.Server.Close() + } + return nil +} + +func (cmd *Command) monitorServerErrors() { + logger := log.New(cmd.Stderr, "", log.LstdFlags) + for { + select { + case err := <-cmd.Server.Err(): + logger.Println(err) + case <-cmd.closing: + return + } + } +} + +// ParseFlags parses the command line flags from args and returns an options set. +func (cmd *Command) ParseFlags(args ...string) (Options, error) { + var options Options + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ConfigPath, "config", "", "") + fs.StringVar(&options.PIDFile, "pidfile", "", "") + fs.StringVar(&options.Join, "join", "", "") + fs.StringVar(&options.Hostname, "hostname", "", "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } + if err := fs.Parse(args); err != nil { + return Options{}, err + } + return options, nil +} + +// writePIDFile writes the process ID to path. +func (cmd *Command) writePIDFile(path string) error { + // Ignore if path is not set. + if path == "" { + return nil + } + + // Ensure the required directory structure exists. + err := os.MkdirAll(filepath.Dir(path), 0777) + if err != nil { + return fmt.Errorf("mkdir: %s", err) + } + + // Retrieve the PID and write it. + pid := strconv.Itoa(os.Getpid()) + if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil { + return fmt.Errorf("write file: %s", err) + } + + return nil +} + +// ParseConfig parses the config at path. +// Returns a demo configuration if path is blank. 
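+
+// ParseConfig supplies only the first layer of the effective configuration:
+// Run then applies INFLUXDB_* environment variables on top of the parsed file
+// via ApplyEnvOverrides, and the -join and -hostname command-line flags
+// override both. A rough sketch of that layering (paths and values are
+// illustrative only):
+//
+//	config, _ := cmd.ParseConfig("/etc/influxdb/influxdb.conf") // 1. config file
+//	_ = config.ApplyEnvOverrides()                              // 2. env vars
+//	if options.Join != "" {                                     // 3. flags win
+//		config.Meta.JoinPeers = strings.Split(options.Join, ",")
+//	}
+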
+func (cmd *Command) ParseConfig(path string) (*Config, error) { + // Use demo configuration if no config path is specified. + if path == "" { + log.Println("no configuration provided, using default settings") + return NewDemoConfig() + } + + log.Printf("Using configuration at: %s\n", path) + + config := NewConfig() + if _, err := toml.DecodeFile(path, &config); err != nil { + return nil, err + } + + return config, nil +} + +var usage = `usage: run [flags] + +run starts the InfluxDB server. If this is the first time running the command +then a new cluster will be initialized unless the -join argument is used. + + -config + Set the path to the configuration file. + + -join + Joins the server to an existing cluster. Should be + the HTTP bind address of an existing meta server + + -hostname + Override the hostname, the 'hostname' configuration + option will be overridden. + + -pidfile + Write process ID to a file. + + -cpuprofile + Write CPU profiling information to a file. + + -memprofile + Write memory usage information to a file. +` + +// Options represents the command line options that can be parsed. +type Options struct { + ConfigPath string + PIDFile string + Join string + Hostname string + CPUProfile string + MemProfile string +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go new file mode 100644 index 0000000000..bbfa18c742 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go @@ -0,0 +1,281 @@ +package run + +import ( + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "reflect" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/admin" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/hh" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tsdb" +) + +const ( + // DefaultBindAddress is the default address for raft, cluster, snapshot, etc.. + DefaultBindAddress = ":8088" + + // DefaultHostname is the default hostname used if we are unable to determine + // the hostname from the system + DefaultHostname = "localhost" +) + +// Config represents the configuration format for the influxd binary. 
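+
+// Each field below maps to a section of the TOML file through its toml tag;
+// a minimal configuration file might look like this (values illustrative):
+//
+//	reporting-disabled = true
+//
+//	[meta]
+//	dir = "/var/lib/influxdb/meta"
+//
+//	[data]
+//	dir = "/var/lib/influxdb/data"
+//
+//	[http]
+//	bind-address = ":8086"
+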
+type Config struct { + Meta *meta.Config `toml:"meta"` + Data tsdb.Config `toml:"data"` + Cluster cluster.Config `toml:"cluster"` + Retention retention.Config `toml:"retention"` + Precreator precreator.Config `toml:"shard-precreation"` + + Admin admin.Config `toml:"admin"` + Monitor monitor.Config `toml:"monitor"` + Subscriber subscriber.Config `toml:"subscriber"` + HTTPD httpd.Config `toml:"http"` + Graphites []graphite.Config `toml:"graphite"` + Collectd collectd.Config `toml:"collectd"` + OpenTSDB opentsdb.Config `toml:"opentsdb"` + UDPs []udp.Config `toml:"udp"` + + ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` + HintedHandoff hh.Config `toml:"hinted-handoff"` + + // Server reporting + ReportingDisabled bool `toml:"reporting-disabled"` + + // BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.) + BindAddress string `toml:"bind-address"` + + // Hostname is the hostname portion to use when registering local + // addresses. This hostname must be resolvable from other nodes. + Hostname string `toml:"hostname"` + + Join string `toml:"join"` +} + +// NewConfig returns an instance of Config with reasonable defaults. +func NewConfig() *Config { + c := &Config{} + c.Meta = meta.NewConfig() + c.Data = tsdb.NewConfig() + c.Cluster = cluster.NewConfig() + c.Precreator = precreator.NewConfig() + + c.Admin = admin.NewConfig() + c.Monitor = monitor.NewConfig() + c.Subscriber = subscriber.NewConfig() + c.HTTPD = httpd.NewConfig() + c.Collectd = collectd.NewConfig() + c.OpenTSDB = opentsdb.NewConfig() + + c.ContinuousQuery = continuous_querier.NewConfig() + c.Retention = retention.NewConfig() + c.HintedHandoff = hh.NewConfig() + c.BindAddress = DefaultBindAddress + + // All ARRAY attributes have to be init after toml decode + // See: https://github.com/BurntSushi/toml/pull/68 + // Those attributes will be initialized in Config.InitTableAttrs method + // Concerned Attributes: + // * `c.Graphites` + // * `c.UDPs` + + return c +} + +// InitTableAttrs initialises all ARRAY attributes if empty +func (c *Config) InitTableAttrs() { + if len(c.UDPs) == 0 { + c.UDPs = []udp.Config{udp.NewConfig()} + } + if len(c.Graphites) == 0 { + c.Graphites = []graphite.Config{graphite.NewConfig()} + } +} + +// NewDemoConfig returns the config that runs when no config is specified. +func NewDemoConfig() (*Config, error) { + c := NewConfig() + c.InitTableAttrs() + + var homeDir string + // By default, store meta and data files in current users home directory + u, err := user.Current() + if err == nil { + homeDir = u.HomeDir + } else if os.Getenv("HOME") != "" { + homeDir = os.Getenv("HOME") + } else { + return nil, fmt.Errorf("failed to determine current user for storage") + } + + c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta") + c.Data.Dir = filepath.Join(homeDir, ".influxdb/data") + c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh") + c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal") + + c.HintedHandoff.Enabled = true + c.Admin.Enabled = true + + return c, nil +} + +// Validate returns an error if the config is invalid. +func (c *Config) Validate() error { + if !c.Meta.Enabled && !c.Data.Enabled { + return errors.New("either Meta, Data, or both must be enabled") + } + + if c.Meta.Enabled { + if err := c.Meta.Validate(); err != nil { + return err + } + + // If the config is for a meta-only node, we can't store monitor stats + // locally. 
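+		// For example, a configuration with monitor storage enabled but the
+		// data service disabled fails this check (illustrative TOML, mirrors
+		// TestConfig_ValidateMonitorStore_MetaOnly in config_test.go):
+		//
+		//	[meta]
+		//	dir = "/var/lib/influxdb/meta"
+		//	[data]
+		//	enabled = false
+		//	[monitor]
+		//	store-enabled = true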
+ if c.Monitor.StoreEnabled && !c.Data.Enabled { + return fmt.Errorf("monitor storage can not be enabled on meta only nodes") + } + } + + if c.Data.Enabled { + if err := c.Data.Validate(); err != nil { + return err + } + + if err := c.HintedHandoff.Validate(); err != nil { + return err + } + for _, g := range c.Graphites { + if err := g.Validate(); err != nil { + return fmt.Errorf("invalid graphite config: %v", err) + } + } + } + + return nil +} + +// ApplyEnvOverrides apply the environment configuration on top of the config. +func (c *Config) ApplyEnvOverrides() error { + return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c)) +} + +func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error { + // If we have a pointer, dereference it + s := spec + if spec.Kind() == reflect.Ptr { + s = spec.Elem() + } + + // Make sure we have struct + if s.Kind() != reflect.Struct { + return nil + } + + typeOfSpec := s.Type() + for i := 0; i < s.NumField(); i++ { + f := s.Field(i) + // Get the toml tag to determine what env var name to use + configName := typeOfSpec.Field(i).Tag.Get("toml") + // Replace hyphens with underscores to avoid issues with shells + configName = strings.Replace(configName, "-", "_", -1) + fieldKey := typeOfSpec.Field(i).Name + + // Skip any fields that we cannot set + if f.CanSet() || f.Kind() == reflect.Slice { + + // Use the upper-case prefix and toml name for the env var + key := strings.ToUpper(configName) + if prefix != "" { + key = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName)) + } + value := os.Getenv(key) + + // If the type is s slice, apply to each using the index as a suffix + // e.g. GRAPHITE_0 + if f.Kind() == reflect.Slice || f.Kind() == reflect.Array { + for i := 0; i < f.Len(); i++ { + if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", key, i), f.Index(i)); err != nil { + return err + } + } + continue + } + + // If it's a sub-config, recursively apply + if f.Kind() == reflect.Struct || f.Kind() == reflect.Ptr { + if err := c.applyEnvOverrides(key, f); err != nil { + return err + } + continue + } + + // Skip any fields we don't have a value to set + if value == "" { + continue + } + + switch f.Kind() { + case reflect.String: + f.SetString(value) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + var intValue int64 + + // Handle toml.Duration + if f.Type().Name() == "Duration" { + dur, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) + } + intValue = dur.Nanoseconds() + } else { + var err error + intValue, err = strconv.ParseInt(value, 0, f.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) + } + } + + f.SetInt(intValue) + case reflect.Bool: + boolValue, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) + + } + f.SetBool(boolValue) + case reflect.Float32, reflect.Float64: + floatValue, err := strconv.ParseFloat(value, f.Type().Bits()) + if err != nil { + return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldKey, f.Type().String(), value) + + } + f.SetFloat(floatValue) + default: + if err := c.applyEnvOverrides(key, f); err != nil { + return err + } + } + } + } + return nil +} diff --git 
a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go new file mode 100644 index 0000000000..ee102213e0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go @@ -0,0 +1,78 @@ +package run + +import ( + "flag" + "fmt" + "io" + "os" + + "github.com/BurntSushi/toml" +) + +// PrintConfigCommand represents the command executed by "influxd config". +type PrintConfigCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPrintConfigCommand return a new instance of PrintConfigCommand. +func NewPrintConfigCommand() *PrintConfigCommand { + return &PrintConfigCommand{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run parses and prints the current config loaded. +func (cmd *PrintConfigCommand) Run(args ...string) error { + // Parse command flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + configPath := fs.String("config", "", "") + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } + if err := fs.Parse(args); err != nil { + return err + } + + // Parse config from path. + config, err := cmd.parseConfig(*configPath) + if err != nil { + return fmt.Errorf("parse config: %s", err) + } + + // Apply any environment variables on top of the parsed config + if err := config.ApplyEnvOverrides(); err != nil { + return fmt.Errorf("apply env config: %v", err) + } + + // Validate the configuration. + if err := config.Validate(); err != nil { + return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) + } + + toml.NewEncoder(cmd.Stdout).Encode(config) + fmt.Fprint(cmd.Stdout, "\n") + + return nil +} + +// ParseConfig parses the config at path. +// Returns a demo configuration if path is blank. +func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { + if path == "" { + return NewDemoConfig() + } + + config := NewConfig() + if _, err := toml.DecodeFile(path, &config); err != nil { + return nil, err + } + config.InitTableAttrs() + return config, nil +} + +var printConfigUsage = `usage: config + + config displays the default configuration +` diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go new file mode 100644 index 0000000000..dea8f30330 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go @@ -0,0 +1,188 @@ +package run_test + +import ( + "os" + "testing" + + "github.com/BurntSushi/toml" + "github.com/influxdata/influxdb/cmd/influxd/run" +) + +// Ensure the configuration can be parsed. +func TestConfig_Parse(t *testing.T) { + // Parse configuration. + var c run.Config + if _, err := toml.Decode(` +join = "foo:123,bar:456" + +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[cluster] + +[admin] +bind-address = ":8083" + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[collectd] +bind-address = ":1000" + +[opentsdb] +bind-address = ":2000" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[subscriber] +enabled = true + +[continuous_queries] +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + // Validate configuration. 
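+	// The companion test TestConfig_Parse_EnvOverride below exercises the
+	// INFLUXDB_* override path; those variable names are derived mechanically
+	// from the toml tags (uppercased, hyphens replaced by underscores, and an
+	// index suffix for array sections), for example (values illustrative):
+	//
+	//	export INFLUXDB_META_DIR=/var/lib/influxdb/meta    # [meta] dir
+	//	export INFLUXDB_HTTP_BIND_ADDRESS=:8086            # [http] bind-address
+	//	export INFLUXDB_GRAPHITE_1_PROTOCOL=udp            # second [[graphite]]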
+ if c.Meta.Dir != "/tmp/meta" { + t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) + } else if c.Data.Dir != "/tmp/data" { + t.Fatalf("unexpected data dir: %s", c.Data.Dir) + } else if c.Admin.BindAddress != ":8083" { + t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress) + } else if c.HTTPD.BindAddress != ":8087" { + t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) + } else if len(c.Graphites) != 2 { + t.Fatalf("unexpected graphites count: %d", len(c.Graphites)) + } else if c.Graphites[0].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) + } else if c.Graphites[1].Protocol != "tcp" { + t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol) + } else if c.Collectd.BindAddress != ":1000" { + t.Fatalf("unexpected collectd bind address: %s", c.Collectd.BindAddress) + } else if c.OpenTSDB.BindAddress != ":2000" { + t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress) + } else if c.UDPs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) + } else if c.Subscriber.Enabled != true { + t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) + } else if c.ContinuousQuery.Enabled != true { + t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) + } else if exp, got := "foo:123,bar:456", c.Join; exp != got { + t.Fatalf("unexpected join value: got %v, exp %v", got, exp) + } +} + +// Ensure the configuration can be parsed. +func TestConfig_Parse_EnvOverride(t *testing.T) { + // Parse configuration. + var c run.Config + if _, err := toml.Decode(` +[meta] +dir = "/tmp/meta" + +[data] +dir = "/tmp/data" + +[cluster] + +[admin] +bind-address = ":8083" + +[http] +bind-address = ":8087" + +[[graphite]] +protocol = "udp" + +[[graphite]] +protocol = "tcp" + +[collectd] +bind-address = ":1000" + +[opentsdb] +bind-address = ":2000" + +[[udp]] +bind-address = ":4444" + +[monitoring] +enabled = true + +[continuous_queries] +enabled = true +`, &c); err != nil { + t.Fatal(err) + } + + if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil { + t.Fatalf("failed to set env var: %v", err) + } + + if err := c.ApplyEnvOverrides(); err != nil { + t.Fatalf("failed to apply env overrides: %v", err) + } + + if c.UDPs[0].BindAddress != ":4444" { + t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) + } + + if c.Graphites[1].Protocol != "udp" { + t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) + } +} + +func TestConfig_ValidateNoServiceConfigured(t *testing.T) { + var c run.Config + if _, err := toml.Decode(` +[meta] +enabled = false + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) + } + + if e := c.Validate(); e == nil { + t.Fatalf("got nil, expected error") + } +} + +func TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) { + c := run.NewConfig() + if _, err := toml.Decode(` +[monitor] +store-enabled = true + +[meta] +dir = "foo" + +[data] +enabled = false +`, &c); err != nil { + t.Fatal(err) + } + + if err := c.Validate(); err == nil { + t.Fatalf("got nil, expected error") + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go new file mode 100644 index 0000000000..b1d90c2d57 --- /dev/null +++ 
b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go @@ -0,0 +1,763 @@ +package run + +import ( + "fmt" + "io/ioutil" + "log" + "net" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/cluster" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/admin" + "github.com/influxdata/influxdb/services/collectd" + "github.com/influxdata/influxdb/services/continuous_querier" + "github.com/influxdata/influxdb/services/copier" + "github.com/influxdata/influxdb/services/graphite" + "github.com/influxdata/influxdb/services/hh" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/opentsdb" + "github.com/influxdata/influxdb/services/precreator" + "github.com/influxdata/influxdb/services/retention" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/services/subscriber" + "github.com/influxdata/influxdb/services/udp" + "github.com/influxdata/influxdb/tcp" + "github.com/influxdata/influxdb/tsdb" + client "github.com/influxdata/usage-client/v1" + // Initialize the engine packages + _ "github.com/influxdata/influxdb/tsdb/engine" +) + +// BuildInfo represents the build details for the server code. +type BuildInfo struct { + Version string + Commit string + Branch string + Time string +} + +// Server represents a container for the metadata and storage data and services. +// It is built using a Config and it manages the startup and shutdown of all +// services in the proper order. +type Server struct { + buildInfo BuildInfo + + err chan error + closing chan struct{} + + BindAddress string + Listener net.Listener + + Node *influxdb.Node + + MetaClient *meta.Client + MetaService *meta.Service + + TSDBStore *tsdb.Store + QueryExecutor *cluster.QueryExecutor + PointsWriter *cluster.PointsWriter + ShardWriter *cluster.ShardWriter + HintedHandoff *hh.Service + Subscriber *subscriber.Service + + Services []Service + + // These references are required for the tcp muxer. + ClusterService *cluster.Service + SnapshotterService *snapshotter.Service + CopierService *copier.Service + + Monitor *monitor.Monitor + + // Server reporting and registration + reportingDisabled bool + + // Profiling + CPUProfile string + MemProfile string + + // joinPeers are the metaservers specified at run time to join this server to + joinPeers []string + + // metaUseTLS specifies if we should use a TLS connection to the meta servers + metaUseTLS bool + + // httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data + httpAPIAddr string + + // httpUseTLS specifies if we should use a TLS connection to the http servers + httpUseTLS bool + + // tcpAddr is the host:port combination for the TCP listener that services mux onto + tcpAddr string + + config *Config +} + +// NewServer returns a new instance of Server built from a config. +func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { + // We need to ensure that a meta directory always exists even if + // we don't start the meta store. node.json is always stored under + // the meta directory. 
+ if err := os.MkdirAll(c.Meta.Dir, 0777); err != nil { + return nil, fmt.Errorf("mkdir all: %s", err) + } + + // 0.10-rc1 and prior would sometimes put the node.json at the root + // dir which breaks backup/restore and restarting nodes. This moves + // the file from the root so it's always under the meta dir. + oldPath := filepath.Join(filepath.Dir(c.Meta.Dir), "node.json") + newPath := filepath.Join(c.Meta.Dir, "node.json") + + if _, err := os.Stat(oldPath); err == nil { + if err := os.Rename(oldPath, newPath); err != nil { + return nil, err + } + } + + // 0.11 we no longer use peers.json. Remove the file if we have one on disk. + os.RemoveAll(filepath.Join(c.Meta.Dir, "peers.json")) + + node, err := influxdb.LoadNode(c.Meta.Dir) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + node = influxdb.NewNode(c.Meta.Dir) + } + + // In 0.11 we removed MetaServers from node.json. To avoid confusion for + // existing users, force a re-save of the node.json file to remove that property + // if it happens to exist. + nodeContents, err := ioutil.ReadFile(filepath.Join(c.Meta.Dir, "node.json")) + if err == nil && strings.Contains(string(nodeContents), "MetaServers") { + node.Save() + } + + // In 0.10.0 bind-address got moved to the top level. Check + // The old location to keep things backwards compatible + bind := c.BindAddress + if c.Meta.BindAddress != "" { + bind = c.Meta.BindAddress + } + + if !c.Data.Enabled && !c.Meta.Enabled { + return nil, fmt.Errorf("must run as either meta node or data node or both") + } + + s := &Server{ + buildInfo: *buildInfo, + err: make(chan error), + closing: make(chan struct{}), + + BindAddress: bind, + + Node: node, + MetaClient: meta.NewClient(), + + Monitor: monitor.New(c.Monitor), + + reportingDisabled: c.ReportingDisabled, + joinPeers: c.Meta.JoinPeers, + metaUseTLS: c.Meta.HTTPSEnabled, + + httpAPIAddr: c.HTTPD.BindAddress, + httpUseTLS: c.HTTPD.HTTPSEnabled, + tcpAddr: bind, + + config: c, + } + + if c.Meta.Enabled { + s.MetaService = meta.NewService(c.Meta) + s.MetaService.Version = s.buildInfo.Version + s.MetaService.Node = s.Node + } + + if c.Data.Enabled { + s.TSDBStore = tsdb.NewStore(c.Data.Dir) + s.TSDBStore.EngineOptions.Config = c.Data + + // Copy TSDB configuration. + s.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine + s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize + s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval) + s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay) + + // Set the shard writer + s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout), + c.Cluster.MaxRemoteWriteConnections) + + // Create the hinted handoff service + s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter, s.MetaClient) + s.HintedHandoff.Monitor = s.Monitor + + // Create the Subscriber service + s.Subscriber = subscriber.NewService(c.Subscriber) + + // Initialize points writer. + s.PointsWriter = cluster.NewPointsWriter() + s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout) + s.PointsWriter.TSDBStore = s.TSDBStore + s.PointsWriter.ShardWriter = s.ShardWriter + s.PointsWriter.HintedHandoff = s.HintedHandoff + s.PointsWriter.Subscriber = s.Subscriber + s.PointsWriter.Node = s.Node + + // Initialize meta executor. + metaExecutor := cluster.NewMetaExecutor() + metaExecutor.MetaClient = s.MetaClient + metaExecutor.Node = s.Node + + // Initialize query executor. 
+ s.QueryExecutor = cluster.NewQueryExecutor() + s.QueryExecutor.MetaClient = s.MetaClient + s.QueryExecutor.TSDBStore = s.TSDBStore + s.QueryExecutor.Monitor = s.Monitor + s.QueryExecutor.PointsWriter = s.PointsWriter + s.QueryExecutor.MetaExecutor = metaExecutor + if c.Data.QueryLogEnabled { + s.QueryExecutor.LogOutput = os.Stderr + } + + // Initialize the monitor + s.Monitor.Version = s.buildInfo.Version + s.Monitor.Commit = s.buildInfo.Commit + s.Monitor.Branch = s.buildInfo.Branch + s.Monitor.BuildTime = s.buildInfo.Time + s.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter) + } + + return s, nil +} + +func (s *Server) appendClusterService(c cluster.Config) { + srv := cluster.NewService(c) + srv.TSDBStore = s.TSDBStore + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + s.ClusterService = srv +} + +func (s *Server) appendSnapshotterService() { + srv := snapshotter.NewService() + srv.TSDBStore = s.TSDBStore + srv.MetaClient = s.MetaClient + srv.Node = s.Node + s.Services = append(s.Services, srv) + s.SnapshotterService = srv +} + +func (s *Server) appendCopierService() { + srv := copier.NewService() + srv.TSDBStore = s.TSDBStore + s.Services = append(s.Services, srv) + s.CopierService = srv +} + +func (s *Server) appendRetentionPolicyService(c retention.Config) { + if !c.Enabled { + return + } + srv := retention.NewService(c) + srv.MetaClient = s.MetaClient + srv.TSDBStore = s.TSDBStore + s.Services = append(s.Services, srv) +} + +func (s *Server) appendAdminService(c admin.Config) { + if !c.Enabled { + return + } + srv := admin.NewService(c) + s.Services = append(s.Services, srv) +} + +func (s *Server) appendHTTPDService(c httpd.Config) { + if !c.Enabled { + return + } + srv := httpd.NewService(c) + srv.Handler.MetaClient = s.MetaClient + srv.Handler.QueryAuthorizer = meta.NewQueryAuthorizer(s.MetaClient) + srv.Handler.QueryExecutor = s.QueryExecutor + srv.Handler.PointsWriter = s.PointsWriter + srv.Handler.Version = s.buildInfo.Version + + // If a ContinuousQuerier service has been started, attach it. 
+ for _, srvc := range s.Services { + if cqsrvc, ok := srvc.(continuous_querier.ContinuousQuerier); ok { + srv.Handler.ContinuousQuerier = cqsrvc + } + } + + s.Services = append(s.Services, srv) +} + +func (s *Server) appendCollectdService(c collectd.Config) { + if !c.Enabled { + return + } + srv := collectd.NewService(c) + srv.MetaClient = s.MetaClient + srv.PointsWriter = s.PointsWriter + s.Services = append(s.Services, srv) +} + +func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { + if !c.Enabled { + return nil + } + srv, err := opentsdb.NewService(c) + if err != nil { + return err + } + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendGraphiteService(c graphite.Config) error { + if !c.Enabled { + return nil + } + srv, err := graphite.NewService(c) + if err != nil { + return err + } + + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + srv.Monitor = s.Monitor + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendPrecreatorService(c precreator.Config) error { + if !c.Enabled { + return nil + } + srv, err := precreator.NewService(c) + if err != nil { + return err + } + + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) + return nil +} + +func (s *Server) appendUDPService(c udp.Config) { + if !c.Enabled { + return + } + srv := udp.NewService(c) + srv.PointsWriter = s.PointsWriter + srv.MetaClient = s.MetaClient + s.Services = append(s.Services, srv) +} + +func (s *Server) appendContinuousQueryService(c continuous_querier.Config) { + if !c.Enabled { + return + } + srv := continuous_querier.NewService(c) + srv.MetaClient = s.MetaClient + srv.QueryExecutor = s.QueryExecutor + s.Services = append(s.Services, srv) +} + +// Err returns an error channel that multiplexes all out of band errors received from all services. +func (s *Server) Err() <-chan error { return s.err } + +// Open opens the meta and data store and all services. +func (s *Server) Open() error { + // Start profiling, if set. + startProfile(s.CPUProfile, s.MemProfile) + + // Open shared TCP connection. + ln, err := net.Listen("tcp", s.BindAddress) + if err != nil { + return fmt.Errorf("listen: %s", err) + } + s.Listener = ln + + // Multiplex listener. + mux := tcp.NewMux() + go mux.Serve(ln) + + if s.MetaService != nil { + s.MetaService.RaftListener = mux.Listen(meta.MuxHeader) + // Open meta service. + if err := s.MetaService.Open(); err != nil { + return fmt.Errorf("open meta service: %s", err) + } + go s.monitorErrorChan(s.MetaService.Err()) + } + + // initialize MetaClient. + if err = s.initializeMetaClient(); err != nil { + return err + } + + if s.TSDBStore != nil { + // Append services. 
+ s.appendClusterService(s.config.Cluster) + s.appendPrecreatorService(s.config.Precreator) + s.appendSnapshotterService() + s.appendCopierService() + s.appendAdminService(s.config.Admin) + s.appendContinuousQueryService(s.config.ContinuousQuery) + s.appendHTTPDService(s.config.HTTPD) + s.appendCollectdService(s.config.Collectd) + if err := s.appendOpenTSDBService(s.config.OpenTSDB); err != nil { + return err + } + for _, g := range s.config.UDPs { + s.appendUDPService(g) + } + s.appendRetentionPolicyService(s.config.Retention) + for _, g := range s.config.Graphites { + if err := s.appendGraphiteService(g); err != nil { + return err + } + } + + s.QueryExecutor.Node = s.Node + + s.Subscriber.MetaClient = s.MetaClient + s.ShardWriter.MetaClient = s.MetaClient + s.HintedHandoff.MetaClient = s.MetaClient + s.Subscriber.MetaClient = s.MetaClient + s.PointsWriter.MetaClient = s.MetaClient + s.Monitor.MetaClient = s.MetaClient + + s.ClusterService.Listener = mux.Listen(cluster.MuxHeader) + s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) + s.CopierService.Listener = mux.Listen(copier.MuxHeader) + + // Open TSDB store. + if err := s.TSDBStore.Open(); err != nil { + return fmt.Errorf("open tsdb store: %s", err) + } + + // Open the hinted handoff service + if err := s.HintedHandoff.Open(); err != nil { + return fmt.Errorf("open hinted handoff: %s", err) + } + + // Open the subcriber service + if err := s.Subscriber.Open(); err != nil { + return fmt.Errorf("open subscriber: %s", err) + } + + // Open the points writer service + if err := s.PointsWriter.Open(); err != nil { + return fmt.Errorf("open points writer: %s", err) + } + + // Open the monitor service + if err := s.Monitor.Open(); err != nil { + return fmt.Errorf("open monitor: %v", err) + } + + for _, service := range s.Services { + if err := service.Open(); err != nil { + return fmt.Errorf("open service: %s", err) + } + } + } + + // Start the reporting service, if not disabled. + if !s.reportingDisabled { + go s.startServerReporting() + } + + return nil +} + +// Close shuts down the meta and data stores and all services. +func (s *Server) Close() error { + stopProfile() + + // Close the listener first to stop any new connections + if s.Listener != nil { + s.Listener.Close() + } + + // Close services to allow any inflight requests to complete + // and prevent new requests from being accepted. + for _, service := range s.Services { + service.Close() + } + + if s.Monitor != nil { + s.Monitor.Close() + } + + if s.PointsWriter != nil { + s.PointsWriter.Close() + } + + if s.HintedHandoff != nil { + s.HintedHandoff.Close() + } + + // Close the TSDBStore, no more reads or writes at this point + if s.TSDBStore != nil { + s.TSDBStore.Close() + } + + if s.Subscriber != nil { + s.Subscriber.Close() + } + + // Finally close the meta-store since everything else depends on it + if s.MetaService != nil { + s.MetaService.Close() + } + + if s.MetaClient != nil { + s.MetaClient.Close() + } + + close(s.closing) + return nil +} + +// startServerReporting starts periodic server reporting. +func (s *Server) startServerReporting() { + for { + select { + case <-s.closing: + return + default: + } + s.reportServer() + <-time.After(24 * time.Hour) + } +} + +// reportServer reports anonymous statistics about the system. 
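+
+// reportServer is driven by the 24 hour loop in startServerReporting above;
+// reporting can be switched off entirely with the top-level
+// reporting-disabled setting (the ReportingDisabled field on Config), for
+// example (illustrative):
+//
+//	# influxdb.conf
+//	reporting-disabled = true
+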
+func (s *Server) reportServer() { + dis, err := s.MetaClient.Databases() + if err != nil { + log.Printf("failed to retrieve databases for reporting: %s", err.Error()) + return + } + numDatabases := len(dis) + + numMeasurements := 0 + numSeries := 0 + + // Only needed in the case of a data node + if s.TSDBStore != nil { + for _, di := range dis { + d := s.TSDBStore.DatabaseIndex(di.Name) + if d == nil { + // No data in this store for this database. + continue + } + m, s := d.MeasurementSeriesCounts() + numMeasurements += m + numSeries += s + } + } + + clusterID := s.MetaClient.ClusterID() + if err != nil { + log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error()) + return + } + + cl := client.New("") + usage := client.Usage{ + Product: "influxdb", + Data: []client.UsageData{ + { + Values: client.Values{ + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "version": s.buildInfo.Version, + "server_id": fmt.Sprintf("%v", s.Node.ID), + "cluster_id": fmt.Sprintf("%v", clusterID), + "num_series": numSeries, + "num_measurements": numMeasurements, + "num_databases": numDatabases, + }, + }, + }, + } + + log.Printf("Sending anonymous usage statistics to m.influxdb.com") + + go cl.Save(usage) +} + +// monitorErrorChan reads an error channel and resends it through the server. +func (s *Server) monitorErrorChan(ch <-chan error) { + for { + select { + case err, ok := <-ch: + if !ok { + return + } + s.err <- err + case <-s.closing: + return + } + } +} + +// initializeMetaClient will set the MetaClient and join the node to the cluster if needed +func (s *Server) initializeMetaClient() error { + // It's the first time starting up and we need to either join + // the cluster or initialize this node as the first member + if len(s.joinPeers) == 0 { + // start up a new single node cluster + if s.MetaService == nil { + return fmt.Errorf("server not set to join existing cluster must run also as a meta node") + } + s.MetaClient.SetMetaServers([]string{s.MetaService.HTTPAddr()}) + s.MetaClient.SetTLS(s.metaUseTLS) + } else { + // join this node to the cluster + s.MetaClient.SetMetaServers(s.joinPeers) + s.MetaClient.SetTLS(s.metaUseTLS) + } + if err := s.MetaClient.Open(); err != nil { + return err + } + + // if the node ID is > 0 then we need to initialize the metaclient + if s.Node.ID > 0 { + s.MetaClient.WaitForDataChanged() + } + + if s.config.Data.Enabled { + // If we've already created a data node for our id, we're done + if _, err := s.MetaClient.DataNode(s.Node.ID); err == nil { + return nil + } + + n, err := s.MetaClient.CreateDataNode(s.HTTPAddr(), s.TCPAddr()) + for err != nil { + log.Printf("Unable to create data node. retry in 1s: %s", err.Error()) + time.Sleep(time.Second) + n, err = s.MetaClient.CreateDataNode(s.HTTPAddr(), s.TCPAddr()) + } + s.Node.ID = n.ID + + if err := s.Node.Save(); err != nil { + return err + } + } + + return nil +} + +// HTTPAddr returns the HTTP address used by other nodes for HTTP queries and writes. +func (s *Server) HTTPAddr() string { + return s.remoteAddr(s.httpAPIAddr) +} + +// TCPAddr returns the TCP address used by other nodes for cluster communication. +func (s *Server) TCPAddr() string { + return s.remoteAddr(s.tcpAddr) +} + +func (s *Server) remoteAddr(addr string) string { + hostname := s.config.Hostname + if hostname == "" { + hostname = meta.DefaultHostname + } + remote, err := meta.DefaultHost(hostname, addr) + if err != nil { + return addr + } + return remote +} + +// MetaServers returns the meta node HTTP addresses used by this server. 
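+
+// Which meta servers the client knows about depends on how the node was
+// started (see initializeMetaClient above): with no join peers the node
+// bootstraps a single-node cluster against its own meta service, while -join
+// on the command line (or the top-level join setting in the config file)
+// points it at an existing cluster. A rough sketch of the two start-up shapes
+// (addresses illustrative only):
+//
+//	influxd run -config /etc/influxdb/influxdb.conf
+//	influxd run -config /etc/influxdb/influxdb.conf -join meta1:8091,meta2:8091
+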
+func (s *Server) MetaServers() []string { + return s.MetaClient.MetaServers() +} + +// Service represents a service attached to the server. +type Service interface { + Open() error + Close() error +} + +// prof stores the file locations of active profiles. +var prof struct { + cpu *os.File + mem *os.File +} + +// StartProfile initializes the cpu and memory profile, if specified. +func startProfile(cpuprofile, memprofile string) { + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatalf("cpuprofile: %v", err) + } + log.Printf("writing CPU profile to: %s\n", cpuprofile) + prof.cpu = f + pprof.StartCPUProfile(prof.cpu) + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatalf("memprofile: %v", err) + } + log.Printf("writing mem profile to: %s\n", memprofile) + prof.mem = f + runtime.MemProfileRate = 4096 + } + +} + +// StopProfile closes the cpu and memory profiles if they are running. +func stopProfile() { + if prof.cpu != nil { + pprof.StopCPUProfile() + prof.cpu.Close() + log.Println("CPU profile stopped") + } + if prof.mem != nil { + pprof.Lookup("heap").WriteTo(prof.mem, 0) + prof.mem.Close() + log.Println("mem profile stopped") + } +} + +type tcpaddr struct{ host string } + +func (a *tcpaddr) Network() string { return "tcp" } +func (a *tcpaddr) String() string { return a.host } + +// monitorPointsWriter is a wrapper around `cluster.PointsWriter` that helps +// to prevent a circular dependency between the `cluster` and `monitor` packages. +type monitorPointsWriter cluster.PointsWriter + +func (pw *monitorPointsWriter) WritePoints(database, retentionPolicy string, points models.Points) error { + return (*cluster.PointsWriter)(pw).WritePoints(&cluster.WritePointsRequest{ + Database: database, + RetentionPolicy: retentionPolicy, + ConsistencyLevel: cluster.ConsistencyLevelOne, + Points: points, + }) +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_cluster_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_cluster_test.go new file mode 100644 index 0000000000..d126d1f49d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_cluster_test.go @@ -0,0 +1,357 @@ +package run_test + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/cmd/influxd/run" +) + +func TestCluster_CreateDatabase(t *testing.T) { + t.Skip() + t.Parallel() + + c, err := NewClusterWithDefaults(5) + defer c.Close() + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } +} + +func TestCluster_Write(t *testing.T) { + t.Skip() + t.Parallel() + + c, err := NewClusterWithDefaults(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + writes := []string{ + fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + _, err = c.Servers[0].Write("db0", "default", strings.Join(writes, "\n"), nil) + if err != nil { + t.Fatal(err) + } + + q := &Query{ + name: "write", + command: `SELECT * FROM db0."default".cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + } + err = c.QueryAll(q) + if err != nil { + t.Fatal(err) + } +} + +func TestCluster_DatabaseCommands(t *testing.T) { + t.Skip() + t.Parallel() + c, err := NewCluster(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + + defer c.Close() + + test 
:= tests.load(t, "database_commands") + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} + +func TestCluster_Query_DropAndRecreateDatabase(t *testing.T) { + t.Skip() + t.Parallel() + c, err := NewCluster(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + test := tests.load(t, "drop_and_recreate_database") + + s := c.Servers[0] + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { + t.Fatal(err) + } + + if err = writeTestData(c.Servers[0], &test); err != nil { + t.Fatal(err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} + +func TestCluster_Query_DropDatabaseIsolated(t *testing.T) { + t.Skip() + t.Parallel() + c, err := NewCluster(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + test := tests.load(t, "drop_database_isolated") + + s := c.Servers[0] + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { + t.Fatal(err) + } + + if err = writeTestData(s, &test); err != nil { + t.Fatal(err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} + +func TestCluster_Query_DropAndRecreateSeries(t *testing.T) { + t.Parallel() + t.Skip() + c, err := NewCluster(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + test := tests.load(t, "drop_and_recreate_series") + + s := c.Servers[0] + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + if err = writeTestData(s, &test); err != nil { + t.Fatal(err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Fatal(query.Error(err)) + } + } + + // Re-write 
data and test again. + retest := tests.load(t, "drop_and_recreate_series_retest") + + if err = writeTestData(s, &test); err != nil { + t.Fatal(err) + } + + for _, query := range retest.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} + +func TestCluster_Query_DropSeriesFromRegex(t *testing.T) { + t.Parallel() + t.Skip() + c, err := NewCluster(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + test := tests.load(t, "drop_series_from_regex") + + s := c.Servers[0] + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { + t.Fatal(err) + } + + if err = writeTestData(s, &test); err != nil { + t.Fatal(err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} + +func TestCluster_RetentionPolicyCommands(t *testing.T) { + t.Skip() + t.Parallel() + + configFunc := func(index int, config *run.Config) { + config.Meta.RetentionAutoCreate = false + } + + c, err := NewClusterCustom(5, configFunc) + + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + test := tests.load(t, "retention_policy_commands") + + s := c.Servers[0] + if _, err := s.MetaClient.CreateDatabase(test.database()); err != nil { + t.Fatal(err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} + +func TestCluster_DatabaseRetentionPolicyAutoCreate(t *testing.T) { + t.Parallel() + t.Skip() + c, err := NewCluster(5) + if err != nil { + t.Fatalf("error creating cluster: %s", err) + } + defer c.Close() + + test := tests.load(t, "retention_policy_auto_create") + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + t.Logf("Running %s", query.name) + if query.once { + if _, err := c.Query(query); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + continue + } + if err := c.QueryAll(query); err != nil { + t.Error(query.Error(err)) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_helpers_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_helpers_test.go new file mode 100644 index 0000000000..a5bee471d2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_helpers_test.go @@ -0,0 +1,705 @@ +// This package is a set of convenience helpers and structs to make 
integration testing easier +package run_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "sync" + "testing" + "time" + + "github.com/influxdata/influxdb/client/v2" + "github.com/influxdata/influxdb/cmd/influxd/run" + "github.com/influxdata/influxdb/services/httpd" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/toml" +) + +const emptyResults = `{"results":[{}]}` + +// Server represents a test wrapper for run.Server. +type Server struct { + *run.Server + Config *run.Config +} + +// NewServer returns a new instance of Server. +func NewServer(c *run.Config) *Server { + buildInfo := &run.BuildInfo{ + Version: "testServer", + Commit: "testCommit", + Branch: "testBranch", + } + srv, _ := run.NewServer(c, buildInfo) + s := Server{ + Server: srv, + Config: c, + } + return &s +} + +// OpenServer opens a test server. +func OpenServer(c *run.Config, joinURLs string) *Server { + if len(joinURLs) > 0 { + c.Meta.JoinPeers = strings.Split(joinURLs, ",") + } + s := NewServer(c) + configureLogging(s) + if err := s.Open(); err != nil { + panic(err.Error()) + } + return s +} + +// OpenServerWithVersion opens a test server with a specific version. +func OpenServerWithVersion(c *run.Config, version string) *Server { + buildInfo := &run.BuildInfo{ + Version: version, + Commit: "", + Branch: "", + } + fmt.Println(">>> ", c.Data.Enabled) + srv, _ := run.NewServer(c, buildInfo) + s := Server{ + Server: srv, + Config: c, + } + if err := s.Open(); err != nil { + panic(err.Error()) + } + configureLogging(&s) + + return &s +} + +// OpenDefaultServer opens a test server with a default database & retention policy. +func OpenDefaultServer(c *run.Config, joinURLs string) *Server { + s := OpenServer(c, joinURLs) + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + panic(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + panic(err) + } + return s +} + +// Close shuts down the server and removes all temporary paths. +func (s *Server) Close() { + if err := s.Server.Close(); err != nil { + panic(err.Error()) + } + if err := os.RemoveAll(s.Config.Meta.Dir); err != nil { + panic(err.Error()) + } + if err := os.RemoveAll(s.Config.Data.Dir); err != nil { + panic(err.Error()) + } + if err := os.RemoveAll(s.Config.HintedHandoff.Dir); err != nil { + panic(err.Error()) + } +} + +// URL returns the base URL for the httpd endpoint. +func (s *Server) URL() string { + for _, service := range s.Services { + if service, ok := service.(*httpd.Service); ok { + return "http://" + service.Addr().String() + } + } + panic("httpd server not found in services") +} + +// CreateDatabaseAndRetentionPolicy will create the database and retention policy. +func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error { + if _, err := s.MetaClient.CreateDatabase(db); err != nil { + return err + } else if _, err := s.MetaClient.CreateRetentionPolicy(db, rp); err != nil { + return err + } + return nil +} + +// Query executes a query against the server and returns the results. +func (s *Server) Query(query string) (results string, err error) { + return s.QueryWithParams(query, nil) +} + +// MustQuery executes a query against the server and returns the results. 
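+
+// Typical use of these helpers in a test, sketched with illustrative data
+// (the pattern mirrors TestServer_BackupAndRestore earlier in this patch):
+//
+//	s := OpenDefaultServer(NewConfig(), "")
+//	defer s.Close()
+//
+//	s.MustWrite("db0", "rp0", `cpu,host=a value=1 1000000`, nil)
+//	res := s.MustQuery(`SELECT * FROM "db0"."rp0".cpu`)
+//	// assert on res as needed
+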
+func (s *Server) MustQuery(query string) string { + results, err := s.Query(query) + if err != nil { + panic(err) + } + return results +} + +// Query executes a query against the server and returns the results. +func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) { + var v url.Values + if values == nil { + v = url.Values{} + } else { + v, _ = url.ParseQuery(values.Encode()) + } + v.Set("q", query) + return s.HTTPGet(s.URL() + "/query?" + v.Encode()) +} + +// MustQueryWithParams executes a query against the server and returns the results. +func (s *Server) MustQueryWithParams(query string, values url.Values) string { + results, err := s.QueryWithParams(query, values) + if err != nil { + panic(err) + } + return results +} + +// HTTPGet makes an HTTP GET request to the server and returns the response. +func (s *Server) HTTPGet(url string) (results string, err error) { + resp, err := http.Get(url) + if err != nil { + return "", err + } + body := string(MustReadAll(resp.Body)) + switch resp.StatusCode { + case http.StatusBadRequest: + if !expectPattern(".*error parsing query*.", body) { + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } + return body, nil + case http.StatusOK: + return body, nil + default: + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } +} + +// HTTPPost makes an HTTP POST request to the server and returns the response. +func (s *Server) HTTPPost(url string, content []byte) (results string, err error) { + buf := bytes.NewBuffer(content) + resp, err := http.Post(url, "application/json", buf) + if err != nil { + return "", err + } + body := string(MustReadAll(resp.Body)) + switch resp.StatusCode { + case http.StatusBadRequest: + if !expectPattern(".*error parsing query*.", body) { + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } + return body, nil + case http.StatusOK, http.StatusNoContent: + return body, nil + default: + return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body) + } +} + +// Write executes a write against the server and returns the results. +func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) { + if params == nil { + params = url.Values{} + } + if params.Get("db") == "" { + params.Set("db", db) + } + if params.Get("rp") == "" { + params.Set("rp", rp) + } + resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body)) + if err != nil { + return "", err + } else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return "", fmt.Errorf("invalid status code: code=%d, body=%s", resp.StatusCode, MustReadAll(resp.Body)) + } + return string(MustReadAll(resp.Body)), nil +} + +// MustWrite executes a write to the server. Panic on error. +func (s *Server) MustWrite(db, rp, body string, params url.Values) string { + results, err := s.Write(db, rp, body, params) + if err != nil { + panic(err) + } + return results +} + +// NewConfig returns the default config with temporary paths. 
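+
+// The Write and Query helpers above talk to the server over its normal HTTP
+// API; the requests they issue boil down to (host and values illustrative):
+//
+//	POST http://127.0.0.1:8086/write?db=db0&rp=rp0   (body: line protocol)
+//	GET  http://127.0.0.1:8086/query?q=<url-encoded query>
+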
+func NewConfig() *run.Config { + c := run.NewConfig() + c.ReportingDisabled = true + c.Cluster.ShardWriterTimeout = toml.Duration(30 * time.Second) + c.Cluster.WriteTimeout = toml.Duration(30 * time.Second) + c.Meta.Dir = MustTempFile() + c.Meta.BindAddress = "127.0.0.1:0" + c.Meta.HTTPBindAddress = "127.0.0.1:0" + c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond) + c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond) + c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond) + c.Meta.CommitTimeout = toml.Duration(5 * time.Millisecond) + + if !testing.Verbose() { + c.Meta.LoggingEnabled = false + } + + c.Data.Dir = MustTempFile() + c.Data.WALDir = MustTempFile() + c.Data.WALLoggingEnabled = false + + c.HintedHandoff.Dir = MustTempFile() + + c.HTTPD.Enabled = true + c.HTTPD.BindAddress = "127.0.0.1:0" + c.HTTPD.LogEnabled = testing.Verbose() + + c.Monitor.StoreEnabled = false + + return c +} + +func newRetentionPolicyInfo(name string, rf int, duration time.Duration) *meta.RetentionPolicyInfo { + return &meta.RetentionPolicyInfo{Name: name, ReplicaN: rf, Duration: duration} +} + +func maxFloat64() string { + maxFloat64, _ := json.Marshal(math.MaxFloat64) + return string(maxFloat64) +} + +func maxInt64() string { + maxInt64, _ := json.Marshal(^int64(0)) + return string(maxInt64) +} + +func now() time.Time { + return time.Now().UTC() +} + +func yesterday() time.Time { + return now().Add(-1 * time.Hour * 24) +} + +func mustParseTime(layout, value string) time.Time { + tm, err := time.Parse(layout, value) + if err != nil { + panic(err) + } + return tm +} + +// MustReadAll reads r. Panic on error. +func MustReadAll(r io.Reader) []byte { + b, err := ioutil.ReadAll(r) + if err != nil { + panic(err) + } + return b +} + +// MustTempFile returns a path to a temporary file. 
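Editor's note (not part of the upstream patch): NewConfig above routes every on-disk path (Meta.Dir, Data.Dir, Data.WALDir, HintedHandoff.Dir) through MustTempFile, so each test server runs against isolated state that Server.Close can remove afterwards. Tests that need a non-default option adjust the config before opening the server, as the retention policy tests later in this diff do:

    c := NewConfig()
    c.Meta.RetentionAutoCreate = false // taken from TestServer_RetentionPolicyCommands below
    s := OpenServer(c, "")
    defer s.Close()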
+func MustTempFile() string { + f, err := ioutil.TempFile("", "influxd-") + if err != nil { + panic(err) + } + f.Close() + os.Remove(f.Name()) + return f.Name() +} + +func expectPattern(exp, act string) bool { + re := regexp.MustCompile(exp) + if !re.MatchString(act) { + return false + } + return true +} + +type Query struct { + name string + command string + params url.Values + exp, act string + pattern bool + skip bool + repeat int + once bool +} + +// Execute runs the command and returns an err if it fails +func (q *Query) Execute(s *Server) (err error) { + if q.params == nil { + q.act, err = s.Query(q.command) + return + } + q.act, err = s.QueryWithParams(q.command, q.params) + return +} + +func (q *Query) success() bool { + if q.pattern { + return expectPattern(q.exp, q.act) + } + return q.exp == q.act +} + +func (q *Query) Error(err error) string { + return fmt.Sprintf("%s: %v", q.name, err) +} + +func (q *Query) failureMessage() string { + return fmt.Sprintf("%s: unexpected results\nquery: %s\nparams: %v\nexp: %s\nactual: %s\n", q.name, q.command, q.params, q.exp, q.act) +} + +type Write struct { + db string + rp string + data string +} + +func (w *Write) duplicate() *Write { + return &Write{ + db: w.db, + rp: w.rp, + data: w.data, + } +} + +type Writes []*Write + +func (a Writes) duplicate() Writes { + writes := make(Writes, 0, len(a)) + for _, w := range a { + writes = append(writes, w.duplicate()) + } + return writes +} + +type Tests map[string]Test + +type Test struct { + initialized bool + writes Writes + params url.Values + db string + rp string + exp string + queries []*Query +} + +func NewTest(db, rp string) Test { + return Test{ + db: db, + rp: rp, + } +} + +func (t Test) duplicate() Test { + test := Test{ + initialized: t.initialized, + writes: t.writes.duplicate(), + db: t.db, + rp: t.rp, + exp: t.exp, + queries: make([]*Query, len(t.queries)), + } + + if t.params != nil { + t.params = url.Values{} + for k, a := range t.params { + vals := make([]string, len(a)) + copy(vals, a) + test.params[k] = vals + } + } + copy(test.queries, t.queries) + return test +} + +func (t *Test) addQueries(q ...*Query) { + t.queries = append(t.queries, q...) 
+} + +func (t *Test) database() string { + if t.db != "" { + return t.db + } + return "db0" +} + +func (t *Test) retentionPolicy() string { + if t.rp != "" { + return t.rp + } + return "default" +} + +func (t *Test) init(s *Server) error { + if len(t.writes) == 0 || t.initialized { + return nil + } + if t.db == "" { + t.db = "db0" + } + if t.rp == "" { + t.rp = "rp0" + } + + if err := writeTestData(s, t); err != nil { + return err + } + + t.initialized = true + + return nil +} + +func writeTestData(s *Server, t *Test) error { + for i, w := range t.writes { + if w.db == "" { + w.db = t.database() + } + if w.rp == "" { + w.rp = t.retentionPolicy() + } + + if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicyInfo(w.rp, 1, 0)); err != nil { + return err + } + if err := s.MetaClient.SetDefaultRetentionPolicy(w.db, w.rp); err != nil { + return err + } + + if res, err := s.Write(w.db, w.rp, w.data, t.params); err != nil { + return fmt.Errorf("write #%d: %s", i, err) + } else if t.exp != res { + return fmt.Errorf("unexpected results\nexp: %s\ngot: %s\n", t.exp, res) + } + } + + return nil +} + +func configureLogging(s *Server) { + // Set the logger to discard unless verbose is on + if !testing.Verbose() { + type logSetter interface { + SetLogger(*log.Logger) + } + nullLogger := log.New(ioutil.Discard, "", 0) + s.TSDBStore.Logger = nullLogger + s.HintedHandoff.SetLogger(nullLogger) + s.Monitor.SetLogger(nullLogger) + s.QueryExecutor.LogOutput = ioutil.Discard + s.Subscriber.SetLogger(nullLogger) + for _, service := range s.Services { + if service, ok := service.(logSetter); ok { + service.SetLogger(nullLogger) + } + } + } +} + +type Cluster struct { + Servers []*Server +} + +func NewCluster(size int) (*Cluster, error) { + c := Cluster{} + c.Servers = append(c.Servers, OpenServer(NewConfig(), "")) + metaServiceAddr := c.Servers[0].MetaServers()[0] + + for i := 1; i < size; i++ { + c.Servers = append(c.Servers, OpenServer(NewConfig(), metaServiceAddr)) + } + + for _, s := range c.Servers { + configureLogging(s) + } + + if err := verifyCluster(&c, size); err != nil { + return nil, err + } + + return &c, nil +} + +func verifyCluster(c *Cluster, size int) error { + r, err := c.Servers[0].Query("SHOW SERVERS") + if err != nil { + return err + } + var cl client.Response + if e := json.Unmarshal([]byte(r), &cl); e != nil { + return e + } + + // grab only the meta nodes series + series := cl.Results[0].Series[0] + for i, value := range series.Values { + addr := c.Servers[0].MetaServers()[0] + if value[0].(float64) != float64(i+1) { + return fmt.Errorf("expected nodeID %d, got %v", i, value[0]) + } + if value[1].(string) != addr { + return fmt.Errorf("expected addr %s, got %v", addr, value[1]) + } + } + + return nil +} + +func NewClusterWithDefaults(size int) (*Cluster, error) { + c, err := NewCluster(size) + if err != nil { + return nil, err + } + + r, err := c.Query(&Query{command: "CREATE DATABASE db0"}) + if err != nil { + return nil, err + } + if r != emptyResults { + return nil, fmt.Errorf("%s", r) + } + + for i, s := range c.Servers { + got, err := s.Query("SHOW DATABASES") + if err != nil { + return nil, fmt.Errorf("failed to query databases on node %d for show databases", i+1) + } + if exp := `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`; got != exp { + return nil, fmt.Errorf("unexpected result node %d\nexp: %s\ngot: %s\n", i+1, exp, got) + } + } + + return c, nil +} + +func NewClusterCustom(size int, cb func(index int, config *run.Config)) 
(*Cluster, error) { + c := Cluster{} + + config := NewConfig() + cb(0, config) + + c.Servers = append(c.Servers, OpenServer(config, "")) + metaServiceAddr := c.Servers[0].MetaServers()[0] + + for i := 1; i < size; i++ { + config := NewConfig() + cb(i, config) + c.Servers = append(c.Servers, OpenServer(config, metaServiceAddr)) + } + + for _, s := range c.Servers { + configureLogging(s) + } + + if err := verifyCluster(&c, size); err != nil { + return nil, err + } + + return &c, nil +} + +// Close shuts down all servers. +func (c *Cluster) Close() { + var wg sync.WaitGroup + wg.Add(len(c.Servers)) + + for _, s := range c.Servers { + go func(s *Server) { + defer wg.Done() + s.Close() + }(s) + } + wg.Wait() +} + +func (c *Cluster) Query(q *Query) (string, error) { + r, e := c.Servers[0].Query(q.command) + q.act = r + return r, e +} + +func (c *Cluster) QueryIndex(index int, q string) (string, error) { + return c.Servers[index].Query(q) +} + +func (c *Cluster) QueryAll(q *Query) error { + type Response struct { + Val string + Err error + } + + timeoutErr := fmt.Errorf("timed out waiting for response") + + queryAll := func() error { + // if a server doesn't return in 5 seconds, fail the response + timeout := time.After(5 * time.Second) + ch := make(chan Response, 0) + + for _, s := range c.Servers { + go func(s *Server) { + r, err := s.QueryWithParams(q.command, q.params) + ch <- Response{Val: r, Err: err} + }(s) + } + + resps := []Response{} + for i := 0; i < len(c.Servers); i++ { + select { + case r := <-ch: + resps = append(resps, r) + case <-timeout: + return timeoutErr + } + } + + for _, r := range resps { + if r.Err != nil { + return r.Err + } + if q.pattern { + if !expectPattern(q.exp, r.Val) { + return fmt.Errorf("unexpected pattern: \n\texp: %s\n\tgot: %s\n", q.exp, r.Val) + } + } else { + if r.Val != q.exp { + return fmt.Errorf("unexpected value:\n\texp: %s\n\tgot: %s\n", q.exp, r.Val) + } + } + } + + return nil + } + + tick := time.Tick(100 * time.Millisecond) + // if we don't reach consensus in 20 seconds, fail the query + timeout := time.After(20 * time.Second) + + if err := queryAll(); err == nil { + return nil + } + for { + select { + case <-tick: + if err := queryAll(); err == nil { + return nil + } + case <-timeout: + return fmt.Errorf("timed out waiting for response") + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_suite_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_suite_test.go new file mode 100644 index 0000000000..1f0b69a66f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_suite_test.go @@ -0,0 +1,410 @@ +package run_test + +import ( + "fmt" + "net/url" + "strings" + "testing" + "time" +) + +var tests Tests + +// Load all shared tests +func init() { + tests = make(map[string]Test) + + tests["database_commands"] = Test{ + queries: []*Query{ + &Query{ + name: "create database should succeed", + command: `CREATE DATABASE db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "create database with retention duration should succeed", + command: `CREATE DATABASE db0_r WITH DURATION 24h REPLICATION 2 NAME db0_r_policy`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "create database should error with bad name", + command: `CREATE DATABASE 0xdb0`, + exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 17"}`, + }, + &Query{ + name: "create database with retention duration should error with bad retention duration", + 
command: `CREATE DATABASE db0 WITH DURATION xyz`, + exp: `{"error":"error parsing query: found xyz, expected duration at line 1, char 35"}`, + }, + &Query{ + name: "create database with retention replication should error with bad retention replication number", + command: `CREATE DATABASE db0 WITH REPLICATION xyz`, + exp: `{"error":"error parsing query: found xyz, expected number at line 1, char 38"}`, + }, + &Query{ + name: "create database with retention name should error with missing retention name", + command: `CREATE DATABASE db0 WITH NAME`, + exp: `{"error":"error parsing query: found EOF, expected identifier at line 1, char 31"}`, + }, + &Query{ + name: "show database should succeed", + command: `SHOW DATABASES`, + exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db0_r"]]}]}]}`, + }, + &Query{ + name: "create database should not error with existing database with IF NOT EXISTS", + command: `CREATE DATABASE IF NOT EXISTS db0`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create database should create non-existing database with IF NOT EXISTS", + command: `CREATE DATABASE IF NOT EXISTS db1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create database with retention duration should error if retention policy is different with IF NOT EXISTS", + command: `CREATE DATABASE IF NOT EXISTS db1 WITH DURATION 24h`, + exp: `{"results":[{"error":"retention policy conflicts with an existing policy"}]}`, + }, + &Query{ + name: "create database should error IF NOT EXISTS with bad retention duration", + command: `CREATE DATABASE IF NOT EXISTS db1 WITH DURATION xyz`, + exp: `{"error":"error parsing query: found xyz, expected duration at line 1, char 49"}`, + }, + &Query{ + name: "show database should succeed", + command: `SHOW DATABASES`, + exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"],["db0_r"],["db1"]]}]}]}`, + }, + &Query{ + name: "drop database db0 should succeed", + command: `DROP DATABASE db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "drop database db0_r should succeed", + command: `DROP DATABASE db0_r`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "drop database db1 should succeed", + command: `DROP DATABASE db1`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "drop database should not error if it does not exists", + command: `DROP DATABASE db1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "drop database should not error with non-existing database db1 WITH IF EXISTS", + command: `DROP DATABASE IF EXISTS db1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show database should have no results", + command: `SHOW DATABASES`, + exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`, + }, + }, + } + + tests["drop_and_recreate_database"] = Test{ + db: "db0", + rp: "rp0", + writes: Writes{ + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + }, + queries: []*Query{ + &Query{ + name: "Drop database after data write", + command: `DROP DATABASE db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "Recreate database", + command: `CREATE DATABASE db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "Recreate retention policy", + command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 365d REPLICATION 1 DEFAULT`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "Show measurements after recreate", 
+ command: `SHOW MEASUREMENTS`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Query data after recreate", + command: `SELECT * FROM cpu`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }, + } + + tests["drop_database_isolated"] = Test{ + db: "db0", + rp: "rp0", + writes: Writes{ + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + }, + queries: []*Query{ + &Query{ + name: "Query data from 1st database", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Query data from 1st database with GROUP BY *", + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop other database", + command: `DROP DATABASE db1`, + once: true, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "Query data from 1st database and ensure it's still there", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Query data from 1st database and ensure it's still there with GROUP BY *", + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }, + } + + tests["drop_and_recreate_series"] = Test{ + db: "db0", + rp: "rp0", + writes: Writes{ + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + &Write{db: "db1", data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + }, + queries: []*Query{ + &Query{ + name: "Show series is present", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop series after data write", + command: `DROP SERIES FROM cpu`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + once: true, + }, + &Query{ + name: "Show series is gone", + command: `SHOW SERIES`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Make sure data wasn't deleted from other database.", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db1"}}, + }, + }, + } + tests["drop_and_recreate_series_retest"] = Test{ + db: "db0", + rp: "rp0", + writes: Writes{ + &Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + }, + queries: []*Query{ + &Query{ + name: "Show series is present 
again after re-write", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }, + } + + tests["drop_series_from_regex"] = Test{ + db: "db0", + rp: "rp0", + writes: Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`a,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`aa,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`b,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`c,host=serverA,region=uswest val=30.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + }, "\n")}, + }, + queries: []*Query{ + &Query{ + name: "Show series is present", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"a","columns":["_key","host","region"],"values":[["a,host=serverA,region=uswest","serverA","uswest"]]},{"name":"aa","columns":["_key","host","region"],"values":[["aa,host=serverA,region=uswest","serverA","uswest"]]},{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop series after data write", + command: `DROP SERIES FROM /a.*/`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + once: true, + }, + &Query{ + name: "Show series is gone", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop series from regex that matches no measurements", + command: `DROP SERIES FROM /a.*/`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + once: true, + }, + &Query{ + name: "make sure DROP SERIES doesn't delete anything when regex doesn't match", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop series with WHERE field should error", + command: `DROP SERIES FROM c WHERE val > 50.0`, + exp: `{"results":[{"error":"DROP SERIES doesn't support fields in WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "make sure DROP SERIES with field in WHERE didn't delete data", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"b","columns":["_key","host","region"],"values":[["b,host=serverA,region=uswest","serverA","uswest"]]},{"name":"c","columns":["_key","host","region"],"values":[["c,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop series with WHERE time should error", + command: `DROP SERIES FROM c WHERE time > now() - 1d`, + exp: `{"results":[{"error":"DROP SERIES doesn't 
support time in WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }, + } + + tests["retention_policy_commands"] = Test{ + db: "db0", + queries: []*Query{ + &Query{ + name: "create retention policy should succeed", + command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "show retention policy should succeed", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","1h0m0s",1,false]]}]}]}`, + }, + &Query{ + name: "alter retention policy should succeed", + command: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "show retention policy should have new altered information", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + }, + &Query{ + name: "show retention policy should still show policy", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + }, + &Query{ + name: "create a second non-default retention policy", + command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "show retention policy should show both", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true],["rp2","1h0m0s",1,false]]}]}]}`, + }, + &Query{ + name: "dropping non-default retention policy succeed", + command: `DROP RETENTION POLICY rp2 ON db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "show retention policy should show just default", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, + }, + &Query{ + name: "Ensure retention policy with unacceptable retention cannot be created", + command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1s REPLICATION 1`, + exp: `{"results":[{"error":"retention policy duration must be at least 1h0m0s"}]}`, + once: true, + }, + &Query{ + name: "Check error when deleting retention policy on non-existent database", + command: `DROP RETENTION POLICY rp1 ON mydatabase`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "Ensure retention policy for non existing db is not created", + command: `CREATE RETENTION POLICY rp0 ON nodb DURATION 1h REPLICATION 1`, + exp: `{"results":[{"error":"database not found: nodb"}]}`, + once: true, + }, + }, + } + + tests["retention_policy_auto_create"] = Test{ + queries: []*Query{ + &Query{ + name: "create database should succeed", + command: `CREATE DATABASE db0`, + exp: `{"results":[{}]}`, + once: true, + }, + &Query{ + name: "show retention policies should return auto-created policy", + command: `SHOW RETENTION POLICIES ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`, + }, + }, + } + +} + +func (tests Tests) load(t *testing.T, key string) Test { + test, ok := tests[key] + if !ok { + t.Fatalf("no test %q", key) + } + + return test.duplicate() +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.go 
b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.go new file mode 100644 index 0000000000..b1926c49f3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.go @@ -0,0 +1,5718 @@ +package run_test + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/cluster" +) + +// Ensure that HTTP responses include the InfluxDB version. +func TestServer_HTTPResponseVersion(t *testing.T) { + version := "v1234" + s := OpenServerWithVersion(NewConfig(), version) + defer s.Close() + + resp, _ := http.Get(s.URL() + "/query") + got := resp.Header.Get("X-Influxdb-Version") + if got != version { + t.Errorf("Server responded with incorrect version, exp %s, got %s", version, got) + } +} + +// Ensure the database commands work. +func TestServer_DatabaseCommands(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := tests.load(t, "database_commands") + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := tests.load(t, "drop_and_recreate_database") + + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { + t.Fatal(err) + } + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropDatabaseIsolated(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := tests.load(t, "drop_database_isolated") + + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { + t.Fatal(err) + } + if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { + t.Fatal(err) + } + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropAndRecreateSeries(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := tests.load(t, "drop_and_recreate_series") + + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { + t.Fatal(err) + } + + for i, query := range test.queries { + if i == 0 { + if err 
:= test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + + // Re-write data and test again. + retest := tests.load(t, "drop_and_recreate_series_retest") + + for i, query := range retest.queries { + if i == 0 { + if err := retest.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropSeriesFromRegex(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := tests.load(t, "drop_series_from_regex") + + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicyInfo(test.retentionPolicy(), 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy(test.database(), test.retentionPolicy()); err != nil { + t.Fatal(err) + } + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure retention policy commands work. +func TestServer_RetentionPolicyCommands(t *testing.T) { + t.Parallel() + c := NewConfig() + c.Meta.RetentionAutoCreate = false + s := OpenServer(c, "") + defer s.Close() + + test := tests.load(t, "retention_policy_commands") + + // Create a database. + if _, err := s.MetaClient.CreateDatabase(test.database()); err != nil { + t.Fatal(err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the autocreation of retention policy works. +func TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + test := tests.load(t, "retention_policy_auto_create") + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure user commands work. +func TestServer_UserCommands(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + // Create a database. 
+ if _, err := s.MetaClient.CreateDatabase("db0"); err != nil { + t.Fatal(err) + } + + test := Test{ + queries: []*Query{ + &Query{ + name: "show users, no actual users", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`, + }, + &Query{ + name: `create user`, + command: "CREATE USER jdoe WITH PASSWORD '1337'", + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show users, 1 existing user", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",false]]}]}]}`, + }, + &Query{ + name: "grant all priviledges to jdoe", + command: `GRANT ALL PRIVILEGES TO jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "show users, existing user as admin", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",true]]}]}]}`, + }, + &Query{ + name: "grant DB privileges to user", + command: `GRANT READ ON db0 TO jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "revoke all privileges", + command: `REVOKE ALL PRIVILEGES FROM jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "bad create user request", + command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`, + exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`, + }, + &Query{ + name: "bad create user request, no name", + command: `CREATE USER WITH PASSWORD pwd1337`, + exp: `{"error":"error parsing query: found WITH, expected identifier at line 1, char 13"}`, + }, + &Query{ + name: "bad create user request, no password", + command: `CREATE USER jdoe`, + exp: `{"error":"error parsing query: found EOF, expected WITH at line 1, char 18"}`, + }, + &Query{ + name: "drop user", + command: `DROP USER jdoe`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "make sure user was dropped", + command: `SHOW USERS`, + exp: `{"results":[{"series":[{"columns":["user","admin"]}]}]}`, + }, + &Query{ + name: "delete non existing user", + command: `DROP USER noone`, + exp: `{"results":[{"error":"user not found"}]}`, + }, + }, + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(fmt.Sprintf("command: %s - err: %s", query.command, query.Error(err))) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server rejects a single point via json protocol by default. +func TestServer_Write_JSON(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + // Verify writing JSON points returns an error. + now := now() + res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil) + if err == nil { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", ``, res) + } else if exp := `JSON write protocol has been deprecated`; !strings.Contains(err.Error(), exp) { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, err.Error()) + } + + // Verify no data has been written. 
+ if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := `{"results":[{}]}`; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via json protocol and read it back. +func TestServer_Write_JSON_Enabled(t *testing.T) { + t.Parallel() + c := NewConfig() + c.HTTPD.JSONWriteEnabled = true + s := OpenServer(c, "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with float type and read it back. +func TestServer_Write_LineProtocol_Float(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with bool type and read it back. +func TestServer_Write_LineProtocol_Bool(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with string type and read it back. 
+func TestServer_Write_LineProtocol_String(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value="disk full" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s","disk full"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can create a single point via line protocol with integer type and read it back. +func TestServer_Write_LineProtocol_Integer(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { + t.Fatal(err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } + + // Verify the data was written. + if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server returns a partial write response when some points fail to parse. Also validate that +// the successfully parsed points can be queried. +func TestServer_Write_LineProtocol_Partial(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + points := []string{ + "cpu,host=server01 value=100 " + strconv.FormatInt(now.UnixNano(), 10), + "cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 20), + "cpu,host=server01 value=NaN " + strconv.FormatInt(now.UnixNano(), 30), + } + if res, err := s.Write("db0", "rp0", strings.Join(points, "\n"), nil); err == nil { + t.Fatal("expected error. got nil", err) + } else if exp := ``; exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } else if exp := "partial write"; !strings.Contains(err.Error(), exp) { + t.Fatalf("unexpected error: exp\nexp: %v\ngot: %v", exp, err) + } + + // Verify the data was written. 
+ if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { + t.Fatal(err) + } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { + t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) + } +} + +// Ensure the server can query with default databases (via param) and default retention policy +func TestServer_Query_DefaultDBAndRP(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())}, + } + + test.addQueries([]*Query{ + &Query{ + name: "default db and rp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "default rp exists", + command: `show retention policies ON db0`, + exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,false],["rp0","1h0m0s",1,true]]}]}]}`, + }, + &Query{ + name: "default rp", + command: `SELECT * FROM db0..cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "default dp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM rp0.cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can have a database with multiple measurements. 
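// Editor's note (descriptive comment, not part of the upstream patch): the two
// writes in the next test are deliberately timestamped fifteen years apart
// (2000-01-01 vs 2015-01-01) so that, per the in-code comment, they span
// separate shards; the queries then cover a measurement that exists in one
// shard but not another without panicking the server.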
+func TestServer_Query_Multiple_Measurements(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + // Make sure we do writes for measurements that will span across shards + writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, + }, + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server correctly supports data with identical tag values. +func TestServer_Query_IdenticalTagValues(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf("cpu,t1=val2 value=3 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "measurements with identical tag values - SELECT *, no GROUP BY", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`, + }, + &Query{ + name: "measurements with identical tag values - SELECT *, with GROUP BY", + command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`, + }, + &Query{ + name: 
"measurements with identical tag values - SELECT value no GROUP BY", + command: `SELECT value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:01:00Z",2],["2000-01-01T00:02:00Z",3]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle a query that involves accessing no shards. +func TestServer_Query_NoShards(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "selecting value should succeed", + command: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`, + exp: `{"results":[{}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query a non-existent field +func TestServer_Query_NonExistent(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "selecting value should succeed", + command: `SELECT value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting non-existent should succeed", + command: `SELECT foo FROM db0.rp0.cpu`, + exp: `{"results":[{}]}`, + }, + }...) 
+ + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can perform basic math +func TestServer_Query_Math(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db", newRetentionPolicyInfo("rp", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + writes := []string{ + "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), + "integer value=42i " + strconv.FormatInt(now.UnixNano(), 10), + } + + test := NewTest("db", "rp") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "SELECT multiple of float value", + command: `SELECT value * 2 from db.rp.float`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT multiple of float value", + command: `SELECT 2 * value from db.rp.float`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT multiple of integer value", + command: `SELECT value * 2 from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT float multiple of integer value", + command: `SELECT value * 2.0 from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT square of float value", + command: `SELECT value * value from db.rp.float`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT square of integer value", + command: `SELECT value * value from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT square of integer, float value", + command: `SELECT value * value,float from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value","float"],"values":[["%s",1764,null]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT square of integer value with alias", + command: `SELECT value * value as square from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","square"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT sum of aggregates", + command: `SELECT max(value) + min(value) from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","max_min"],"values":[["1970-01-01T00:00:00Z",84]]}]}]}`), + }, + &Query{ + name: "SELECT square of enclosed integer value", + command: `SELECT ((value) * (value)) from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, 
now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "SELECT square of enclosed integer value", + command: `SELECT (value * value) from db.rp.integer`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with the count aggregate function +func TestServer_Query_Count(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + writes := []string{ + `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10), + `ram value1=1.0,value2=2.0 ` + strconv.FormatInt(now.UnixNano(), 10), + } + + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + hour_ago := now.Add(-time.Hour).UTC() + + test.addQueries([]*Query{ + &Query{ + name: "selecting count(value) should succeed", + command: `SELECT count(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "selecting count(value) with where time should return result", + command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting count(value) with filter that excludes all results should return 0", + command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE value=100 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), + exp: `{"results":[{}]}`, + }, + &Query{ + name: "selecting count(value1) with matching filter against value2 should return correct result", + command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=2 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"ram","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting count(value1) with non-matching filter against value2 should return correct result", + command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=3 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), + exp: `{"results":[{}]}`, + }, + &Query{ + name: "selecting count(*) should error", + command: `SELECT count(*) FROM db0.rp0.cpu`, + exp: `{"error":"error parsing query: expected field argument in count()"}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with Now(). 
+func TestServer_Query_Now(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "where with time < now() should work", + command: `SELECT * FROM db0.rp0.cpu where time < now()`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "where with time < now() and GROUP BY * should work", + command: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "where with time > now() should return an empty result", + command: `SELECT * FROM db0.rp0.cpu where time > now()`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "where with time > now() with GROUP BY * should return an empty result", + command: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`, + exp: `{"results":[{}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with epoch precisions. 
+func TestServer_Query_EpochPrecision(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "nanosecond precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"n"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()), + }, + &Query{ + name: "microsecond precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"u"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)), + }, + &Query{ + name: "millisecond precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"ms"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)), + }, + &Query{ + name: "second precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"s"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)), + }, + &Query{ + name: "minute precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"m"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)), + }, + &Query{ + name: "hour precision", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + params: url.Values{"epoch": []string{"h"}}, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)), + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server works with tag queries. 
+func TestServer_Query_Tags(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", now.UnixNano()), + fmt.Sprintf("cpu,host=server02 value=50,core=2 %d", now.Add(1).UnixNano()), + + fmt.Sprintf("cpu1,host=server01,region=us-west value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=200 %d", mustParseTime(time.RFC3339Nano, "2010-02-28T01:03:37.703820946Z").UnixNano()), + fmt.Sprintf("cpu1,host=server03 value=300 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), + + fmt.Sprintf("cpu2,host=server01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf("cpu2 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), + + fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), + + fmt.Sprintf("status_code,url=http://www.example.com value=404 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T08:13:54.929026672Z").UnixNano()), + fmt.Sprintf("status_code,url=https://influxdb.com value=418 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T09:52:24.914395083Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "tag without field should return error", + command: `SELECT host FROM db0.rp0.cpu`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): tags should stream as values + }, + &Query{ + name: "field with tag should succeed", + command: `SELECT host, value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",100],["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "field with tag and GROUP BY should succeed", + command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["%s","server01",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value"],"values":[["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "field with two tags should succeed", + command: `SELECT host, value, core FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value","core"],"values":[["%s","server01",100,4],["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "field with two tags and GROUP BY should succeed", + command: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`, + exp: 
fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value","core"],"values":[["%s","server01",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value","core"],"values":[["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "select * with tags should succeed", + command: `SELECT * FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","core","host","value"],"values":[["%s",4,"server01",100],["%s",2,"server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "select * with tags with GROUP BY * should succeed", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "group by tag", + command: `SELECT value FROM db0.rp0.cpu GROUP by host`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "single field (EQ tag value1)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (2 EQ tags)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (OR different tags)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (OR with non-existent tag value)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (OR with all tag values)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (1 EQ and 1 NEQ tag)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region != 'us-west'`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "single field (EQ tag value2)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1)", + command: `SELECT value FROM 
db0.rp0.cpu1 WHERE host != 'server01'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1 AND NEQ tag value2)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1 OR NEQ tag value2)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always true, but that's the point. + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)", + command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "single field (NEQ tag value1, point without any tags)", + command: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`, + exp: `{"results":[{"series":[{"name":"cpu2","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, + }, + &Query{ + name: "single field (NEQ tag value1, point without any tags)", + command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`, + exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, + }, + &Query{ + name: "single field (regex tag match)", + command: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`, + exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (regex tag match)", + command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`, + exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, + }, + &Query{ + name: "single field (regex tag match with escaping)", + command: `SELECT value FROM db0.rp0.status_code WHERE url !~ /https\:\/\/influxdb\.com/`, + exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T08:13:54.929026672Z",404]]}]}]}`, + }, + &Query{ + name: "single field (regex tag match with escaping)", + command: `SELECT value FROM db0.rp0.status_code WHERE url =~ /https\:\/\/influxdb\.com/`, + exp: `{"results":[{"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T09:52:24.914395083Z",418]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server correctly queries with an alias. 
+func TestServer_Query_Alias(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "baseline query - SELECT * FROM db0.rp0.cpu", + command: `SELECT * FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","value"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, + }, + &Query{ + name: "basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu", + command: `SELECT steps, value as v FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","v"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, + }, + &Query{ + name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", + command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, + }, + &Query{ + name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", + command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, + }, + &Query{ + name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", + command: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sumv","sums"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, + }, + &Query{ + name: "double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu", + command: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",3,1.5]]}]}]}`, + }, + &Query{ + name: "double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu", + command: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mv","mv"],"values":[["1970-01-01T00:00:00Z",1.5,2]]}]}]}`, + }, + &Query{ + name: "double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu", + command: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mean","max"],"values":[["1970-01-01T00:00:00Z",1.5,null]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server will succeed and error for common scenarios. 
+func TestServer_Query_Common(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10))}, + } + + test.addQueries([]*Query{ + &Query{ + name: "selecting a from a non-existent database should error", + command: `SELECT value FROM db1.rp0.cpu`, + exp: `{"results":[{"error":"database not found: db1"}]}`, + }, + &Query{ + name: "selecting a from a non-existent retention policy should error", + command: `SELECT value FROM db0.rp1.cpu`, + exp: `{"results":[{"error":"retention policy not found: rp1"}]}`, + }, + &Query{ + name: "selecting a valid measurement and field should succeed", + command: `SELECT value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "explicitly selecting time and a valid measurement and field should succeed", + command: `SELECT time,value FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting a measurement that doesn't exist should result in empty set", + command: `SELECT value FROM db0.rp0.idontexist`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "selecting a field that doesn't exist should result in empty set", + command: `SELECT idontexist FROM db0.rp0.cpu`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "selecting wildcard without specifying a database should error", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"error":"database name required"}]}`, + }, + &Query{ + name: "selecting explicit field without specifying a database should error", + command: `SELECT value FROM cpu`, + exp: `{"results":[{"error":"database name required"}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query two points. 
+func TestServer_Query_SelectTwoPoints(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))}, + } + + test.addQueries( + &Query{ + name: "selecting two points should result in two points", + command: `SELECT * FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + &Query{ + name: "selecting two points with GROUP BY * should result in two points", + command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }, + ) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query two negative points. +func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))}, + } + + test.addQueries(&Query{ + name: "selecting two negative points should succeed", + command: `SELECT * FROM db0.rp0.cpu`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), + }) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can query with relative time. 
+func TestServer_Query_SelectRelativeTime(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + now := now() + yesterday := yesterday() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))}, + } + + test.addQueries([]*Query{ + &Query{ + name: "single point with time pre-calculated for past time queries yesterday", + command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)), + }, + &Query{ + name: "single point with time pre-calculated for relative time queries now", + command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)), + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various simple derivative queries. +func TestServer_Query_SelectRawDerivative(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf("cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate single derivate", + command: `SELECT derivative(value) from db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-200]]}]}]}`, + }, + &Query{ + name: "calculate derivate with unit", + command: `SELECT derivative(value, 10s) from db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-2000]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various simple non_negative_derivative queries. 
+func TestServer_Query_SelectRawNonNegativeDerivative(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010021000000000 +cpu value=15 1278010022000000000 +cpu value=10 1278010023000000000 +cpu value=20 1278010024000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate single non_negative_derivative", + command: `SELECT non_negative_derivative(value) from db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",10]]}]}]}`, + }, + &Query{ + name: "calculate single non_negative_derivative", + command: `SELECT non_negative_derivative(value, 10s) from db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-07-01T18:47:02Z",50],["2010-07-01T18:47:04Z",100]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various group by time derivative queries. +func TestServer_Query_SelectGroupByTimeDerivative(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=15 1278010021000000000 +cpu value=20 1278010022000000000 +cpu value=25 1278010023000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate derivative of count with unit default (2s) group by time", + command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of count with unit 4s group by time", + command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mean with unit default (2s) group by time", + command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mean with unit 4s group by time", + command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of median with unit default (2s) group by time", + command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of median with unit 4s group by time", + command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of sum with unit default (2s) group by time", + command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of sum with unit 4s group by time", + command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",40]]}]}]}`, + }, + &Query{ + name: "calculate derivative of first with unit default (2s) group by time", + command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of first with unit 4s group by time", + command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of last with unit default (2s) group by time", + command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of last with unit 4s group by time", + command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of min with unit default (2s) group by time", + command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of min with unit 4s group by time", + command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 
18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of max with unit default (2s) group by time", + command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of max with unit 4s group by time", + command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of percentile with unit default (2s) group by time", + command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of percentile with unit 4s group by time", + command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Ensure the server can handle various group by time derivative queries. 
+func TestServer_Query_SelectGroupByTimeDerivativeWithFill(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000 +cpu value=20 1278010021000000000 +`)}, + } + + test.addQueries([]*Query{ + &Query{ + name: "calculate derivative of count with unit default (2s) group by time with fill 0", + command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-2]]}]}]}`, + }, + &Query{ + name: "calculate derivative of count with unit 4s group by time with fill 0", + command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-4]]}]}]}`, + }, + &Query{ + name: "calculate derivative of count with unit default (2s) group by time with fill previous", + command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of count with unit 4s group by time with fill previous", + command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mean with unit default (2s) group by time with fill 0", + command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-15]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mean with unit 4s group by time with fill 0", + command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-30]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mean with unit default (2s) group by time with fill previous", + command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of mean with unit 4s group by time with fill previous", + command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of median with unit default (2s) group by time with fill 0", + command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-15]]}]}]}`, + }, + &Query{ + name: "calculate derivative of median with unit 4s group by time with fill 0", + command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-30]]}]}]}`, + }, + &Query{ + name: "calculate derivative of median with unit default (2s) group by time with fill previous", + command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of median with unit 4s group by time with fill previous", + command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of sum with unit default (2s) group by time with fill 0", + command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-30]]}]}]}`, + }, + &Query{ + name: "calculate derivative of sum with unit 4s group by time with fill 0", + command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-60]]}]}]}`, + }, + &Query{ + name: "calculate derivative of sum with unit default (2s) group by time with fill previous", + command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of sum with unit 4s group by time with fill previous", + command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of first with unit default (2s) group by time with fill 0", + command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of first with unit 4s group by time with fill 0", + command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of first with unit default (2s) group by time with fill previous", + command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of first with unit 4s group by time with fill previous", + command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of last with unit default (2s) group by time with fill 0", + command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of last with unit 4s group by time with fill 0", + command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-40]]}]}]}`, + }, + &Query{ + name: "calculate derivative of last with unit default (2s) group by time with fill previous", + command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of last with unit 4s group by time with fill previous", + command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of min with unit default (2s) group by time with fill 0", + command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of min with unit 4s group by time with fill 0", + command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of min with unit default (2s) group by time with fill previous", + command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of min with unit 4s group by time with fill previous", + command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of max with unit default (2s) group by time with fill 0", + command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of max with unit 4s group by time with fill 0", + command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-40]]}]}]}`, + }, + &Query{ + name: "calculate derivative of max with unit default (2s) group by time with fill previous", + command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of max with unit 4s group by time with fill previous", + command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of percentile with unit default (2s) group by time with fill 0", + command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-10]]}]}]}`, + }, + &Query{ + name: "calculate derivative of percentile with unit 4s group by time with fill 0", + command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-20]]}]}]}`, + }, + &Query{ + name: "calculate derivative of percentile with unit default (2s) group by time with fill previous", + command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + &Query{ + name: "calculate derivative of percentile with unit 4s group by time with fill previous", + command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// mergeMany ensures that when merging many series together and some of them have a different number +// of points than others in a group by interval the results are correct +func TestServer_Query_MergeMany(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + writes := []string{} + for i := 1; i < 11; i++ { + for j := 1; j < 5+i%3; j++ { + data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) + writes = append(writes, data) + } + } + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "GROUP by time", + command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, + }, + &Query{ + skip: true, + name: "GROUP by tag - FIXME issue #2875", + command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "GROUP by field", + command: `SELECT count(value) FROM db0.rp0.cpu group by value`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"value":""},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_SLimitAndSOffset(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + writes := []string{} + for i := 1; i < 10; i++ { + data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) + writes = append(writes, data) + } + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "SLIMIT 2 SOFFSET 1", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "SLIMIT 2 SOFFSET 3", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "SLIMIT 3 SOFFSET 8", + command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) 
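+	// Note: SLIMIT/SOFFSET page through series rather than points. With GROUP BY * each
+	// host tag (server-1..server-9) forms its own series, so SLIMIT 2 SOFFSET 1 should
+	// return server-2 and server-3, and SLIMIT 3 SOFFSET 8 leaves only server-9, as the
+	// expected JSON above reflects.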
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Regex(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "default db and rp", + command: `SELECT * FROM /cpu[13]/`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",10]]},{"name":"cpu3","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",30]]}]}]}`, + }, + &Query{ + name: "default db and rp with GROUP BY *", + command: `SELECT * FROM /cpu[13]/ GROUP BY *`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + &Query{ + name: "specifying db and rp", + command: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + &Query{ + name: "default db and specified rp", + command: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + &Query{ + name: "specified db and default rp", + command: `SELECT * FROM db0../cpu[13]/ GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, + }, + }...) 
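+	// Note: the measurement regex /cpu[13]/ matches cpu1 and cpu3 but not cpu2. The five
+	// variants above exercise how the database and retention policy get resolved: fully
+	// qualified (db0.rp0), via the "db" query param with the default rp set through
+	// SetDefaultRetentionPolicy, and the db0.. shorthand for "default rp of db0".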
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_Int(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + // int64 + &Query{ + name: "stddev with just one point - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT STDDEV(value) FROM int`, + exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_IntMax(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "large mean and stddev - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM intmax`, + exp: `{"results":[{"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`, + }, + }...) 
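+
+ // Both points carry the same maxInt64() value, so the expected MEAN is maxInt64()
+ // itself and the expected STDDEV is 0; presumably this guards against overflow or
+ // precision loss when aggregating values at the int64 boundary.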
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_IntMany(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "mean and stddev - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, + }, + &Query{ + name: "first - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT FIRST(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "first - int - epoch ms", + params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, + command: `SELECT FIRST(value) FROM intmany`, + exp: fmt.Sprintf(`{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[[%d,2]]}]}]}`, mustParseTime(time.RFC3339Nano, "1970-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond)), + }, + &Query{ + name: "last - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT LAST(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`, + }, + &Query{ + name: "spread - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SPREAD(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "median - even count - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, + }, + &Query{ + name: "median - odd count - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, + 
exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ + name: "distinct as call - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT(value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`, + }, + &Query{ + name: "distinct alt syntax - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT value FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`, + }, + &Query{ + name: "distinct select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT(host) FROM intmany`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): should be allowed, need to stream tag values + }, + &Query{ + name: "distinct alt select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT DISTINCT host FROM intmany`, + exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, + skip: true, // FIXME(benbjohnson): should be allowed, need to stream tag values + }, + &Query{ + name: "count distinct - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT value) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "count distinct as call - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT(value)) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "count distinct select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT host) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): stream tag values + }, + &Query{ + name: "count distinct as call select tag - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(DISTINCT host) FROM intmany`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): stream tag values + }, + }...) 
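+
+ // For the MEDIAN queries above: with all eight points the median is the average of
+ // the two middle values (4 and 5), hence 4.5; restricting to time < 00:01:10 drops
+ // the 9 and leaves seven points, so the odd-count median is exactly 4.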
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "max order by time with time specified group by 10s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`, + }, + &Query{ + name: "max order by time without time specified group by 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, + }, + &Query{ + name: "max order by time with time specified group by 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, + }, + &Query{ + name: "min order by time without time specified group by 15s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, + }, + &Query{ + 
name: "min order by time with time specified group by 15s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, + }, + &Query{ + name: "first order by time without time specified group by 15s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, + }, + &Query{ + name: "first order by time with time specified group by 15s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, + }, + &Query{ + name: "last order by time without time specified group by 15s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, + }, + &Query{ + name: "last order by time with time specified group by 15s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, + }, + }...) 
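+
+ // Each selector above is exercised twice, with and without an explicit time column
+ // in the select list; when a GROUP BY time() interval is present both forms are
+ // expected to produce identical bucketed results.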
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "aggregate order by time desc", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`, + exp: `{"results":[{"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + }...) 
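+
+ // ORDER BY time DESC only reverses the bucket order; the 00:01:00 bucket reports 7
+ // rather than 9 because the WHERE clause ends at 00:01:00 and excludes the 9.0
+ // point written at 00:01:10.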
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_IntOverlap(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + /* &Query{ + name: "aggregation with no interval - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, + exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "sum - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:10Z",30]]}]}]}`, + }, + */&Query{ + name: "aggregation with a null field value - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "multiple aggregations - int", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, + }, + &Query{ + skip: true, + name: "multiple aggregations with division - int FIXME issue #2879", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_FloatSingle(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "stddev with just one point - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT STDDEV(value) FROM floatsingle`, + exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_FloatMany(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "mean and stddev - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, + }, + &Query{ + name: "first - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT FIRST(value) FROM floatmany`, + exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "last - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT LAST(value) FROM floatmany`, + exp: 
`{"results":[{"series":[{"name":"floatmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`,
+ },
+ &Query{
+ name: "spread - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT SPREAD(value) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`,
+ },
+ &Query{
+ name: "median - even count - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT MEDIAN(value) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`,
+ },
+ &Query{
+ name: "median - odd count - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`,
+ },
+ &Query{
+ name: "distinct as call - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT DISTINCT(value) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`,
+ },
+ &Query{
+ name: "distinct alt syntax - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT DISTINCT value FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`,
+ },
+ &Query{
+ name: "distinct select tag - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT DISTINCT(host) FROM floatmany`,
+ exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`,
+ skip: true, // FIXME(benbjohnson): should be allowed, stream tag values
+ },
+ &Query{
+ name: "distinct alt select tag - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT DISTINCT host FROM floatmany`,
+ exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`,
+ skip: true, // FIXME(benbjohnson): should be allowed, stream tag values
+ },
+ &Query{
+ name: "count distinct - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT COUNT(DISTINCT value) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
+ },
+ &Query{
+ name: "count distinct as call - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`,
+ },
+ &Query{
+ name: "count distinct select tag - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT COUNT(DISTINCT host) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`,
+ skip: true, // FIXME(benbjohnson): stream tag values
+ },
+ &Query{
+ name: "count distinct as call select tag - float",
+ params: url.Values{"db": []string{"db0"}},
+ command: `SELECT COUNT(DISTINCT host) FROM floatmany`,
+ exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`,
+ skip:
true, // FIXME(benbjohnson): stream tag values + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_FloatOverlap(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "aggregation with no interval - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`, + exp: `{"results":[{"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, + }, + &Query{ + name: "sum - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, + }, + &Query{ + name: "aggregation with a null field value - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "multiple aggregations - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, + }, + &Query{ + name: "multiple aggregations with division - float", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`, + exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) 
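+
+ // For the division query above: us-east has sum = 20 + 30 = 50 and mean = 25, so
+ // div = 2, while us-west has a single point where sum and mean are both 100, so
+ // div = 1.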
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_Load(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "group by multiple dimensions", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM load GROUP BY region, host`, + exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "group by multiple dimensions", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value)*2 FROM load`, + exp: `{"results":[{"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, + }, + &Query{ + name: "group by multiple dimensions", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value)/2 FROM load`, + exp: `{"results":[{"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_CPU(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "aggregation with WHERE and AND", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Aggregates_String(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join([]string{ + fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), + }, "\n")}, + } + + test.addQueries([]*Query{ + // strings + &Query{ + name: "STDDEV on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT STDDEV(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator + }, + &Query{ + name: "MEAN on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEAN(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator + }, + &Query{ + name: "MEDIAN on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT MEDIAN(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator + }, + &Query{ + name: "COUNT on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT COUNT(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator + }, + &Query{ + name: "FIRST on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT FIRST(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["2000-01-01T00:00:03Z","first"]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator + }, + &Query{ + name: "LAST on string data - string", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT LAST(value) FROM stringdata`, + exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["2000-01-01T00:00:04Z","last"]]}]}]}`, + skip: true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_AggregateSelectors(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + fmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), + fmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), + fmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), + fmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:20Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "baseline", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM network`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","core","host","region","rx","tx"],"values":[["2000-01-01T00:00:00Z",2,"server01","west",10,20],["2000-01-01T00:00:10Z",3,"server02","west",40,50],["2000-01-01T00:00:20Z",4,"server03","east",40,55],["2000-01-01T00:00:30Z",1,"server04","east",40,60],["2000-01-01T00:00:40Z",2,"server05","west",50,70],["2000-01-01T00:00:50Z",3,"server06","east",50,40],["2000-01-01T00:01:00Z",4,"server07","west",70,30],["2000-01-01T00:01:10Z",1,"server08","east",90,10],["2000-01-01T00:01:20Z",2,"server09","east",5,4]]}]}]}`, + }, + &Query{ + name: "max - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, + }, + &Query{ + name: "max - baseline 30s - epoch ms", + params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, + command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= 
'2000-01-01T00:01:29Z' group by time(30s)`, + exp: fmt.Sprintf( + `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[[%d,40],[%d,50],[%d,90]]}]}]}`, + mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond), + mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()/int64(time.Millisecond), + mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()/int64(time.Millisecond), + ), + }, + &Query{ + name: "max - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, + }, + &Query{ + name: "max - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, + }, + &Query{ + name: "max - time and tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, + }, + &Query{ + name: "min - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, + }, + &Query{ + name: "min - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, + }, + &Query{ + name: "min - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, + }, + &Query{ + name: "min - time and tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, + }, + &Query{ + name: "max,min - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT max(rx), min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: 
`{"results":[{"series":[{"name":"network","columns":["time","max","min"],"values":[["2000-01-01T00:00:00Z",40,10],["2000-01-01T00:00:30Z",50,40],["2000-01-01T00:01:00Z",90,5]]}]}]}`, + }, + &Query{ + name: "first - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, + }, + &Query{ + name: "first - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, + }, + &Query{ + name: "first - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, + }, + &Query{ + name: "first - time and tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, + }, + &Query{ + name: "last - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, + }, + &Query{ + name: "last - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, + }, + &Query{ + name: "last - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, + }, + &Query{ + name: "last - time and tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, + }, + &Query{ + name: "count - baseline 30s", + params: url.Values{"db": 
[]string{"db0"}}, + command: `SELECT count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:30Z",3],["2000-01-01T00:01:00Z",3]]}]}]}`, + }, + &Query{ + name: "count - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "count - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "distinct - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:10Z",40],["2000-01-01T00:00:30Z",40],["2000-01-01T00:00:40Z",50],["2000-01-01T00:01:00Z",70],["2000-01-01T00:01:10Z",90],["2000-01-01T00:01:20Z",5]]}]}]}`, + }, + &Query{ + name: "distinct - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, + }, + &Query{ + name: "distinct - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, + }, + &Query{ + name: "mean - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",46.666666666666664],["2000-01-01T00:01:00Z",55]]}]}]}`, + }, + &Query{ + name: "mean - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "mean - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "median - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: 
`{"results":[{"series":[{"name":"network","columns":["time","median"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, + }, + &Query{ + name: "median - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "median - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "spread - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","spread"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",10],["2000-01-01T00:01:00Z",85]]}]}]}`, + }, + &Query{ + name: "spread - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "spread - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "stddev - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","stddev"],"values":[["2000-01-01T00:00:00Z",17.320508075688775],["2000-01-01T00:00:30Z",5.773502691896258],["2000-01-01T00:01:00Z",44.44097208657794]]}]}]}`, + }, + &Query{ + name: "stddev - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "stddev - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "percentile - baseline 30s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"results":[{"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, + }, + &Query{ + name: "percentile - time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT time, percentile(rx, 
75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + &Query{ + name: "percentile - tx", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT tx, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, + exp: `{"error":"error parsing query: mixing aggregate and non-aggregate queries is not supported"}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_TopInt(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + // cpu data with overlapping duplicate values + // hour 0 + fmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + // hour 1 + fmt.Sprintf(`cpu,host=server04 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()), + fmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:20Z").UnixNano()), + // hour 2 + fmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()), + + // memory data + // hour 0 + fmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + // hour 1 + fmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), + // hour 2 + fmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), + } + + test := NewTest("db0", 
"rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "top - cpu", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 1) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 2 values", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 3 values - sorts on tie properly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - with tag", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T01:00:10Z",7,"server05"],["2000-01-01T02:00:10Z",9,"server08"]]}]}]}`, + }, + &Query{ + name: "top - cpu - 3 values with limit 2", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3) FROM cpu limit 2`, + exp: `{"error":"error parsing query: limit (3) in top function can not be larger than the LIMIT (2) in the select statement"}`, + }, + &Query{ + name: "top - cpu - hourly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`, + }, + &Query{ + name: "top - cpu - 2 values hourly", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, + }, + &Query{ + name: "top - memory - 2 values, two tags", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 2), host, service FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T01:00:00Z",2001,"b","mysql"],["2000-01-01T02:00:00Z",2002,"b","mysql"]]}]}]}`, + }, + &Query{ + name: "top - memory - host tag with limit 2", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, 
host, 2) FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host"],"values":[["2000-01-01T02:00:00Z",2002,"b"],["2000-01-01T02:00:00Z",1002,"a"]]}]}]}`, + }, + &Query{ + name: "top - memory - host tag with limit 2, service tag in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2), service FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, + }, + &Query{ + name: "top - memory - service tag with limit 2, host tag in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, service, 2), host FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","service","host"],"values":[["2000-01-01T02:00:00Z",2002,"mysql","b"],["2000-01-01T02:00:00Z",1502,"redis","b"]]}]}]}`, + }, + &Query{ + name: "top - memory - host and service tag with limit 2", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, service, 2) FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"]]}]}]}`, + }, + &Query{ + name: "top - memory - host tag with limit 2 with service tag in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, 2), service FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, + }, + &Query{ + name: "top - memory - host and service tag with limit 3", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT TOP(value, host, service, 3) FROM memory`, + exp: `{"results":[{"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, + }, + + // TODO + // - Test that specifying fields or tags in the function will rewrite the query to expand them to the fields + // - Test that a field can be used in the top function + // - Test that when a tag and a field share the same name, asking for that name returns the field before the tag + // - Test that `select top(value, host, 2)` brings back only one value when there is only one value for `host` + // - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` returns only the unique values in time buckets where host is unique, and not always 4 values + + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP: %s", query.name) + continue + } + + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// Test various aggregates when different series only have data for the same timestamp.
+func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`series,host=i value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "last from multiple series with identical timestamp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT last(value) FROM "series"`, + exp: `{"results":[{"series":[{"name":"series","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + repeat: 100, + }, + &Query{ + name: "first from multiple series with identical timestamp", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT first(value) FROM "series"`, + exp: `{"results":[{"series":[{"name":"series","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + repeat: 100, + }, + }...) 
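All of the points written above share a single timestamp, so the first()/last() queries run against nine series that only have data at that one instant. Purely as a sketch of the write format those fmt.Sprintf calls build (the measurement, tag, and values below are illustrative stand-ins, not taken from the harness), the same line-protocol construction as a standalone program:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// Every point gets the same nanosecond timestamp, mirroring the
	// identical-timestamp writes in the test above.
	ts := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()

	hosts := []string{"a", "b", "c"}
	points := make([]string, 0, len(hosts))
	for i, h := range hosts {
		// Line protocol: measurement,tag=value field=value timestamp.
		points = append(points, fmt.Sprintf(`series,host=%s value=%d %d`, h, i+1, ts))
	}

	// Joined with newlines, this is the body a single batched write sends.
	fmt.Println(strings.Join(points, "\n"))
}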
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + for n := 0; n <= query.repeat; n++ { + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + } +} + +// This will test that a GROUP BY query observes the time range you asked for, +// but only puts values into the buckets that fall within that time range +func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:05Z").UnixNano()), + fmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:08Z").UnixNano()), + fmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:09Z").UnixNano()), + fmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "sum all time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",21]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing first point", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing first points (null for bucket)", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing last point - 2 time intervals", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12]]}]}]}`, + }, + &Query{ + name: "sum all time grouped by time 5s missing last 2 points - 2 time intervals", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",7]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Write_Precision(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []struct { + write string + params url.Values + }{ + { + write: fmt.Sprintf("cpu_n0_precision value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), + }, + { + write: fmt.Sprintf("cpu_n1_precision value=1.1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), + params: url.Values{"precision": []string{"n"}}, + }, + { + write: fmt.Sprintf("cpu_u_precision value=100 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)), + params: url.Values{"precision": []string{"u"}}, + }, + { + write: fmt.Sprintf("cpu_ms_precision value=200 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)), + params: url.Values{"precision": []string{"ms"}}, + }, + { + write: fmt.Sprintf("cpu_s_precision value=300 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Second).UnixNano()/int64(time.Second)), + params: url.Values{"precision": []string{"s"}}, + }, + { + write: fmt.Sprintf("cpu_m_precision value=400 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Minute).UnixNano()/int64(time.Minute)), + params: url.Values{"precision": []string{"m"}}, + }, + { + write: fmt.Sprintf("cpu_h_precision value=500 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Hour).UnixNano()/int64(time.Hour)), + params: url.Values{"precision": []string{"h"}}, + }, + } + + test := NewTest("db0", "rp0") + + test.addQueries([]*Query{ + &Query{ + name: "point with nanosecond precision time - no precision specified on write", + command: `SELECT * FROM cpu_n0_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_n0_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1]]}]}]}`, + }, + &Query{ + name: "point with nanosecond precision time", + command: `SELECT * FROM cpu_n1_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_n1_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1.1]]}]}]}`, + }, + &Query{ + name: "point 
with microsecond precision time", + command: `SELECT * FROM cpu_u_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_u_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012Z",100]]}]}]}`, + }, + &Query{ + name: "point with millisecond precision time", + command: `SELECT * FROM cpu_ms_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789Z",200]]}]}]}`, + }, + &Query{ + name: "point with second precision time", + command: `SELECT * FROM cpu_s_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56Z",300]]}]}]}`, + }, + &Query{ + name: "point with minute precision time", + command: `SELECT * FROM cpu_m_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_m_precision","columns":["time","value"],"values":[["2000-01-01T12:34:00Z",400]]}]}]}`, + }, + &Query{ + name: "point with hour precision time", + command: `SELECT * FROM cpu_h_precision`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"cpu_h_precision","columns":["time","value"],"values":[["2000-01-01T12:00:00Z",500]]}]}]}`, + }, + }...) + + // we are doing writes that require parameter changes, so we are fighting the test harness a little to make this happen properly + for _, w := range writes { + test.writes = Writes{ + &Write{data: w.write}, + } + test.params = w.params + test.initialized = false + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Wildcards(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + + fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + + fmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "wildcard", + params: url.Values{"db": 
[]string{"db0"}}, + command: `SELECT * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, + }, + &Query{ + name: "wildcard with group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard GROUP BY *`, + exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, + }, + &Query{ + name: "GROUP BY queries", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(value) FROM wgroup GROUP BY *`, + exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`, + }, + &Query{ + name: "GROUP BY queries with time", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`, + exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, + }, + &Query{ + name: "wildcard and field in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT value, * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, + }, + &Query{ + name: "field and wildcard in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT value, * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, + }, + &Query{ + name: "field and wildcard in group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard GROUP BY region, *`, + exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, + }, + &Query{ + name: "wildcard and field in group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard GROUP BY *, region`, + exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, + }, + &Query{ + name: "wildcard with multiple measurements", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM m1, m2`, + exp: 
`{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, + }, + &Query{ + name: "wildcard with multiple measurements via regex", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM /^m.*/`, + exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, + }, + &Query{ + name: "wildcard with multiple measurements via regex and limit", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM db0../^m.*/ LIMIT 2`, + exp: `{"results":[{"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_WildcardExpansion(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + + fmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`dupnames,region=us-east,day=2 value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "wildcard", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, + }, + &Query{ + name: "no wildcard in select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT cpu, host, region, 
value FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, + }, + &Query{ + name: "no wildcard in select, preserve column order", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT host, cpu, region, value FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","host","cpu","region","value"],"values":[["2000-01-01T00:00:00Z","A",80,"us-east",10],["2000-01-01T00:00:10Z","B",90,"us-east",20],["2000-01-01T00:00:20Z","B",70,"us-west",30],["2000-01-01T00:00:30Z","A",60,"us-east",40]]}]}]}`, + }, + + &Query{ + name: "no wildcard with alias", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT cpu as c, host as h, region, value FROM wildcard`, + exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, + }, + &Query{ + name: "duplicate tag and field key, always favor field over tag", + command: `SELECT * FROM dupnames`, + params: url.Values{"db": []string{"db0"}}, + exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","region","value"],"values":[["2000-01-01T00:00:00Z",3,"us-east",10],["2000-01-01T00:00:10Z",2,"us-east",20],["2000-01-01T00:00:20Z",1,"us-west",30]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_AcrossShardsAndFields(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "two results for cpu", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT load FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2000-01-01T00:00:00Z",100],["2010-01-01T00:00:00Z",200]]}]}]}`, + }, + &Query{ + name: "two results for cpu, multi-select", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT core,load FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, + }, + &Query{ + name: "two results for cpu, wildcard select", + params: url.Values{"db": []string{"db0"}}, 
+ command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, + }, + &Query{ + name: "one result for core", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT core FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2015-01-01T00:00:00Z",4]]}]}]}`, + }, + &Query{ + name: "empty result set from non-existent field", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT foo FROM cpu`, + exp: `{"results":[{}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Where_Fields(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), + + fmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:01:02Z").UnixNano()), + + fmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:01Z").UnixNano()), + fmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:02Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + // non type specific + &Query{ + name: "missing measurement with group by", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT load from missing group by *`, + exp: `{"results":[{}]}`, + }, + + // string + &Query{ + name: "single string field", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT alert_id FROM cpu WHERE alert_id='alert'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, + }, + &Query{ + name: "string AND query, all fields in SELECT", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id","_cust"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant","johnson brothers"]]}]}]}`, + }, + &Query{ + name: "string AND query, all fields in SELECT, one in parenthesis", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant"]]}]}]}`, + }, + &Query{ + name: "string underscored field", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, + }, + &Query{ + name: "string no match", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT alert_id FROM cpu WHERE _cust='acme'`, + exp: `{"results":[{}]}`, + }, + + // float64 + &Query{ + name: "float64 GT no match", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load > 100`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "float64 GTE match one", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load >= 100`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, + }, + &Query{ + name: "float64 EQ match upper bound", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load = 100`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, + }, + &Query{ + name: "float64 LTE match two", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load <= 100`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100],["2009-11-10T23:01:02Z",80]]}]}]}`, + }, + &Query{ + name: "float64 GT match one", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load > 99`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, + }, + &Query{ + name: "float64 EQ no match", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load = 99`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "float64 LT match one", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load < 99`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, + }, + &Query{ + name: "float64 LT no match", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load < 80`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "float64 NE match one", + params: url.Values{"db": []string{"db0"}}, + command: `select load from cpu where load != 100`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, + }, + + // int64 + &Query{ + name: "int64 GT no match", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core > 4`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "int64 GTE match one", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core >= 4`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, + }, + &Query{ + name: "int64 EQ match upper bound", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core = 4`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, + }, + &Query{ + name: "int64 LTE match two ", + params: url.Values{"db": []string{"db0"}}, + 
command: `select core from cpu where core <= 4`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4],["2009-11-10T23:01:02Z",2]]}]}]}`, + }, + &Query{ + name: "int64 GT match one", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core > 3`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, + }, + &Query{ + name: "int64 EQ no match", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core = 3`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "int64 LT match one", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core < 3`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, + }, + &Query{ + name: "int64 LT no match", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core < 2`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "int64 NE match one", + params: url.Values{"db": []string{"db0"}}, + command: `select core from cpu where core != 4`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, + }, + + // bool + &Query{ + name: "bool EQ match true", + params: url.Values{"db": []string{"db0"}}, + command: `select local from clicks where local = true`, + exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:01Z",true]]}]}]}`, + }, + &Query{ + name: "bool EQ match false", + params: url.Values{"db": []string{"db0"}}, + command: `select local from clicks where local = false`, + exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, + }, + + &Query{ + name: "bool NE match one", + params: url.Values{"db": []string{"db0"}}, + command: `select local from clicks where local != true`, + exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, + }, + }...) 
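The exp strings in these WHERE-clause cases are complete JSON response bodies that the harness compares against what the server returns. As a minimal sketch that assumes nothing about the harness internals, the same shape can be decoded with encoding/json to pull individual values out of a response like the float64 cases above:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// response mirrors the shape of the exp strings: results -> series -> columns/values.
type response struct {
	Results []struct {
		Series []struct {
			Name    string          `json:"name"`
			Columns []string        `json:"columns"`
			Values  [][]interface{} `json:"values"`
		} `json:"series"`
	} `json:"results"`
}

func main() {
	body := `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`

	var r response
	if err := json.Unmarshal([]byte(body), &r); err != nil {
		log.Fatal(err)
	}

	s := r.Results[0].Series[0]
	fmt.Println(s.Name, s.Columns, s.Values[0]) // cpu [time load] [2009-11-10T23:00:02Z 100]
}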
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Where_With_Tags(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`where_events,tennant=paul foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=paul foo="baz" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=paul foo="bat" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=todd foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), + fmt.Sprintf(`where_events,tennant=david foo="bap" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "tag field and time", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`, + exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, + }, + &Query{ + name: "where on tag that should be double quoted but isn't", + params: url.Values{"db": []string{"db0"}}, + command: `show series where data-center = 'foo'`, + exp: `{"error":"error parsing query: found DATA, expected identifier, string, number, bool at line 1, char 19"}`, + }, + }...) 
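The queries above go through the harness's Query.Execute, but the db and q values are ordinary URL query parameters. The sketch below sends the same parameters with net/http; the localhost address and /query path are assumptions about a locally running server, not something this test file defines:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Assumed local endpoint; the test harness in this file manages its own server.
	base := "http://localhost:8086/query"

	params := url.Values{}
	params.Set("db", "db0")
	params.Set("q", `select foo from where_events where tennant = 'paul'`)

	resp, err := http.Get(base + "?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}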
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_LimitAndOffset(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), + fmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), + fmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "limit on points", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from "limited" LIMIT 2`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "limit higher than the number of data points", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from "limited" LIMIT 20`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, + }, + &Query{ + name: "limit and offset", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from "limited" LIMIT 2 OFFSET 1`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, + }, + &Query{ + name: "limit + offset equal to total number of points", + params: url.Values{"db": []string{"db0"}}, + command: `select foo from "limited" LIMIT 3 OFFSET 3`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, + }, + &Query{ + name: "limit - offset higher than number of points", + command: `select foo from "limited" LIMIT 2 OFFSET 20`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "limit on points with group by time", + command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "limit higher than the number of data points with group by time", + command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`, + exp: 
`{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "limit and offset with group by time", + command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "limit + offset equal to the number of points with group by time", + command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`, + exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "limit - offset higher than number of points with group by time", + command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Fill(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), + fmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), + fmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:16Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "fill with value", + command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill with value, WHERE all values match condition", + command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + 
name: "fill with value, WHERE no values match condition", + command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill with previous", + command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill with none, i.e. clear out nulls", + command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill defaults to null", + command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill defaults to 0 for count", + command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",0],["2009-11-10T23:00:15Z",1]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill none drops 0s for count", + command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(none)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "fill previous overwrites 0s for count", + command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(previous)`, + exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_Chunk(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := make([]string, 10001) // 10,000 is the default chunking size, even when no chunking requested. + expectedValues := make([]string, len(writes)) + for i := 0; i < len(writes); i++ { + writes[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano()) + expectedValues[i] = fmt.Sprintf(`["%s",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i) + } + expected := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[%s]}]}]}`, strings.Join(expectedValues, ",")) + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "SELECT all values, no chunking", + command: `SELECT value FROM cpu`, + exp: expected, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db1", "rp0"); err != nil { + t.Fatal(err) + } + + writes := strings.Join([]string{ + fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`memory,host=serverB,region=uswest val=33.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + }, "\n") + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: writes}, + &Write{db: "db1", data: writes}, + } + + test.addQueries([]*Query{ + &Query{ + name: "verify cpu measurement exists in db1", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db1"}}, + }, + &Query{ + name: "Drop Measurement, series tags preserved tests", + command: `SHOW MEASUREMENTS`, + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show series", + command: `SHOW SERIES`, + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]},{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "ensure we can query for memory with both tags", + command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`, + exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "drop measurement cpu", + command: `DROP MEASUREMENT cpu`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify measurements in DB that we deleted a measurement from", + command: `SHOW MEASUREMENTS`, + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["memory"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify series", + command: `SHOW SERIES`, + exp: `{"results":[{"series":[{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify cpu measurement is gone", + command: `SELECT * FROM cpu`, + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify cpu measurement is NOT gone from other DB", + command: `SELECT * FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, + params: url.Values{"db": []string{"db1"}}, + }, + &Query{ + name: "verify selecting from a tag 'host' still works", + command: `SELECT * FROM memory where host='serverB' GROUP BY *`, + exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify selecting from a tag 'region' still works", + command: `SELECT * FROM memory where region='uswest' GROUP BY *`, + exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify selecting from a tag 'host' and 'region' still works", + command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`, + exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "Drop non-existant measurement", + command: `DROP MEASUREMENT doesntexist`, + exp: `{"results":[{"error":"measurement not found: doesntexist"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + // Test that re-inserting the measurement works fine. 
+ for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + + test = NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: writes}, + } + + test.addQueries([]*Query{ + &Query{ + name: "verify measurements after recreation", + command: `SHOW MEASUREMENTS`, + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "verify cpu measurement has been re-inserted", + command: `SELECT * FROM cpu GROUP BY *`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowSeries(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), + fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: `show series`, + command: "SHOW SERIES", + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series from 
measurement`, + command: "SHOW SERIES FROM cpu", + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series from regular expression`, + command: "SHOW SERIES FROM /[cg]pu/", + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series with where tag`, + command: "SHOW SERIES WHERE region = 'uswest'", + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=uswest","server01","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series where tag matches regular expression`, + command: "SHOW SERIES WHERE region =~ /ca.*/", + exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series`, + command: "SHOW SERIES WHERE host !~ /server0[12]/", + exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series with from and where`, + command: "SHOW SERIES FROM cpu WHERE region = 'useast'", + exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series with WHERE time should fail`, + command: "SHOW SERIES WHERE time > now() - 1h", + exp: `{"results":[{"error":"SHOW SERIES doesn't support time in WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show series with WHERE field should fail`, + command: "SHOW SERIES WHERE value > 10.0", + exp: `{"results":[{"error":"SHOW SERIES doesn't support fields in WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
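+	// Standard execution loop: the first iteration seeds the sample data via
+	// test.init, then each query is executed and its JSON result compared to exp.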
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowMeasurements(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: `show measurements with limit 2`, + command: "SHOW MEASUREMENTS LIMIT 2", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements using WITH`, + command: "SHOW MEASUREMENTS WITH MEASUREMENT = cpu", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements using WITH and regex`, + command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements using WITH and regex - no matches`, + command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/", + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements where tag matches regular expression`, + command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["gpu"],["other"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements where tag does not match a regular expression`, + command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/", + exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show measurements with time in WHERE clauses errors`, + command: `SHOW MEASUREMENTS WHERE time > now() - 1h`, + exp: `{"results":[{"error":"SHOW MEASUREMENTS doesn't support time in 
WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowTagKeys(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: `show tag keys`, + command: "SHOW TAG KEYS", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys from", + command: "SHOW TAG KEYS FROM cpu", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys from regex", + command: "SHOW TAG KEYS FROM /[cg]pu/", + exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys measurement not found", + command: "SHOW TAG KEYS FROM doesntexist", + exp: `{"results":[{}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag keys with time in WHERE clause errors", + command: "SHOW TAG KEYS FROM cpu WHERE time > now() - 1h", + exp: `{"results":[{"error":"SHOW TAG KEYS doesn't support time in WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: "show tag values with key", + command: "SHOW TAG VALUES WITH KEY = host", + exp: 
`{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where`, + command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where matches the regular expression`, + command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`, + exp: `{"results":[{"series":[{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and where does not match the regular expression`, + command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`, + exp: `{"results":[{"series":[{"name":"disk","columns":["key","value"],"values":[["region","caeast"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key in and where does not match the regular expression`, + command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and measurement matches regular expression`, + command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show tag values with key and time in WHERE clause should error`, + command: `SHOW TAG VALUES WITH KEY = host WHERE time > now() - 1h`, + exp: `{"results":[{"error":"SHOW TAG VALUES doesn't support time in WHERE clause"}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_ShowFieldKeys(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: `show field keys`, + command: `SHOW FIELD KEYS`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"disk","columns":["fieldKey"],"values":[["field8"],["field9"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show field keys from measurement`, + command: `SHOW FIELD KEYS FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + &Query{ + name: `show field keys measurement with regex`, + command: `SHOW FIELD KEYS FROM /[cg]pu/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) 
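+	// The expected output groups field keys by measurement and lists them in
+	// sorted order, which is what the exp strings above encode.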
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_ContinuousQuery(t *testing.T) { + t.Skip() + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + runTest := func(test *Test, t *testing.T) { + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + } + + // Start times of CQ intervals. + interval0 := time.Now().Add(-time.Second).Round(time.Second * 5) + interval1 := interval0.Add(-time.Second * 5) + interval2 := interval0.Add(-time.Second * 10) + interval3 := interval0.Add(-time.Second * 15) + + writes := []string{ + // Point too far in the past for CQ to pick up. + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval3.Add(time.Second).UnixNano()), + + // Points two intervals ago. + fmt.Sprintf(`cpu,host=server01 value=100 %d`, interval2.Add(time.Second).UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval2.Add(time.Second*2).UnixNano()), + fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, interval2.Add(time.Second*3).UnixNano()), + + // Points one interval ago. + fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, interval1.Add(time.Second).UnixNano()), + fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval1.Add(time.Second*2).UnixNano()), + + // Points in the current interval. 
+ fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second).UnixNano()), + fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second*2).UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + test.addQueries([]*Query{ + &Query{ + name: `create another retention policy for CQ to write into`, + command: `CREATE RETENTION POLICY rp1 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create continuous query with backreference", + command: `CREATE CONTINUOUS QUERY "cq1" ON db0 BEGIN SELECT count(value) INTO "rp1".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s) END`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: `create another retention policy for CQ to write into`, + command: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "create continuous query with backreference and group by time", + command: `CREATE CONTINUOUS QUERY "cq2" ON db0 BEGIN SELECT count(value) INTO "rp2".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s), * END`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: `show continuous queries`, + command: `SHOW CONTINUOUS QUERIES`, + exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["cq1","CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp1\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s) END"],["cq2","CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp2\".:MEASUREMENT FROM \"db0\".\"rp0\"./[cg]pu/ GROUP BY time(5s), * END"]]}]}]}`, + }, + }...) + + // Run first test to create CQs. + runTest(&test, t) + + // Trigger CQs to run. + u := fmt.Sprintf("%s/data/process_continuous_queries?time=%d", s.URL(), interval0.UnixNano()) + if _, err := s.HTTPPost(u, nil); err != nil { + t.Fatal(err) + } + + // Wait for CQs to run. TODO: fix this ugly hack + time.Sleep(time.Second * 5) + + // Setup tests to check the CQ results. 
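+	// cq1 wrote count(value) into rp1 with no GROUP BY tags, while cq2 wrote into
+	// rp2 grouped by time and all tags, so its expected rows carry host/region values.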
+ test2 := NewTest("db0", "rp1") + test2.addQueries([]*Query{ + &Query{ + name: "check results of cq1", + command: `SELECT * FROM "rp1"./[cg]pu/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",3,null,null,null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",2,null,null,null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,null,null,null]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + // TODO: restore this test once this is fixed: https://github.com/influxdata/influxdb/issues/3968 + &Query{ + skip: true, + name: "check results of cq2", + command: `SELECT * FROM "rp2"./[cg]pu/`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count","host","region","value"],"values":[["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","uswest",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","",null],["` + interval2.UTC().Format(time.RFC3339Nano) + `",1,"server01","useast",null]]},{"name":"gpu","columns":["time","count","host","region","value"],"values":[["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server02","useast",null],["` + interval1.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null],["` + interval0.UTC().Format(time.RFC3339Nano) + `",1,"server03","caeast",null]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + // Run second test to check CQ results. + runTest(&test2, t) +} + +// Tests that a known CQ query with concurrent writes does not deadlock the server +func TestServer_ContinuousQuery_Deadlock(t *testing.T) { + + // Skip until #3517 & #3522 are merged + t.Skip("Skipping CQ deadlock test") + if testing.Short() { + t.Skip("skipping CQ deadlock test") + } + t.Parallel() + s := OpenServer(NewConfig(), "") + defer func() { + s.Close() + // Nil the server so our deadlock detector goroutine can determine if we completed writes + // without timing out + s.Server = nil + }() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + + test.addQueries([]*Query{ + &Query{ + name: "create continuous query", + command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT sum(visits) as visits INTO test_1m FROM myseries GROUP BY time(1m), host END`, + exp: `{"results":[{}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } + + // Deadlock detector. If the deadlock is fixed, this test should complete all the writes in ~2.5s seconds (with artifical delays + // added). After 10 seconds, if the server has not been closed then we hit the deadlock bug. + iterations := 0 + go func(s *Server) { + <-time.After(10 * time.Second) + + // If the server is not nil then the test is still running and stuck. We panic to avoid + // having the whole test suite hang indefinitely. + if s.Server != nil { + panic("possible deadlock. 
writes did not complete in time") + } + }(s) + + for { + + // After the second write, if the deadlock exists, we'll get a write timeout and + // all subsequent writes will timeout + if iterations > 5 { + break + } + writes := []string{} + for i := 0; i < 1000; i++ { + writes = append(writes, fmt.Sprintf(`myseries,host=host-%d visits=1i`, i)) + } + write := strings.Join(writes, "\n") + + if _, err := s.Write(test.db, test.rp, write, test.params); err != nil { + t.Fatal(err) + } + iterations += 1 + time.Sleep(500 * time.Millisecond) + } +} + +func TestServer_Query_EvilIdentifiers(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: fmt.Sprintf("cpu select=1,in-bytes=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, + } + + test.addQueries([]*Query{ + &Query{ + name: `query evil identifiers`, + command: `SELECT "select", "in-bytes" FROM cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","select","in-bytes"],"values":[["2000-01-01T00:00:00Z",1,2]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_OrderByTime(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + + fmt.Sprintf(`power,presence=true value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`power,presence=true value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`power,presence=true value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + fmt.Sprintf(`power,presence=false value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "order on points", + params: url.Values{"db": []string{"db0"}}, + command: `select value from "cpu" ORDER BY time DESC`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, + }, + + &Query{ + name: "order desc with tags", + params: url.Values{"db": []string{"db0"}}, + command: `select value from "power" ORDER BY time DESC`, + exp: 
`{"results":[{"series":[{"name":"power","columns":["time","value"],"values":[["2000-01-01T00:00:04Z",4],["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "baseline", + params: url.Values{"db": []string{"db0"}}, + command: `select * from cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "select field with periods", + params: url.Values{"db": []string{"db0"}}, + command: `select "foo.bar.baz" from cpu`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) + + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "baseline", + params: url.Values{"db": []string{"db0"}}, + command: `select * from foo`, + exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + &Query{ + name: "select field with periods", + params: url.Values{"db": []string{"db0"}}, + command: `select "foo.bar.baz" from foo`, + exp: `{"results":[{"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, + }, + }...) 
+ + for i, query := range test.queries { + if i == 0 { + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + } + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +func TestServer_Query_IntoTarget(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`foo value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf(`foo value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), + fmt.Sprintf(`foo value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), + fmt.Sprintf(`foo value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), + fmt.Sprintf(`foo value=4,foobar=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "into", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * INTO baz FROM foo`, + exp: `{"results":[{"series":[{"name":"result","columns":["time","written"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, + }, + &Query{ + name: "confirm results", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * FROM baz`, + exp: `{"results":[{"series":[{"name":"baz","columns":["time","foobar","value"],"values":[["2000-01-01T00:00:00Z",null,1],["2000-01-01T00:00:10Z",null,2],["2000-01-01T00:00:20Z",null,3],["2000-01-01T00:00:30Z",null,4],["2000-01-01T00:00:40Z",3,4]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} + +// This test reproduced a data race with closing the +// Subscriber points channel while writes were in-flight in the PointsWriter. 
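+// A goroutine below calls PointsWriter.WritePoints in a tight loop while the
+// deferred s.Close() tears the server down; the race used to surface on Close.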
+func TestServer_ConcurrentPointsWriter_Subscriber(t *testing.T) { + t.Parallel() + s := OpenDefaultServer(NewConfig(), "") + defer s.Close() + + // goroutine to write points + done := make(chan struct{}) + go func() { + for { + select { + case <-done: + return + default: + wpr := &cluster.WritePointsRequest{ + Database: "db0", + RetentionPolicy: "rp0", + } + s.PointsWriter.WritePoints(wpr) + } + } + }() + + time.Sleep(10 * time.Millisecond) + + close(done) + // Race occurs on s.Close() +} + +// Ensure time in where clause is inclusive +func TestServer_WhereTimeInclusive(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { + t.Fatal(err) + } + if err := s.MetaClient.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { + t.Fatal(err) + } + + writes := []string{ + fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), + fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), + fmt.Sprintf(`cpu value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), + } + + test := NewTest("db0", "rp0") + test.writes = Writes{ + &Write{data: strings.Join(writes, "\n")}, + } + + test.addQueries([]*Query{ + &Query{ + name: "all GTE/LTE", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:03Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "all GTE", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "all LTE", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time <= '2000-01-01T00:00:03Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "first GTE/LTE", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:01Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1]]}]}]}`, + }, + &Query{ + name: "last GTE/LTE", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time >= '2000-01-01T00:00:03Z' and time <= '2000-01-01T00:00:03Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "before GTE/LTE", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time <= '2000-01-01T00:00:00Z'`, + exp: `{"results":[{}]}`, + }, + &Query{ + name: "all GT/LT", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:04Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "first GT/LT", + params: 
url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:02Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1]]}]}]}`, + }, + &Query{ + name: "last GT/LT", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time > '2000-01-01T00:00:02Z' and time < '2000-01-01T00:00:04Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "all GT", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + &Query{ + name: "all LT", + params: url.Values{"db": []string{"db0"}}, + command: `SELECT * from cpu where time < '2000-01-01T00:00:04Z'`, + exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.md b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.md new file mode 100644 index 0000000000..8df37e3332 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server_test.md @@ -0,0 +1,150 @@ +# Server Integration Tests + +Currently, the file `server_test.go` has integration tests for single node scenarios. +At some point we'll need to add cluster tests, and may add them in a different file, or +rename `server_test.go` to `server_single_node_test.go` or something like that. + +## What is in a test? + +Each test is broken apart effectively into the following areas: + +- Write sample data +- Use cases for table driven test, that include a command (typically a query) and an expected result. + +When each test runs it does the following: + +- init: determines if there are any writes and if so, writes them to the in-memory database +- queries: iterate through each query, executing the command, and comparing the results to the expected result. + +## Idempotent - Allows for parallel tests + +Each test should be `idempotent`, meaning that its data will not be affected by other tests, or use cases within the table tests themselves. +This allows for parallel testing, keeping the test suite total execution time very low. + +### Basic sample test + +```go +// Ensure the server can have a database with multiple measurements. 
+func TestServer_Query_Multiple_Measurements(t *testing.T) { + t.Parallel() + s := OpenServer(NewConfig(), "") + defer s.Close() + + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) + } + + // Make sure we do writes for measurements that will span across shards + writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), + } + test := NewTest("db0", "rp0") + test.write = strings.Join(writes, "\n") + + test.addQueries([]*Query{ + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, + }, + }...) + + if err := test.init(s); err != nil { + t.Fatalf("test init failed: %s", err) + } + + for _, query := range test.queries { + if query.skip { + t.Logf("SKIP:: %s", query.name) + continue + } + if err := query.Execute(s); err != nil { + t.Error(query.Error(err)) + } else if !query.success() { + t.Error(query.failureMessage()) + } + } +} +``` + +Let's break this down: + +In this test, we first tell it to run in parallel with the `t.Parallel()` call. + +We then open a new server with: + +```go +s := OpenServer(NewConfig(), "") +defer s.Close() +``` + +If needed, we create a database and default retention policy. This is usually needed +when inserting and querying data. This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc. + +```go +if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { + t.Fatal(err) +} +``` + +Next, set up the write data you need: + +```go +writes := []string{ + fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), + fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), +} +``` +Create a new test with the database and retention policy: + +```go +test := NewTest("db0", "rp0") +``` + +Send in the writes: +```go +test.write = strings.Join(writes, "\n") +``` + +Add some queries (the second one is mocked out to show how to add more than one): + +```go +test.addQueries([]*Query{ + &Query{ + name: "measurement in one shard but not another shouldn't panic server", + command: `SELECT host,value FROM db0.rp0.cpu`, + exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, + }, + &Query{ + name: "another test here...", + command: `Some query command`, + exp: `the expected results`, + }, +}...) +``` + +The rest of the code is boilerplate execution code. It is purposefully not refactored out to a helper +to make sure the test failure reports the proper lines for debugging purposes. + +#### Running the tests + +To run the tests: + +```sh +go test ./cmd/influxd/run -parallel 500 -timeout 10s +``` + +#### Running a specific test + +```sh +go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill +``` + +#### Verbose feedback + +By default, all logs are silenced when testing. 
If you pass in the `-v` flag, the test suite becomes verbose and enables all logging in the system.
+
+```sh
+go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v
+```
diff --git a/vendor/github.com/influxdata/influxdb/errors.go b/vendor/github.com/influxdata/influxdb/errors.go
new file mode 100644
index 0000000000..7627ee2fdf
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/errors.go
@@ -0,0 +1,45 @@
+package influxdb
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	// ErrFieldsRequired is returned when a point does not have any fields.
+	ErrFieldsRequired = errors.New("fields required")
+
+	// ErrFieldTypeConflict is returned when a new field already exists with a different type.
+	ErrFieldTypeConflict = errors.New("field type conflict")
+)
+
+// ErrDatabaseNotFound indicates that a database operation failed on the
+// specified database because the specified database does not exist.
+func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) }
+
+// ErrRetentionPolicyNotFound indicates that the named retention policy could
+// not be found in the database.
+func ErrRetentionPolicyNotFound(name string) error {
+	return fmt.Errorf("retention policy not found: %s", name)
+}
+
+// IsClientError indicates whether an error is a known client error.
+func IsClientError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if err == ErrFieldsRequired {
+		return true
+	}
+	if err == ErrFieldTypeConflict {
+		return true
+	}
+
+	if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc b/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc
new file mode 100644
index 0000000000..a9c1a9ca3b
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc
@@ -0,0 +1 @@
+rvm use ruby-2.1.0@burn-in --create
diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile
new file mode 100644
index 0000000000..b1816e8b6f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+
+gem "colorize"
+gem "influxdb"
diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock
new file mode 100644
index 0000000000..9e721c3a75
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock
@@ -0,0 +1,14 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    colorize (0.6.0)
+    influxdb (0.0.16)
+      json
+    json (1.8.1)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  colorize
+  influxdb
diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb
new file mode 100644
index 0000000000..1d44bc2c0e
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb
@@ -0,0 +1,79 @@
+require "influxdb"
+require "colorize"
+require "benchmark"
+
+require_relative "log"
+require_relative "random_gaussian"
+
+BATCH_SIZE = 10_000
+
+Log.info "Starting burn-in suite"
+master = InfluxDB::Client.new
+master.delete_database("burn-in") rescue nil
+master.create_database("burn-in")
+master.create_database_user("burn-in", "user", "pass")
+
+master.database = "burn-in"
+# master.query "select * from test1 into test2;"
+# master.query "select count(value) from test1 group by
time(1m) into test2;" + +influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass" + +Log.success "Connected to server #{influxdb.host}:#{influxdb.port}" + +Log.log "Creating RandomGaussian(500, 25)" +gaussian = RandomGaussian.new(500, 25) +point_count = 0 + +while true + Log.log "Generating 10,000 points.." + points = [] + BATCH_SIZE.times do |n| + points << {value: gaussian.rand.to_i.abs} + end + point_count += points.length + + Log.info "Sending points to server.." + begin + st = Time.now + foo = influxdb.write_point("test1", points) + et = Time.now + Log.log foo.inspect + Log.log "#{et-st} seconds elapsed" + Log.success "Write successful." + rescue => e + Log.failure "Write failed:" + Log.log e + end + sleep 0.5 + + Log.info "Checking regular points" + st = Time.now + response = influxdb.query("select count(value) from test1;") + et = Time.now + + Log.log "#{et-st} seconds elapsed" + + response_count = response["test1"].first["count"] + if point_count == response_count + Log.success "Point counts match: #{point_count} == #{response_count}" + else + Log.failure "Point counts don't match: #{point_count} != #{response_count}" + end + + # Log.info "Checking continuous query points for test2" + # st = Time.now + # response = influxdb.query("select count(value) from test2;") + # et = Time.now + + # Log.log "#{et-st} seconds elapsed" + + # response_count = response["test2"].first["count"] + # if point_count == response_count + # Log.success "Point counts match: #{point_count} == #{response_count}" + # else + # Log.failure "Point counts don't match: #{point_count} != #{response_count}" + # end +end + + diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb new file mode 100644 index 0000000000..0f70d7633e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb @@ -0,0 +1,23 @@ +module Log + def self.info(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:yellow) + end + + def self.success(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:green) + end + + def self.failure(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s.colorize(:red) + end + + def self.log(msg) + print Time.now.strftime("%r") + " | " + puts msg.to_s + end +end + + diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb new file mode 100644 index 0000000000..51d6c3c044 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb @@ -0,0 +1,31 @@ +class RandomGaussian + def initialize(mean, stddev, rand_helper = lambda { Kernel.rand }) + @rand_helper = rand_helper + @mean = mean + @stddev = stddev + @valid = false + @next = 0 + end + + def rand + if @valid then + @valid = false + return @next + else + @valid = true + x, y = self.class.gaussian(@mean, @stddev, @rand_helper) + @next = y + return x + end + end + + private + def self.gaussian(mean, stddev, rand) + theta = 2 * Math::PI * rand.call + rho = Math.sqrt(-2 * Math.log(1 - rand.call)) + scale = stddev * rho + x = mean + scale * Math.cos(theta) + y = mean + scale * Math.sin(theta) + return x, y + end +end diff --git a/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb new file mode 100644 index 0000000000..93bc8314f1 --- /dev/null +++ 
b/vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb
@@ -0,0 +1,29 @@
+require "influxdb"
+
+ONE_WEEK_IN_SECONDS = 7*24*60*60
+NUM_POINTS = 10_000
+BATCHES = 100
+
+master = InfluxDB::Client.new
+master.delete_database("ctx") rescue nil
+master.create_database("ctx")
+
+influxdb = InfluxDB::Client.new "ctx"
+influxdb.time_precision = "s"
+
+names = ["foo", "bar", "baz", "quu", "qux"]
+
+st = Time.now
+BATCHES.times do |m|
+  points = []
+
+  puts "Writing #{NUM_POINTS} points, time ##{m}.."
+  NUM_POINTS.times do |n|
+    timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS)
+    points << {value: names.sample, time: timestamp}
+  end
+
+  influxdb.write_point("ct1", points)
+end
+puts st
+puts Time.now
diff --git a/vendor/github.com/influxdata/influxdb/etc/config.sample.toml b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml
new file mode 100644
index 0000000000..156cf94d08
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml
@@ -0,0 +1,359 @@
+### Welcome to the InfluxDB configuration file.
+
+# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
+# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
+# We don't track ip addresses of servers reporting. This is only used
+# to track the number of instances running and the versions, which
+# is very helpful for us.
+# Change this option to true to disable reporting.
+reporting-disabled = false
+
+# we'll try to get the hostname automatically, but if the OS returns something
+# that isn't resolvable by other servers in the cluster, use this option to
+# manually set the hostname
+# hostname = "localhost"
+
+###
+### [meta]
+###
+### Controls the parameters for the Raft consensus group that stores metadata
+### about the InfluxDB cluster.
+###
+
+[meta]
+  # Controls if this node should run the metaservice and participate in the Raft group
+  enabled = true
+
+  # Where the metadata/raft database is stored
+  dir = "/var/lib/influxdb/meta"
+
+  # The default address to bind to
+  bind-address = ":8088"
+
+  # The default address to bind the API to
+  http-bind-address = ":8091"
+  https-enabled = false
+  https-certificate = ""
+
+  retention-autocreate = true
+  # The default election timeout for the store
+  election-timeout = "1s"
+  # The default heartbeat timeout for the store
+  heartbeat-timeout = "1s"
+  # The default leader lease for the store
+  leader-lease-timeout = "500ms"
+  # The default commit timeout for the store
+  commit-timeout = "50ms"
+  # If trace log messages are printed for the meta service
+  cluster-tracing = false
+  # The default for auto promoting a node to a raft node when needed
+  raft-promotion-enabled = true
+  # If log messages are printed for the meta service
+  logging-enabled = true
+  pprof-enabled = false
+  # The default duration for leases.
+  lease-duration = "1m0s"
+
+###
+### [data]
+###
+### Controls where the actual shard data for InfluxDB lives and how it is
+### flushed from the WAL. "dir" may need to be changed to a suitable place
+### for your system, but the WAL settings are an advanced configuration. The
+### defaults should work for most systems.
+###
+
+[data]
+  # Controls if this node holds time series data shards in the cluster
+  enabled = true
+
+  dir = "/var/lib/influxdb/data"
+
+  # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
+  # apply to any new shards created after upgrading to a version > 0.9.3.
+  max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
+  wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
+  wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
+
+  # These are the WAL settings for the storage engine >= 0.9.3
+  wal-dir = "/var/lib/influxdb/wal"
+  wal-logging-enabled = true
+  data-logging-enabled = true
+
+  # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
+  # flush to the index
+  # wal-ready-series-size = 25600
+
+  # Flush and compact a partition once this ratio of series are over the ready size
+  # wal-compaction-threshold = 0.6
+
+  # Force a flush and compaction if any series in a partition gets above this size in bytes
+  # wal-max-series-size = 2097152
+
+  # Force a flush of all series and full compaction if there have been no writes in this
+  # amount of time. This is useful for ensuring that shards that are cold for writes don't
+  # keep a bunch of data cached in memory and in the WAL.
+  # wal-flush-cold-interval = "10m"
+
+  # Force a partition to flush its largest series if it reaches this approximate size in
+  # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
+  # The more memory you have, the bigger this can be.
+  # wal-partition-size-threshold = 20971520
+
+  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
+  # log any sensitive data contained within a query.
+  # query-log-enabled = true
+
+  # Settings for the TSM engine
+
+  # CacheMaxMemorySize is the maximum size a shard's cache can
+  # reach before it starts rejecting writes.
+  # cache-max-memory-size = 524288000
+
+  # CacheSnapshotMemorySize is the size at which the engine will
+  # snapshot the cache and write it to a TSM file, freeing up memory.
+  # cache-snapshot-memory-size = 26214400
+
+  # CacheSnapshotWriteColdDuration is the length of time at
+  # which the engine will snapshot the cache and write it to
+  # a new TSM file if the shard hasn't received writes or deletes.
+  # cache-snapshot-write-cold-duration = "1h"
+
+  # MinCompactionFileCount is the minimum number of TSM files
+  # that need to exist before a compaction cycle will run.
+  # compact-min-file-count = 3
+
+  # CompactFullWriteColdDuration is the duration at which the engine
+  # will compact all TSM files in a shard if it hasn't received a
+  # write or delete.
+  # compact-full-write-cold-duration = "24h"
+
+  # MaxPointsPerBlock is the maximum number of points in an encoded
+  # block in a TSM file. Larger numbers may yield better compression
+  # but could incur a performance penalty when querying.
+  # max-points-per-block = 1000
+
+###
+### [hinted-handoff]
+###
+### Controls the hinted handoff feature, which allows nodes to temporarily
+### store queued data when one node of a cluster is down for a short period
+### of time.
+###
+
+[hinted-handoff]
+  enabled = true
+  dir = "/var/lib/influxdb/hh"
+  max-size = 1073741824
+  max-age = "168h"
+  retry-rate-limit = 0
+
+  # Hinted handoff will start retrying writes to down nodes at a rate of once per second.
+  # If any error occurs, it will back off in an exponential manner until the interval
+  # reaches retry-max-interval. Once writes to all nodes are successfully completed, the
+  # interval will reset to retry-interval.
+  retry-interval = "1s"
+  retry-max-interval = "1m"
+
+  # Interval between running checks for data that should be purged. Data is purged from
Data is purged from + # hinted-handoff queues for two reasons. 1) The data is older than the max age, or + # 2) the target node has been dropped from the cluster. Data is never dropped until + # it has reached max-age however, for a dropped node or not. + purge-interval = "1h" + +### +### [cluster] +### +### Controls non-Raft cluster behavior, which generally includes how data is +### shared across shards. +### + +[cluster] + shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request. + write-timeout = "10s" # The time within which a write request must complete on the cluster. + +### +### [retention] +### +### Controls the enforcement of retention policies for evicting old data. +### + +[retention] + enabled = true + check-interval = "30m" + +### +### [shard-precreation] +### +### Controls the precreation of shards, so they are available before data arrives. +### Only shards that, after creation, will have both a start- and end-time in the +### future, will ever be created. Shards are never precreated that would be wholly +### or partially in the past. + +[shard-precreation] + enabled = true + check-interval = "10m" + advance-period = "30m" + +### +### Controls the system self-monitoring, statistics and diagnostics. +### +### The internal database for monitoring data is created automatically if +### if it does not already exist. The target retention within this database +### is called 'monitor' and is also created with a retention period of 7 days +### and a replication factor of 1, if it does not exist. In all cases the +### this retention policy is configured as the default for the database. + +[monitor] + store-enabled = true # Whether to record statistics internally. + store-database = "_internal" # The destination database for recorded statistics + store-interval = "10s" # The interval at which to record statistics + +### +### [admin] +### +### Controls the availability of the built-in, web-based admin interface. If HTTPS is +### enabled for the admin interface, HTTPS must also be enabled on the [http] service. +### + +[admin] + enabled = true + bind-address = ":8083" + https-enabled = false + https-certificate = "/etc/ssl/influxdb.pem" + +### +### [http] +### +### Controls how the HTTP endpoints are configured. These are the primary +### mechanism for getting data into and out of InfluxDB. +### + +[http] + enabled = true + bind-address = ":8086" + auth-enabled = false + log-enabled = true + write-tracing = false + pprof-enabled = false + https-enabled = false + https-certificate = "/etc/ssl/influxdb.pem" + +### +### [[graphite]] +### +### Controls one or many listeners for Graphite data. +### + +[[graphite]] + enabled = false + # database = "graphite" + # bind-address = ":2003" + # protocol = "tcp" + # consistency-level = "one" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # batch-size = 5000 # will flush if this many points get buffered + # batch-pending = 10 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." 
+ + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] + +### +### [collectd] +### +### Controls the listener for collectd data. +### + +[collectd] + enabled = false + # bind-address = "" + # database = "" + # typesdb = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + +### +### [opentsdb] +### +### Controls the listener for OpenTSDB data. +### + +[opentsdb] + enabled = false + # bind-address = ":4242" + # database = "opentsdb" + # retention-policy = "" + # consistency-level = "one" + # tls-enabled = false + # certificate= "" + # log-point-errors = true # Log an error for every malformed point. + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Only points + # metrics received over the telnet protocol undergo batching. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + +### +### [[udp]] +### +### Controls the listeners for InfluxDB line protocol data via UDP. +### + +[[udp]] + enabled = false + # bind-address = "" + # database = "udp" + # retention-policy = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # batch-size = 1000 # will flush if this many points get buffered + # batch-pending = 5 # number of batches that may be pending in memory + # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit + # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + + # set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536 + # udp-payload-size = 65536 + +### +### [continuous_queries] +### +### Controls how continuous queries are run within InfluxDB. 
+###
+
+[continuous_queries]
+  log-enabled = true
+  enabled = true
+  # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run
diff --git a/vendor/github.com/influxdata/influxdb/gobuild.sh b/vendor/github.com/influxdata/influxdb/gobuild.sh
new file mode 100755
index 0000000000..9a96e7e9b7
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/gobuild.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# This script runs inside the Dockerfile_build_ubuntu64_git container and
+# gets the latest Go source code and compiles it.
+# Then it passes control over to the normal build.py script.
+
+set -e
+
+cd /go/src
+git fetch --all
+git checkout $GO_CHECKOUT
+# Merge in recent changes if we are on a branch;
+# if we checked out a tag, just ignore the error
+git pull || true
+./make.bash
+
+# Run normal build.py
+cd "$PROJECT_DIR"
+exec ./build.py "$@"
diff --git a/vendor/github.com/influxdata/influxdb/importer/README.md b/vendor/github.com/influxdata/influxdb/importer/README.md
new file mode 100644
index 0000000000..f5bbbc012c
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/importer/README.md
@@ -0,0 +1,193 @@
+# Import/Export
+
+## Exporting from 0.8.9
+
+Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later.
+
+Note that `0.8.9` can be found here:
+
+```
+http://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb
+http://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm
+```
+
+### Design
+
+`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below).
+
+The `DDL` section contains the sql commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdata/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data) in `0.10`. Remember that batching is important and we don't recommend batch sizes over 5k without further testing.
+
+You need to specify a database and shard space when you export.
+
+To list out your shard spaces, use the following http endpoint:
+
+`/cluster/shard_spaces`
+
+Example:
+```sh
+http://username:password@localhost:8086/cluster/shard_spaces
+```
+
+Then, to export a database with the name "metrics" and a shard space with the name "default", issue the following curl command:
+
+```sh
+curl -o export http://username:password@localhost:8086/export/metrics/default
+```
+
+Compression is supported, and will result in a significantly smaller file size.
+
+Use the following command for compression:
+```sh
+curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default
+```
+
+You can also export just the `DDL` with this option:
+
+```sh
+curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl
+```
+
+Or just the `DML` with this option:
+
+```sh
+curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml
+```
+
+### Assumptions
+
+- Series name mapping follows these [guidelines](https://docs.influxdata.com/influxdb/v0.8/advanced_topics/schema_design/)
+- Database name will map directly from `0.8` to `0.10`
+- Shard Spaces map to Retention Policies
+- Shard Space Duration is ignored, as in `0.10` we determine shard size automatically
+- Regex is used to match the correct series names and only exports that data for the database
+- Duration becomes the new Retention Policy duration
+
+- Users are not migrated due to the inability to get passwords. Anyone using users will need to manually set these back up in `0.10`
+
+### Upgrade Recommendations
+
+It's recommended that you upgrade to `0.9.3` or later first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`.
+
+It is important that, when exporting, you change your config so the http endpoints do not time out. To do so, make this change in your config:
+
+```toml
+# Configure the http api
+[api]
+read-timeout = "0s"
+```
+
+### Exceptions
+
+If a series can't be exported to tags based on the guidelines mentioned above,
+we will insert the entire series name as the measurement name. You can either
+allow that to import into the new InfluxDB instance, or you can do your own
+data massaging on it prior to importing it.
+
+For example, if you have the following series name:
+
+```
+metric.disk.c.host.server01.single
+```
+
+It will export exactly that as the measurement name, with no tags:
+
+```
+metric.disk.c.host.server01.single
+```
+
+### Export Metrics
+
+When you export, you will now get comments inline in the `DML`:
+
+`# Found 999 Series for export`
+
+As well as count totals for each series exported:
+
+`# Series FOO - Points Exported: 999`
+
+With a total at the bottom:
+
+`# Points Exported: 999`
+
+You can grep the exported file at the end to get all the export metrics:
+
+`cat myexport | grep Exported`
+
+## Importing
+
+Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`.
+
+## Caveats
+
+For the export/import to work, all prerequisites have to be met. For export, all series names in `0.8` should be in the following format:
+
+```
+....
+```
+for example:
+```
+az.us-west-1.host.serverA.cpu
+```
+or any number of tags
+```
+building.2.temperature
+```
+
+Additionally, the fields need to have a consistent type (all float64, int64, etc.) for every write in `0.8`. Otherwise they have the potential to fail writes in the import.
+See below for more information.
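To make the consistent-type caveat concrete, here is a hypothetical pair of exported line protocol points (illustrative only, not taken from a real export) that would conflict on import: the first write pins the field type of `value` to integer (the trailing `i`), so the second, float-valued write is rejected with a field type conflict.

```
cpu_load,host=serverA value=10i 1438387200000000000
cpu_load,host=serverA value=10.5 1438387260000000000
```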
+
+## Running the import command
+
+ To import via the CLI, use the following command:
+
+ ```sh
+ influx -import -path=metrics-default.gz -compressed
+ ```
+
+ If the file is not compressed, you can issue it without the `-compressed` flag:
+
+ ```sh
+ influx -import -path=metrics-default
+ ```
+
+ To redirect failed import lines to another file, run this command:
+
+ ```sh
+ influx -import -path=metrics-default.gz -compressed > failures
+ ```
+
+ The import sends data to the server using the line protocol, in batches of 5,000 lines.
+
+### Throttling the import
+
+ If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server.
+
+ ```sh
+ influx -import -path=metrics-default.gz -compressed -pps 50000 > failures
+ ```
+
+ This states that you don't want more than 50,000 points per second written to the database. Due to the processing overhead, you will likely never reach exactly 50,000 pps; something closer to 35,000 pps is typical.
+
+## Understanding the results of the import
+
+During the import, a status message is written out for every 100,000 points imported, reporting stats on the progress of the import:
+
+```
+2015/08/21 14:48:01 Processed 3100000 lines. Time elapsed: 56.740578415s. Points per second (PPS): 54634
+```
+
+ The importer will give some basic stats when finished:
+
+ ```sh
+ 2015/07/29 23:15:20 Processed 2 commands
+ 2015/07/29 23:15:20 Processed 70207923 inserts
+ 2015/07/29 23:15:20 Failed 29785000 inserts
+ ```
+
+ Most inserts fail due to the following type of error:
+
+ ```sh
+ 2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer
+ ```
+
+ This is due to the fact that in `0.8` a field could get created and saved as int or float types on independent writes. In `0.9` and greater the field has to have a consistent type.
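The CLI import described above is built on the `v8` importer package that appears next in this patch. As a rough, untested sketch only (the server URL, file path, and throttle value below are placeholders mirroring the CLI flags, not values taken from this document), the same import could be driven programmatically like this:

```go
package main

import (
	"log"
	"net/url"

	v8 "github.com/influxdata/influxdb/importer/v8"
)

func main() {
	// Placeholder server address; adjust for your setup.
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}

	config := v8.NewConfig()
	config.URL = *u                    // Config.URL is a url.URL value, not a pointer
	config.Path = "metrics-default.gz" // file produced by the 0.8.9 export
	config.Compressed = true           // mirrors the -compressed flag
	config.PPS = 50000                 // mirrors the -pps flag

	// Import replays the DDL section, then streams the DML in batches of
	// 5,000 lines, throttled to roughly PPS points per second.
	if err := v8.NewImporter(config).Import(); err != nil {
		log.Fatalf("import failed: %v", err)
	}
}
```

Leaving `PPS` at zero disables the throttle entirely, as can be seen in the `batchWrite` logic further down.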
diff --git a/vendor/github.com/influxdata/influxdb/importer/v8/importer.go b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go new file mode 100644 index 0000000000..774ec57c51 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go @@ -0,0 +1,248 @@ +package v8 // import "github.com/influxdata/influxdb/importer/v8" + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "log" + "net/url" + "os" + "strings" + "time" + + "github.com/influxdata/influxdb/client" +) + +const batchSize = 5000 + +// Config is the config used to initialize a Importer importer +type Config struct { + Username string + Password string + URL url.URL + Precision string + WriteConsistency string + Path string + Version string + Compressed bool + PPS int +} + +// NewConfig returns an initialized *Config +func NewConfig() *Config { + return &Config{} +} + +// Importer is the importer used for importing 0.8 data +type Importer struct { + client *client.Client + database string + retentionPolicy string + config *Config + batch []string + totalInserts int + failedInserts int + totalCommands int + throttlePointsWritten int + lastWrite time.Time + throttle *time.Ticker +} + +// NewImporter will return an intialized Importer struct +func NewImporter(config *Config) *Importer { + return &Importer{ + config: config, + batch: make([]string, 0, batchSize), + } +} + +// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize +func (i *Importer) Import() error { + // Create a client and try to connect + config := client.NewConfig() + config.URL = i.config.URL + config.Username = i.config.Username + config.Password = i.config.Password + config.UserAgent = fmt.Sprintf("influxDB importer/%s", i.config.Version) + cl, err := client.NewClient(config) + if err != nil { + return fmt.Errorf("could not create client %s", err) + } + i.client = cl + if _, _, e := i.client.Ping(); e != nil { + return fmt.Errorf("failed to connect to %s\n", i.client.Addr()) + } + + // Validate args + if i.config.Path == "" { + return fmt.Errorf("file argument required") + } + + defer func() { + if i.totalInserts > 0 { + log.Printf("Processed %d commands\n", i.totalCommands) + log.Printf("Processed %d inserts\n", i.totalInserts) + log.Printf("Failed %d inserts\n", i.failedInserts) + } + }() + + // Open the file + f, err := os.Open(i.config.Path) + if err != nil { + return err + } + defer f.Close() + + var r io.Reader + + // If gzipped, wrap in a gzip reader + if i.config.Compressed { + gr, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gr.Close() + // Set the reader to the gzip reader + r = gr + } else { + // Standard text file so our reader can just be the file + r = f + } + + // Get our reader + scanner := bufio.NewScanner(r) + + // Process the DDL + i.processDDL(scanner) + + // Set up our throttle channel. 
Since there is effectively no other activity at this point + // the smaller resolution gets us much closer to the requested PPS + i.throttle = time.NewTicker(time.Microsecond) + defer i.throttle.Stop() + + // Prime the last write + i.lastWrite = time.Now() + + // Process the DML + i.processDML(scanner) + + // Check if we had any errors scanning the file + if err := scanner.Err(); err != nil { + return fmt.Errorf("reading standard input: %s", err) + } + + return nil +} + +func (i *Importer) processDDL(scanner *bufio.Scanner) { + for scanner.Scan() { + line := scanner.Text() + // If we find the DML token, we are done with DDL + if strings.HasPrefix(line, "# DML") { + return + } + if strings.HasPrefix(line, "#") { + continue + } + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + i.queryExecutor(line) + } +} + +func (i *Importer) processDML(scanner *bufio.Scanner) { + start := time.Now() + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "# CONTEXT-DATABASE:") { + i.database = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") { + i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1]) + } + if strings.HasPrefix(line, "#") { + continue + } + // Skip blank lines + if strings.TrimSpace(line) == "" { + continue + } + i.batchAccumulator(line, start) + } + // Call batchWrite one last time to flush anything out in the batch + i.batchWrite() +} + +func (i *Importer) execute(command string) { + response, err := i.client.Query(client.Query{Command: command, Database: i.database}) + if err != nil { + log.Printf("error: %s\n", err) + return + } + if err := response.Error(); err != nil { + log.Printf("error: %s\n", response.Error()) + } +} + +func (i *Importer) queryExecutor(command string) { + i.totalCommands++ + i.execute(command) +} + +func (i *Importer) batchAccumulator(line string, start time.Time) { + i.batch = append(i.batch, line) + if len(i.batch) == batchSize { + i.batchWrite() + i.batch = i.batch[:0] + // Give some status feedback every 100000 lines processed + processed := i.totalInserts + i.failedInserts + if processed%100000 == 0 { + since := time.Since(start) + pps := float64(processed) / since.Seconds() + log.Printf("Processed %d lines. Time elapsed: %s. 
Points per second (PPS): %d", processed, since.String(), int64(pps)) + } + } +} + +func (i *Importer) batchWrite() { + // Accumulate the batch size to see how many points we have written this second + i.throttlePointsWritten += len(i.batch) + + // Find out when we last wrote data + since := time.Since(i.lastWrite) + + // Check to see if we've exceeded our points per second for the current timeframe + var currentPPS int + if since.Seconds() > 0 { + currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds()) + } else { + currentPPS = i.throttlePointsWritten + } + + // If our currentPPS is greater than the PPS specified, then we wait and retry + if int(currentPPS) > i.config.PPS && i.config.PPS != 0 { + // Wait for the next tick + <-i.throttle.C + + // Decrement the batch size back out as it is going to get called again + i.throttlePointsWritten -= len(i.batch) + i.batchWrite() + return + } + + _, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency) + if e != nil { + log.Println("error writing batch: ", e) + // Output failed lines to STDOUT so users can capture lines that failed to import + fmt.Println(strings.Join(i.batch, "\n")) + i.failedInserts += len(i.batch) + } else { + i.totalInserts += len(i.batch) + } + i.throttlePointsWritten = 0 + i.lastWrite = time.Now() + return +} diff --git a/vendor/github.com/influxdata/influxdb/influxdb.go b/vendor/github.com/influxdata/influxdb/influxdb.go new file mode 100644 index 0000000000..0befd2992d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxdb.go @@ -0,0 +1 @@ +package influxdb // import "github.com/influxdata/influxdb" diff --git a/vendor/github.com/influxdata/influxdb/influxql/README.md b/vendor/github.com/influxdata/influxdb/influxql/README.md new file mode 100644 index 0000000000..db7cfd9309 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/README.md @@ -0,0 +1,944 @@ +# The Influx Query Language Specification + +## Introduction + +This is a reference for the Influx Query Language ("InfluxQL"). + +InfluxQL is a SQL-like query language for interacting with InfluxDB. It has +been lovingly crafted to feel familiar to those coming from other SQL or +SQL-like environments while providing features specific to storing and analyzing +time series data. + + +## Notation + +The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the +same notation used in the [Go](http://golang.org) programming language +specification, which can be found [here](https://golang.org/ref/spec). Not so +coincidentally, InfluxDB is written in Go. + +``` +Production = production_name "=" [ Expression ] "." . +Expression = Alternative { "|" Alternative } . +Alternative = Term { Term } . +Term = production_name | token [ "…" token ] | Group | Option | Repetition . +Group = "(" Expression ")" . +Option = "[" Expression "]" . +Repetition = "{" Expression "}" . +``` + +Notation operators in order of increasing precedence: + +``` +| alternation +() grouping +[] option (0 or 1 times) +{} repetition (0 to n times) +``` + + +## Query representation + +### Characters + +InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). + +``` +newline = /* the Unicode code point U+000A */ . +unicode_char = /* an arbitrary Unicode code point except newline */ . +``` + + +## Letters and digits + +Letters are the set of ASCII characters plus the underscore character _ (U+005F) +is considered a letter. 
+ +Only decimal digits are supported. + +``` +letter = ascii_letter | "_" . +ascii_letter = "A" … "Z" | "a" … "z" . +digit = "0" … "9" . +``` + + +## Identifiers + +Identifiers are tokens which refer to database names, retention policy names, +user names, measurement names, tag keys, and field keys. + +The rules: + +- double quoted identifiers can contain any unicode character other than a new line +- double quoted identifiers can contain escaped `"` characters (i.e., `\"`) +- unquoted identifiers must start with an upper or lowercase ASCII character or "_" +- unquoted identifiers may contain only ASCII letters, decimal digits, and "_" + +``` +identifier = unquoted_identifier | quoted_identifier . +unquoted_identifier = ( letter ) { letter | digit } . +quoted_identifier = `"` unicode_char { unicode_char } `"` . +``` + +#### Examples: + +``` +cpu +_cpu_stats +"1h" +"anything really" +"1_Crazy-1337.identifier>NAME👍" +``` + + +## Keywords + +``` +ALL ALTER ANY AS ASC BEGIN +BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT +DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP +DURATION END EVERY EXISTS EXPLAIN FIELD +FOR FORCE FROM GRANT GRANTS GROUP +GROUPS IF IN INF INNER INSERT +INTO KEY KEYS LIMIT SHOW MEASUREMENT +MEASUREMENTS NOT OFFSET ON ORDER PASSWORD +POLICY POLICIES PRIVILEGES QUERIES QUERY READ +REPLICATION RESAMPLE RETENTION REVOKE SELECT SERIES +SERVER SERVERS SET SHARD SHARDS SLIMIT +SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG TO +USER USERS VALUES WHERE WITH WRITE +``` + +## Literals + +### Integers + +InfluxQL supports decimal integer literals. Hexadecimal and octal literals are +not currently supported. + +``` +int_lit = ( "1" … "9" ) { digit } . +``` + +### Floats + +InfluxQL supports floating-point literals. Exponents are not currently supported. + +``` +float_lit = int_lit "." int_lit . +``` + +### Strings + +String literals must be surrounded by single quotes. Strings may contain `'` +characters as long as they are escaped (i.e., `\'`). + +``` +string_lit = `'` { unicode_char } `'` . +``` + +### Durations + +Duration literals specify a length of time. An integer literal followed +immediately (with no spaces) by a duration unit listed below is interpreted as +a duration literal. + +### Duration units +| Units | Meaning | +|--------|-----------------------------------------| +| u or µ | microseconds (1 millionth of a second) | +| ms | milliseconds (1 thousandth of a second) | +| s | second | +| m | minute | +| h | hour | +| d | day | +| w | week | + +``` +duration_lit = int_lit duration_unit . +duration_unit = "u" | "µ" | "s" | "h" | "d" | "w" | "ms" . +``` + +### Dates & Times + +The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is: + +InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM + +``` +time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" . +``` + +### Booleans + +``` +bool_lit = TRUE | FALSE . +``` + +### Regular Expressions + +``` +regex_lit = "/" { unicode_char } "/" . +``` + +## Queries + +A query is composed of one or more statements separated by a semicolon. + +``` +query = statement { ";" statement } . 
+ +statement = alter_retention_policy_stmt | + create_continuous_query_stmt | + create_database_stmt | + create_retention_policy_stmt | + create_subscription_stmt | + create_user_stmt | + delete_stmt | + drop_continuous_query_stmt | + drop_database_stmt | + drop_measurement_stmt | + drop_retention_policy_stmt | + drop_series_stmt | + drop_subscription_stmt | + drop_user_stmt | + grant_stmt | + show_continuous_queries_stmt | + show_databases_stmt | + show_field_keys_stmt | + show_grants_stmt | + show_measurements_stmt | + show_retention_policies | + show_series_stmt | + show_shard_groups_stmt | + show_shards_stmt | + show_subscriptions_stmt| + show_tag_keys_stmt | + show_tag_values_stmt | + show_users_stmt | + revoke_stmt | + select_stmt . +``` + + +## Statements + +### ALTER RETENTION POLICY + +``` +alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name on_clause + retention_policy_option + [ retention_policy_option ] + [ retention_policy_option ] . +``` + +#### Examples: + +```sql +-- Set default retention policy for mydb to 1h.cpu. +ALTER RETENTION POLICY "1h.cpu" ON mydb DEFAULT; + +-- Change duration and replication factor. +ALTER RETENTION POLICY policy1 ON somedb DURATION 1h REPLICATION 4 +``` + +### CREATE CONTINUOUS QUERY + +``` +create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name on_clause + [ "RESAMPLE" resample_opts ] + "BEGIN" select_stmt "END" . + +query_name = identifier . + +resample_opts = (every_stmt for_stmt | every_stmt | for_stmt) . +every_stmt = "EVERY" duration_lit +for_stmt = "FOR" duration_lit +``` + +#### Examples: + +```sql +-- selects from default retention policy and writes into 6_months retention policy +CREATE CONTINUOUS QUERY "10m_event_count" +ON db_name +BEGIN + SELECT count(value) + INTO "6_months".events + FROM events + GROUP BY time(10m) +END; + +-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy +CREATE CONTINUOUS QUERY "1h_event_count" +ON db_name +BEGIN + SELECT sum(count) as count + INTO "2_years".events + FROM "6_months".events + GROUP BY time(1h) +END; + +-- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time +-- when resample is used, at least one of "EVERY" or "FOR" must be used +CREATE CONTINUOUS QUERY "cpu_mean" +ON db_name +RESAMPLE EVERY 10s FOR 2m +BEGIN + SELECT mean(value) + INTO "cpu_mean" + FROM "cpu" + GROUP BY time(1m) +END; +``` + +### CREATE DATABASE + +``` +create_database_stmt = "CREATE DATABASE" db_name . +``` + +#### Example: + +```sql +CREATE DATABASE foo +``` + +### CREATE RETENTION POLICY + +``` +create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause + retention_policy_duration + retention_policy_replication + [ "DEFAULT" ] . +``` + +#### Examples + +```sql +-- Create a retention policy. +CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2; + +-- Create a retention policy and set it as the default. +CREATE RETENTION POLICY "10m.events" ON somedb DURATION 10m REPLICATION 2 DEFAULT; +``` + +### CREATE SUBSCRIPTION + +``` +create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host} . +``` + +#### Examples: + +```sql +-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that send data to 'example.com:9090' via UDP. 
+CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ALL 'udp://example.com:9090' ; + +-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'default' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'. +CREATE SUBSCRIPTION sub0 ON "mydb"."default" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090'; +``` + +### CREATE USER + +``` +create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password + [ "WITH ALL PRIVILEGES" ] . +``` + +#### Examples: + +```sql +-- Create a normal database user. +CREATE USER jdoe WITH PASSWORD '1337password'; + +-- Create a cluster admin. +-- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. +CREATE USER jdoe WITH PASSWORD '1337password' WITH ALL PRIVILEGES; +``` + +### DROP CONTINUOUS QUERY + +``` +drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name on_clause . +``` + +#### Example: + +```sql +DROP CONTINUOUS QUERY myquery ON mydb; +``` + +### DROP DATABASE + +``` +drop_database_stmt = "DROP DATABASE" db_name . +``` + +#### Example: + +```sql +DROP DATABASE mydb; +``` + +### DROP MEASUREMENT + +``` +drop_measurement_stmt = "DROP MEASUREMENT" measurement_name . +``` + +#### Examples: + +```sql +-- drop the cpu measurement +DROP MEASUREMENT cpu; +``` + +### DROP RETENTION POLICY + +``` +drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name on_clause . +``` + +#### Example: + +```sql +-- drop the retention policy named 1h.cpu from mydb +DROP RETENTION POLICY "1h.cpu" ON mydb; +``` + +### DROP SERIES + +``` +drop_series_stmt = "DROP SERIES" ( from_clause | where_clause | from_clause where_clause ) . +``` + +#### Example: + +```sql + +``` + +### DROP SUBSCRIPTION + +``` +drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy . +``` + +#### Example: + +```sql +DROP SUBSCRIPTION sub0 ON "mydb"."default"; + +``` + +### DROP USER + +``` +drop_user_stmt = "DROP USER" user_name . +``` + +#### Example: + +```sql +DROP USER jdoe; + +``` + +### GRANT + +NOTE: Users can be granted privileges on databases that do not exist. + +``` +grant_stmt = "GRANT" privilege [ on_clause ] to_clause . +``` + +#### Examples: + +```sql +-- grant cluster admin privileges +GRANT ALL TO jdoe; + +-- grant read access to a database +GRANT READ ON mydb TO jdoe; +``` + +### SHOW CONTINUOUS QUERIES + +``` +show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" . +``` + +#### Example: + +```sql +-- show all continuous queries +SHOW CONTINUOUS QUERIES; +``` + +### SHOW DATABASES + +``` +show_databases_stmt = "SHOW DATABASES" . +``` + +#### Example: + +```sql +-- show all databases +SHOW DATABASES; +``` + +### SHOW FIELD KEYS + +``` +show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . +``` + +#### Examples: + +```sql +-- show field keys from all measurements +SHOW FIELD KEYS; + +-- show field keys from specified measurement +SHOW FIELD KEYS FROM cpu; +``` + +### SHOW GRANTS + +``` +show_grants_stmt = "SHOW GRANTS FOR" user_name . +``` + +#### Example: + +```sql +-- show grants for jdoe +SHOW GRANTS FOR jdoe; +``` + +### SHOW MEASUREMENTS + +``` +show_measurements_stmt = "SHOW MEASUREMENTS" [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . 
+``` + +```sql +-- show all measurements +SHOW MEASUREMENTS; + +-- show measurements where region tag = 'uswest' AND host tag = 'serverA' +SHOW MEASUREMENTS WHERE region = 'uswest' AND host = 'serverA'; +``` + +### SHOW RETENTION POLICIES + +``` +show_retention_policies = "SHOW RETENTION POLICIES" on_clause . +``` + +#### Example: + +```sql +-- show all retention policies on a database +SHOW RETENTION POLICIES ON mydb; +``` + +### SHOW SERIES + +``` +show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Example: + +```sql + +``` + +### SHOW SHARD GROUPS + +``` +show_shard_groups_stmt = "SHOW SHARD GROUPS" . +``` + +#### Example: + +```sql +SHOW SHARD GROUPS; +``` + +### SHOW SHARDS + +``` +show_shards_stmt = "SHOW SHARDS" . +``` + +#### Example: + +```sql +SHOW SHARDS; +``` + +### SHOW SUBSCRIPTIONS + +``` +show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" . +``` + +#### Example: + +```sql +SHOW SUBSCRIPTIONS; +``` + +### SHOW TAG KEYS + +``` +show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] + [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all tag keys +SHOW TAG KEYS; + +-- show all tag keys from the cpu measurement +SHOW TAG KEYS FROM cpu; + +-- show all tag keys from the cpu measurement where the region key = 'uswest' +SHOW TAG KEYS FROM cpu WHERE region = 'uswest'; + +-- show all tag keys where the host key = 'serverA' +SHOW TAG KEYS WHERE host = 'serverA'; +``` + +### SHOW TAG VALUES + +``` +show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] + [ group_by_clause ] [ limit_clause ] [ offset_clause ] . +``` + +#### Examples: + +```sql +-- show all tag values across all measurements for the region tag +SHOW TAG VALUES WITH TAG = 'region'; + +-- show tag values from the cpu measurement for the region tag +SHOW TAG VALUES FROM cpu WITH KEY = 'region'; + +-- show tag values from the cpu measurement for region & host tag keys where service = 'redis' +SHOW TAG VALUES FROM cpu WITH KEY IN (region, host) WHERE service = 'redis'; +``` + +### SHOW USERS + +``` +show_users_stmt = "SHOW USERS" . +``` + +#### Example: + +```sql +-- show all users +SHOW USERS; +``` + +### REVOKE + +``` +revoke_stmt = "REVOKE" privilege [ on_clause ] "FROM" user_name . +``` + +#### Examples: + +```sql +-- revoke cluster admin from jdoe +REVOKE ALL PRIVILEGES FROM jdoe; + +-- revoke read privileges from jdoe on mydb +REVOKE READ ON mydb FROM jdoe; +``` + +### SELECT + +``` +select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] + [ group_by_clause ] [ order_by_clause ] [ limit_clause ] + [ offset_clause ] [ slimit_clause ] [ soffset_clause ] . +``` + +#### Examples: + +```sql +-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals +SELECT mean(value) FROM cpu WHERE region = 'uswest' GROUP BY time(10m) fill(0); + +-- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy +SELECT mean(value) INTO cpu_1h.:MEASUREMENT FROM /cpu.*/ +``` + +## Clauses + +``` +from_clause = "FROM" measurements . + +group_by_clause = "GROUP BY" dimensions fill(fill_option). + +into_clause = "INTO" ( measurement | back_ref ). + +limit_clause = "LIMIT" int_lit . + +offset_clause = "OFFSET" int_lit . + +slimit_clause = "SLIMIT" int_lit . + +soffset_clause = "SOFFSET" int_lit . + +on_clause = "ON" db_name . + +order_by_clause = "ORDER BY" sort_fields . 
+ +to_clause = "TO" user_name . + +where_clause = "WHERE" expr . + +with_measurement_clause = "WITH MEASUREMENT" ( "=" measurement | "=~" regex_lit ) . + +with_tag_clause = "WITH KEY" ( "=" tag_key | "IN (" tag_keys ")" ) . +``` + +## Expressions + +``` +binary_op = "+" | "-" | "*" | "/" | "AND" | "OR" | "=" | "!=" | "<" | + "<=" | ">" | ">=" . + +expr = unary_expr { binary_op unary_expr } . + +unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit | + float_lit | bool_lit | duration_lit | regex_lit . +``` + +## Other + +``` +alias = "AS" identifier . + +back_ref = ( policy_name ".:MEASUREMENT" ) | + ( db_name "." [ policy_name ] ".:MEASUREMENT" ) . + +db_name = identifier . + +dimension = expr . + +dimensions = dimension { "," dimension } . + +field_key = identifier . + +field = expr [ alias ] . + +fields = field { "," field } . + +fill_option = "null" | "none" | "previous" | int_lit | float_lit . + +host = string_lit . + +measurement = measurement_name | + ( policy_name "." measurement_name ) | + ( db_name "." [ policy_name ] "." measurement_name ) . + +measurements = measurement { "," measurement } . + +measurement_name = identifier . + +password = string_lit . + +policy_name = identifier . + +privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" . + +query_name = identifier . + +retention_policy = identifier . + +retention_policy_option = retention_policy_duration | + retention_policy_replication | + "DEFAULT" . + +retention_policy_duration = "DURATION" duration_lit . +retention_policy_replication = "REPLICATION" int_lit + +series_id = int_lit . + +sort_field = field_key [ ASC | DESC ] . + +sort_fields = sort_field { "," sort_field } . + +subscription_name = identifier . + +tag_key = identifier . + +tag_keys = tag_key { "," tag_key } . + +user_name = identifier . + +var_ref = measurement . +``` + + +## Query Engine Internals + +Once you understand the language itself, it's important to know how these +language constructs are implemented in the query engine. This gives you an +intuitive sense for how results will be processed and how to create efficient +queries. + +The life cycle of a query looks like this: + +1. InfluxQL query string is tokenized and then parsed into an abstract syntax + tree (AST). This is the code representation of the query itself. + +2. The AST is passed to the `QueryExecutor` which directs queries to the + appropriate handlers. For example, queries related to meta data are executed + by the meta service and `SELECT` statements are executed by the shards + themselves. + +3. The query engine then determines the shards that match the `SELECT` + statement's time range. From these shards, iterators are created for each + field in the statement. + +4. Iterators are passed to the emitter which drains them and joins the resulting + points. The emitter's job is to convert simple time/value points into the + more complex result objects that are returned to the client. + + +### Understanding Iterators + +Iterators are at the heart of the query engine. They provide a simple interface +for looping over a set of points. 
For example, this is an iterator over Float +points: + +``` +type FloatIterator interface { + Next() *FloatPoint +} +``` + +These iterators are created through the `IteratorCreator` interface: + +``` +type IteratorCreator interface { + CreateIterator(opt *IteratorOptions) (Iterator, error) +} +``` + +The `IteratorOptions` provide arguments about field selection, time ranges, +and dimensions that the iterator creator can use when planning an iterator. +The `IteratorCreator` interface is used at many levels such as the `Shards`, +`Shard`, and `Engine`. This allows optimizations to be performed when applicable +such as returning a precomputed `COUNT()`. + +Iterators aren't just for reading raw data from storage though. Iterators can be +composed so that they provided additional functionality around an input +iterator. For example, a `DistinctIterator` can compute the distinct values for +each time window for an input iterator. Or a `FillIterator` can generate +additional points that are missing from an input iterator. + +This composition also lends itself well to aggregation. For example, a statement +such as this: + +``` +SELECT MEAN(value) FROM cpu GROUP BY time(10m) +``` + +In this case, `MEAN(value)` is a `MeanIterator` wrapping an iterator from the +underlying shards. However, if we can add an additional iterator to determine +the derivative of the mean: + +``` +SELECT DERIVATIVE(MEAN(value), 20m) FROM cpu GROUP BY time(10m) +``` + + +### Understanding Auxiliary Fields + +Because InfluxQL allows users to use selector functions such as `FIRST()`, +`LAST()`, `MIN()`, and `MAX()`, the engine must provide a way to return related +data at the same time with the selected point. + +For example, in this query: + +``` +SELECT FIRST(value), host FROM cpu GROUP BY time(1h) +``` + +We are selecting the first `value` that occurs every hour but we also want to +retrieve the `host` associated with that point. Since the `Point` types only +specify a single typed `Value` for efficiency, we push the `host` into the +auxiliary fields of the point. These auxiliary fields are attached to the point +until it is passed to the emitter where the fields get split off to their own +iterator. + + +### Built-in Iterators + +There are many helper iterators that let us build queries: + +* Merge Iterator - This iterator combines one or more iterators into a single + new iterator of the same type. This iterator guarantees that all points + within a window will be output before starting the next window but does not + provide ordering guarantees within the window. This allows for fast access + for aggregate queries which do not need stronger sorting guarantees. + +* Sorted Merge Iterator - This iterator also combines one or more iterators + into a new iterator of the same type. However, this iterator guarantees + time ordering of every point. This makes it slower than the `MergeIterator` + but this ordering guarantee is required for non-aggregate queries which + return the raw data points. + +* Limit Iterator - This iterator limits the number of points per name/tag + group. This is the implementation of the `LIMIT` & `OFFSET` syntax. + +* Fill Iterator - This iterator injects extra points if they are missing from + the input iterator. It can provide `null` points, points with the previous + value, or points with a specific value. + +* Buffered Iterator - This iterator provides the ability to "unread" a point + back onto a buffer so it can be read again next time. 
This is used extensively + to provide lookahead for windowing. + +* Reduce Iterator - This iterator calls a reduction function for each point in + a window. When the window is complete then all points for that window are + output. This is used for simple aggregate functions such as `COUNT()`. + +* Reduce Slice Iterator - This iterator collects all points for a window first + and then passes them all to a reduction function at once. The results are + returned from the iterator. This is used for aggregate functions such as + `DERIVATIVE()`. + +* Transform Iterator - This iterator calls a transform function for each point + from an input iterator. This is used for executing binary expressions. + +* Dedupe Iterator - This iterator only outputs unique points. It is resource + intensive so it is only used for small queries such as meta query statements. + + +### Call Iterators + +Function calls in InfluxQL are implemented at two levels. Some calls can be +wrapped at multiple layers to improve efficiency. For example, a `COUNT()` can +be performed at the shard level and then multiple `CountIterator`s can be +wrapped with another `CountIterator` to compute the count of all shards. These +iterators can be created using `NewCallIterator()`. + +Some iterators are more complex or need to be implemented at a higher level. +For example, the `DERIVATIVE()` needs to retrieve all points for a window first +before performing the calculation. This iterator is created by the engine itself +and is never requested to be created by the lower levels. diff --git a/vendor/github.com/influxdata/influxdb/influxql/ast.go b/vendor/github.com/influxdata/influxdb/influxql/ast.go new file mode 100644 index 0000000000..f4ab637054 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/ast.go @@ -0,0 +1,3921 @@ +package influxql + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/influxql/internal" +) + +// DataType represents the primitive data types available in InfluxQL. +type DataType int + +const ( + // Unknown primitive data type. + Unknown DataType = 0 + // Float means the data type is a float + Float = 1 + // Integer means the data type is a integer + Integer = 2 + // String means the data type is a string of text. + String = 3 + // Boolean means the data type is a boolean. + Boolean = 4 + // Time means the data type is a time. + Time = 5 + // Duration means the data type is a duration of time. + Duration = 6 +) + +// InspectDataType returns the data type of a given value. +func InspectDataType(v interface{}) DataType { + switch v.(type) { + case float64: + return Float + case int64, int32, int: + return Integer + case string: + return String + case bool: + return Boolean + case time.Time: + return Time + case time.Duration: + return Duration + default: + return Unknown + } +} + +func InspectDataTypes(a []interface{}) []DataType { + dta := make([]DataType, len(a)) + for i, v := range a { + dta[i] = InspectDataType(v) + } + return dta +} + +func (d DataType) String() string { + switch d { + case Float: + return "float" + case Integer: + return "integer" + case String: + return "string" + case Boolean: + return "boolean" + case Time: + return "time" + case Duration: + return "duration" + } + return "unknown" +} + +// Node represents a node in the InfluxDB abstract syntax tree. 
+type Node interface { + node() + String() string +} + +func (*Query) node() {} +func (Statements) node() {} + +func (*AlterRetentionPolicyStatement) node() {} +func (*CreateContinuousQueryStatement) node() {} +func (*CreateDatabaseStatement) node() {} +func (*CreateRetentionPolicyStatement) node() {} +func (*CreateSubscriptionStatement) node() {} +func (*CreateUserStatement) node() {} +func (*Distinct) node() {} +func (*DeleteStatement) node() {} +func (*DropContinuousQueryStatement) node() {} +func (*DropDatabaseStatement) node() {} +func (*DropMeasurementStatement) node() {} +func (*DropRetentionPolicyStatement) node() {} +func (*DropSeriesStatement) node() {} +func (*DropServerStatement) node() {} +func (*DropSubscriptionStatement) node() {} +func (*DropUserStatement) node() {} +func (*GrantStatement) node() {} +func (*GrantAdminStatement) node() {} +func (*RevokeStatement) node() {} +func (*RevokeAdminStatement) node() {} +func (*SelectStatement) node() {} +func (*SetPasswordUserStatement) node() {} +func (*ShowContinuousQueriesStatement) node() {} +func (*ShowGrantsForUserStatement) node() {} +func (*ShowServersStatement) node() {} +func (*ShowDatabasesStatement) node() {} +func (*ShowFieldKeysStatement) node() {} +func (*ShowRetentionPoliciesStatement) node() {} +func (*ShowMeasurementsStatement) node() {} +func (*ShowSeriesStatement) node() {} +func (*ShowShardGroupsStatement) node() {} +func (*ShowShardsStatement) node() {} +func (*ShowStatsStatement) node() {} +func (*ShowSubscriptionsStatement) node() {} +func (*ShowDiagnosticsStatement) node() {} +func (*ShowTagKeysStatement) node() {} +func (*ShowTagValuesStatement) node() {} +func (*ShowUsersStatement) node() {} + +func (*BinaryExpr) node() {} +func (*BooleanLiteral) node() {} +func (*Call) node() {} +func (*Dimension) node() {} +func (Dimensions) node() {} +func (*DurationLiteral) node() {} +func (*Field) node() {} +func (Fields) node() {} +func (*Measurement) node() {} +func (Measurements) node() {} +func (*nilLiteral) node() {} +func (*NumberLiteral) node() {} +func (*ParenExpr) node() {} +func (*RegexLiteral) node() {} +func (*SortField) node() {} +func (SortFields) node() {} +func (Sources) node() {} +func (*StringLiteral) node() {} +func (*Target) node() {} +func (*TimeLiteral) node() {} +func (*VarRef) node() {} +func (*Wildcard) node() {} + +// Query represents a collection of ordered statements. +type Query struct { + Statements Statements +} + +// String returns a string representation of the query. +func (q *Query) String() string { return q.Statements.String() } + +// Statements represents a list of statements. +type Statements []Statement + +// String returns a string representation of the statements. +func (a Statements) String() string { + var str []string + for _, stmt := range a { + str = append(str, stmt.String()) + } + return strings.Join(str, ";\n") +} + +// Statement represents a single command in InfluxQL. +type Statement interface { + Node + stmt() + RequiredPrivileges() ExecutionPrivileges +} + +// HasDefaultDatabase provides an interface to get the default database from a Statement. +type HasDefaultDatabase interface { + Node + stmt() + DefaultDatabase() string +} + +// ExecutionPrivilege is a privilege required for a user to execute +// a statement on a database or resource. +type ExecutionPrivilege struct { + // Admin privilege required. + Admin bool + + // Name of the database. + Name string + + // Database privilege required. 
+ Privilege Privilege +} + +// ExecutionPrivileges is a list of privileges required to execute a statement. +type ExecutionPrivileges []ExecutionPrivilege + +func (*AlterRetentionPolicyStatement) stmt() {} +func (*CreateContinuousQueryStatement) stmt() {} +func (*CreateDatabaseStatement) stmt() {} +func (*CreateRetentionPolicyStatement) stmt() {} +func (*CreateSubscriptionStatement) stmt() {} +func (*CreateUserStatement) stmt() {} +func (*DeleteStatement) stmt() {} +func (*DropContinuousQueryStatement) stmt() {} +func (*DropDatabaseStatement) stmt() {} +func (*DropMeasurementStatement) stmt() {} +func (*DropRetentionPolicyStatement) stmt() {} +func (*DropSeriesStatement) stmt() {} +func (*DropServerStatement) stmt() {} +func (*DropSubscriptionStatement) stmt() {} +func (*DropUserStatement) stmt() {} +func (*GrantStatement) stmt() {} +func (*GrantAdminStatement) stmt() {} +func (*ShowContinuousQueriesStatement) stmt() {} +func (*ShowGrantsForUserStatement) stmt() {} +func (*ShowServersStatement) stmt() {} +func (*ShowDatabasesStatement) stmt() {} +func (*ShowFieldKeysStatement) stmt() {} +func (*ShowMeasurementsStatement) stmt() {} +func (*ShowRetentionPoliciesStatement) stmt() {} +func (*ShowSeriesStatement) stmt() {} +func (*ShowShardGroupsStatement) stmt() {} +func (*ShowShardsStatement) stmt() {} +func (*ShowStatsStatement) stmt() {} +func (*ShowSubscriptionsStatement) stmt() {} +func (*ShowDiagnosticsStatement) stmt() {} +func (*ShowTagKeysStatement) stmt() {} +func (*ShowTagValuesStatement) stmt() {} +func (*ShowUsersStatement) stmt() {} +func (*RevokeStatement) stmt() {} +func (*RevokeAdminStatement) stmt() {} +func (*SelectStatement) stmt() {} +func (*SetPasswordUserStatement) stmt() {} + +// Expr represents an expression that can be evaluated to a value. +type Expr interface { + Node + expr() +} + +func (*BinaryExpr) expr() {} +func (*BooleanLiteral) expr() {} +func (*Call) expr() {} +func (*Distinct) expr() {} +func (*DurationLiteral) expr() {} +func (*nilLiteral) expr() {} +func (*NumberLiteral) expr() {} +func (*ParenExpr) expr() {} +func (*RegexLiteral) expr() {} +func (*StringLiteral) expr() {} +func (*TimeLiteral) expr() {} +func (*VarRef) expr() {} +func (*Wildcard) expr() {} + +// Literal represents a static literal. +type Literal interface { + Expr + literal() +} + +func (*BooleanLiteral) literal() {} +func (*DurationLiteral) literal() {} +func (*nilLiteral) literal() {} +func (*NumberLiteral) literal() {} +func (*RegexLiteral) literal() {} +func (*StringLiteral) literal() {} +func (*TimeLiteral) literal() {} + +// Source represents a source of data for a statement. +type Source interface { + Node + source() +} + +func (*Measurement) source() {} + +// Sources represents a list of sources. +type Sources []Source + +// Names returns a list of source names. +func (a Sources) Names() []string { + names := make([]string, 0, len(a)) + for _, s := range a { + switch s := s.(type) { + case *Measurement: + names = append(names, s.Name) + } + } + return names +} + +// HasSystemSource returns true if any of the sources are internal, system sources. +func (a Sources) HasSystemSource() bool { + for _, s := range a { + switch s := s.(type) { + case *Measurement: + if IsSystemName(s.Name) { + return true + } + } + } + return false +} + +// String returns a string representation of a Sources array. 
+func (a Sources) String() string { + var buf bytes.Buffer + + ubound := len(a) - 1 + for i, src := range a { + _, _ = buf.WriteString(src.String()) + if i < ubound { + _, _ = buf.WriteString(", ") + } + } + + return buf.String() +} + +// MarshalBinary encodes a list of sources to a binary format. +func (a Sources) MarshalBinary() ([]byte, error) { + var pb internal.Measurements + pb.Items = make([]*internal.Measurement, len(a)) + for i, source := range a { + pb.Items[i] = encodeMeasurement(source.(*Measurement)) + } + return proto.Marshal(&pb) +} + +// UnmarshalBinary decodes binary data into a list of sources. +func (a *Sources) UnmarshalBinary(buf []byte) error { + var pb internal.Measurements + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + *a = make(Sources, len(pb.GetItems())) + for i := range pb.GetItems() { + mm, err := decodeMeasurement(pb.GetItems()[i]) + if err != nil { + return err + } + (*a)[i] = mm + } + return nil +} + +// IsSystemName returns true if name is an internal system name. +// System names are prefixed with an underscore. +func IsSystemName(name string) bool { return strings.HasPrefix(name, "_") } + +// SortField represents a field to sort results by. +type SortField struct { + // Name of the field + Name string + + // Sort order. + Ascending bool +} + +// String returns a string representation of a sort field +func (field *SortField) String() string { + var buf bytes.Buffer + if field.Name != "" { + _, _ = buf.WriteString(field.Name) + _, _ = buf.WriteString(" ") + } + if field.Ascending { + _, _ = buf.WriteString("ASC") + } else { + _, _ = buf.WriteString("DESC") + } + return buf.String() +} + +// SortFields represents an ordered list of ORDER BY fields +type SortFields []*SortField + +// String returns a string representation of sort fields +func (a SortFields) String() string { + fields := make([]string, 0, len(a)) + for _, field := range a { + fields = append(fields, field.String()) + } + return strings.Join(fields, ", ") +} + +// CreateDatabaseStatement represents a command for creating a new database. +type CreateDatabaseStatement struct { + // Name of the database to be created. + Name string + + // IfNotExists indicates whether to return without error if the database + // already exists. + IfNotExists bool + + // RetentionPolicyCreate indicates whether the user explicitly wants to create a retention policy + RetentionPolicyCreate bool + + // RetentionPolicyDuration indicates retention duration for the new database + RetentionPolicyDuration time.Duration + + // RetentionPolicyReplication indicates retention replication for the new database + RetentionPolicyReplication int + + // RetentionPolicyName indicates retention name for the new database + RetentionPolicyName string +} + +// String returns a string representation of the create database statement. 
+func (s *CreateDatabaseStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE DATABASE ") + if s.IfNotExists { + _, _ = buf.WriteString("IF NOT EXISTS ") + } + _, _ = buf.WriteString(QuoteIdent(s.Name)) + if s.RetentionPolicyCreate { + _, _ = buf.WriteString(" WITH DURATION ") + _, _ = buf.WriteString(s.RetentionPolicyDuration.String()) + _, _ = buf.WriteString(" REPLICATION ") + _, _ = buf.WriteString(strconv.Itoa(s.RetentionPolicyReplication)) + _, _ = buf.WriteString(" NAME ") + _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicyName)) + } + + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a CreateDatabaseStatement. +func (s *CreateDatabaseStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// DropDatabaseStatement represents a command to drop a database. +type DropDatabaseStatement struct { + // Name of the database to be dropped. + Name string + + // IfExists indicates whether to return without error if the database + // does not exists. + IfExists bool +} + +// String returns a string representation of the drop database statement. +func (s *DropDatabaseStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP DATABASE ") + if s.IfExists { + _, _ = buf.WriteString("IF EXISTS ") + } + _, _ = buf.WriteString(QuoteIdent(s.Name)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DropDatabaseStatement. +func (s *DropDatabaseStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// DropRetentionPolicyStatement represents a command to drop a retention policy from a database. +type DropRetentionPolicyStatement struct { + // Name of the policy to drop. + Name string + + // Name of the database to drop the policy from. + Database string +} + +// String returns a string representation of the drop retention policy statement. +func (s *DropRetentionPolicyStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP RETENTION POLICY ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DropRetentionPolicyStatement. +func (s *DropRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}} +} + +// CreateUserStatement represents a command for creating a new user. +type CreateUserStatement struct { + // Name of the user to be created. + Name string + + // User's password. + Password string + + // User's admin privilege. + Admin bool +} + +// String returns a string representation of the create user statement. +func (s *CreateUserStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE USER ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" WITH PASSWORD ") + _, _ = buf.WriteString("[REDACTED]") + if s.Admin { + _, _ = buf.WriteString(" WITH ALL PRIVILEGES") + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a CreateUserStatement. 
+func (s *CreateUserStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// DropUserStatement represents a command for dropping a user. +type DropUserStatement struct { + // Name of the user to drop. + Name string +} + +// String returns a string representation of the drop user statement. +func (s *DropUserStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP USER ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a DropUserStatement. +func (s *DropUserStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// Privilege is a type of action a user can be granted the right to use. +type Privilege int + +const ( + // NoPrivileges means no privileges required / granted / revoked. + NoPrivileges Privilege = iota + // ReadPrivilege means read privilege required / granted / revoked. + ReadPrivilege + // WritePrivilege means write privilege required / granted / revoked. + WritePrivilege + // AllPrivileges means all privileges required / granted / revoked. + AllPrivileges +) + +// NewPrivilege returns an initialized *Privilege. +func NewPrivilege(p Privilege) *Privilege { return &p } + +// String returns a string representation of a Privilege. +func (p Privilege) String() string { + switch p { + case NoPrivileges: + return "NO PRIVILEGES" + case ReadPrivilege: + return "READ" + case WritePrivilege: + return "WRITE" + case AllPrivileges: + return "ALL PRIVILEGES" + } + return "" +} + +// GrantStatement represents a command for granting a privilege. +type GrantStatement struct { + // The privilege to be granted. + Privilege Privilege + + // Database to grant the privilege to. + On string + + // Who to grant the privilege to. + User string +} + +// String returns a string representation of the grant statement. +func (s *GrantStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("GRANT ") + _, _ = buf.WriteString(s.Privilege.String()) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.On)) + _, _ = buf.WriteString(" TO ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a GrantStatement. +func (s *GrantStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// GrantAdminStatement represents a command for granting admin privilege. +type GrantAdminStatement struct { + // Who to grant the privilege to. + User string +} + +// String returns a string representation of the grant admin statement. +func (s *GrantAdminStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("GRANT ALL PRIVILEGES TO ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a GrantAdminStatement. +func (s *GrantAdminStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// SetPasswordUserStatement represents a command for changing user password. +type SetPasswordUserStatement struct { + // Plain Password + Password string + + // Who to grant the privilege to. 
+ Name string +} + +// String returns a string representation of the set password statement. +func (s *SetPasswordUserStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SET PASSWORD FOR ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" = ") + _, _ = buf.WriteString("[REDACTED]") + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a SetPasswordUserStatement. +func (s *SetPasswordUserStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// RevokeStatement represents a command to revoke a privilege from a user. +type RevokeStatement struct { + // The privilege to be revoked. + Privilege Privilege + + // Database to revoke the privilege from. + On string + + // Who to revoke privilege from. + User string +} + +// String returns a string representation of the revoke statement. +func (s *RevokeStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("REVOKE ") + _, _ = buf.WriteString(s.Privilege.String()) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.On)) + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a RevokeStatement. +func (s *RevokeStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// RevokeAdminStatement represents a command to revoke admin privilege from a user. +type RevokeAdminStatement struct { + // Who to revoke admin privilege from. + User string +} + +// String returns a string representation of the revoke admin statement. +func (s *RevokeAdminStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("REVOKE ALL PRIVILEGES FROM ") + _, _ = buf.WriteString(QuoteIdent(s.User)) + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a RevokeAdminStatement. +func (s *RevokeAdminStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// CreateRetentionPolicyStatement represents a command to create a retention policy. +type CreateRetentionPolicyStatement struct { + // Name of policy to create. + Name string + + // Name of database this policy belongs to. + Database string + + // Duration data written to this policy will be retained. + Duration time.Duration + + // Replication factor for data written to this policy. + Replication int + + // Should this policy be set as default for the database? + Default bool +} + +// String returns a string representation of the create retention policy. +func (s *CreateRetentionPolicyStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE RETENTION POLICY ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + _, _ = buf.WriteString(" DURATION ") + _, _ = buf.WriteString(FormatDuration(s.Duration)) + _, _ = buf.WriteString(" REPLICATION ") + _, _ = buf.WriteString(strconv.Itoa(s.Replication)) + if s.Default { + _, _ = buf.WriteString(" DEFAULT") + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a CreateRetentionPolicyStatement. 
+func (s *CreateRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// AlterRetentionPolicyStatement represents a command to alter an existing retention policy.
+type AlterRetentionPolicyStatement struct {
+	// Name of policy to alter.
+	Name string
+
+	// Name of the database this policy belongs to.
+	Database string
+
+	// Duration data written to this policy will be retained.
+	Duration *time.Duration
+
+	// Replication factor for data written to this policy.
+	Replication *int
+
+	// Should this policy be set as default for the database?
+	Default bool
+}
+
+// String returns a string representation of the alter retention policy statement.
+func (s *AlterRetentionPolicyStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("ALTER RETENTION POLICY ")
+	_, _ = buf.WriteString(QuoteIdent(s.Name))
+	_, _ = buf.WriteString(" ON ")
+	_, _ = buf.WriteString(QuoteIdent(s.Database))
+
+	if s.Duration != nil {
+		_, _ = buf.WriteString(" DURATION ")
+		_, _ = buf.WriteString(FormatDuration(*s.Duration))
+	}
+
+	if s.Replication != nil {
+		_, _ = buf.WriteString(" REPLICATION ")
+		_, _ = buf.WriteString(strconv.Itoa(*s.Replication))
+	}
+
+	if s.Default {
+		_, _ = buf.WriteString(" DEFAULT")
+	}
+
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute an AlterRetentionPolicyStatement.
+func (s *AlterRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// FillOption represents different options for aggregate windows.
+type FillOption int
+
+const (
+	// NullFill means that empty aggregate windows will just have null values.
+	NullFill FillOption = iota
+	// NoFill means that empty aggregate windows will be purged from the result.
+	NoFill
+	// NumberFill means that empty aggregate windows will be filled with the given number
+	NumberFill
+	// PreviousFill means that empty aggregate windows will be filled with whatever the previous aggregate window had
+	PreviousFill
+)
+
+// SelectStatement represents a command for extracting data from the database.
+type SelectStatement struct {
+	// Expressions returned from the selection.
+	Fields Fields
+
+	// Target (destination) for the result of the select.
+	Target *Target
+
+	// Expressions used for grouping the selection.
+	Dimensions Dimensions
+
+	// Data sources that fields are extracted from.
+	Sources Sources
+
+	// An expression evaluated on data point.
+	Condition Expr
+
+	// Fields to sort results by
+	SortFields SortFields
+
+	// Maximum number of rows to be returned. Unlimited if zero.
+	Limit int
+
+	// Returns rows starting at an offset from the first row.
+	Offset int
+
+	// Maximum number of series to be returned. Unlimited if zero.
+	SLimit int
+
+	// Returns series starting at an offset from the first one.
+	SOffset int
+
+	// memoize the group by interval
+	groupByInterval time.Duration
+
+	// if it's a query for raw data values (i.e. not an aggregate)
+	IsRawQuery bool
+
+	// What fill option the select statement uses, if any
+	Fill FillOption
+
+	// The value to fill empty aggregate buckets with, if any
+	FillValue interface{}
+
+	// Removes the "time" column from the output.
+	OmitTime bool
+
+	// Removes duplicate rows from raw queries.
+ Dedupe bool +} + +// HasDerivative returns true if one of the function calls in the statement is a +// derivative aggregate +func (s *SelectStatement) HasDerivative() bool { + for _, f := range s.FunctionCalls() { + if f.Name == "derivative" || f.Name == "non_negative_derivative" { + return true + } + } + return false +} + +// IsSimpleDerivative return true if one of the function call is a derivative function with a +// variable ref as the first arg +func (s *SelectStatement) IsSimpleDerivative() bool { + for _, f := range s.FunctionCalls() { + if f.Name == "derivative" || f.Name == "non_negative_derivative" { + // it's nested if the first argument is an aggregate function + if _, ok := f.Args[0].(*VarRef); ok { + return true + } + } + } + return false +} + +// HasSimpleCount return true if one of the function calls is a count function with a +// variable ref as the first arg +func (s *SelectStatement) HasSimpleCount() bool { + // recursively check for a simple count(varref) function + var hasCount func(f *Call) bool + hasCount = func(f *Call) bool { + if f.Name == "count" { + // it's nested if the first argument is an aggregate function + if _, ok := f.Args[0].(*VarRef); ok { + return true + } + } else { + for _, arg := range f.Args { + if child, ok := arg.(*Call); ok { + return hasCount(child) + } + } + } + return false + } + for _, f := range s.FunctionCalls() { + if hasCount(f) { + return true + } + } + return false +} + +// TimeAscending returns true if the time field is sorted in chronological order. +func (s *SelectStatement) TimeAscending() bool { + return len(s.SortFields) == 0 || s.SortFields[0].Ascending +} + +// Clone returns a deep copy of the statement. +func (s *SelectStatement) Clone() *SelectStatement { + clone := &SelectStatement{ + Fields: make(Fields, 0, len(s.Fields)), + Dimensions: make(Dimensions, 0, len(s.Dimensions)), + Sources: cloneSources(s.Sources), + SortFields: make(SortFields, 0, len(s.SortFields)), + Condition: CloneExpr(s.Condition), + Limit: s.Limit, + Offset: s.Offset, + SLimit: s.SLimit, + SOffset: s.SOffset, + Fill: s.Fill, + FillValue: s.FillValue, + IsRawQuery: s.IsRawQuery, + } + if s.Target != nil { + clone.Target = &Target{ + Measurement: &Measurement{ + Database: s.Target.Measurement.Database, + RetentionPolicy: s.Target.Measurement.RetentionPolicy, + Name: s.Target.Measurement.Name, + Regex: CloneRegexLiteral(s.Target.Measurement.Regex), + }, + } + } + for _, f := range s.Fields { + clone.Fields = append(clone.Fields, &Field{Expr: CloneExpr(f.Expr), Alias: f.Alias}) + } + for _, d := range s.Dimensions { + clone.Dimensions = append(clone.Dimensions, &Dimension{Expr: CloneExpr(d.Expr)}) + } + for _, f := range s.SortFields { + clone.SortFields = append(clone.SortFields, &SortField{Name: f.Name, Ascending: f.Ascending}) + } + return clone +} + +func cloneSources(sources Sources) Sources { + clone := make(Sources, 0, len(sources)) + for _, s := range sources { + clone = append(clone, cloneSource(s)) + } + return clone +} + +func cloneSource(s Source) Source { + if s == nil { + return nil + } + + switch s := s.(type) { + case *Measurement: + m := &Measurement{Database: s.Database, RetentionPolicy: s.RetentionPolicy, Name: s.Name} + if s.Regex != nil { + m.Regex = &RegexLiteral{Val: regexp.MustCompile(s.Regex.Val.String())} + } + return m + default: + panic("unreachable") + } +} + +// RewriteWildcards returns the re-written form of the select statement. 
Any wildcard query +// fields are replaced with the supplied fields, and any wildcard GROUP BY fields are replaced +// with the supplied dimensions. +func (s *SelectStatement) RewriteWildcards(ic IteratorCreator) (*SelectStatement, error) { + // Ignore if there are no wildcards. + hasFieldWildcard := s.HasFieldWildcard() + hasDimensionWildcard := s.HasDimensionWildcard() + if !hasFieldWildcard && !hasDimensionWildcard { + return s, nil + } + + // Retrieve a list of unique field and dimensions. + fieldSet, dimensionSet, err := ic.FieldDimensions(s.Sources) + if err != nil { + return s, err + } + + // If there are no dimension wildcards then merge dimensions to fields. + if !hasDimensionWildcard { + // Remove the dimensions present in the group by so they don't get added as fields. + for _, d := range s.Dimensions { + switch expr := d.Expr.(type) { + case *VarRef: + if _, ok := dimensionSet[expr.Val]; ok { + delete(dimensionSet, expr.Val) + } + } + } + + for k := range dimensionSet { + fieldSet[k] = struct{}{} + } + dimensionSet = nil + } + fields := stringSetSlice(fieldSet) + dimensions := stringSetSlice(dimensionSet) + + other := s.Clone() + + // Rewrite all wildcard query fields + if hasFieldWildcard { + // Allocate a slice assuming there is exactly one wildcard for efficiency. + rwFields := make(Fields, 0, len(s.Fields)+len(fields)-1) + for _, f := range s.Fields { + switch f.Expr.(type) { + case *Wildcard: + for _, name := range fields { + rwFields = append(rwFields, &Field{Expr: &VarRef{Val: name}}) + } + default: + rwFields = append(rwFields, f) + } + } + other.Fields = rwFields + } + + // Rewrite all wildcard GROUP BY fields + if hasDimensionWildcard { + // Allocate a slice assuming there is exactly one wildcard for efficiency. + rwDimensions := make(Dimensions, 0, len(s.Dimensions)+len(dimensions)-1) + for _, d := range s.Dimensions { + switch d.Expr.(type) { + case *Wildcard: + for _, name := range dimensions { + rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}}) + } + default: + rwDimensions = append(rwDimensions, d) + } + } + other.Dimensions = rwDimensions + } + + return other, nil +} + +// RewriteDistinct rewrites the expression to be a call for map/reduce to work correctly +// This method assumes all validation has passed +func (s *SelectStatement) RewriteDistinct() { + WalkFunc(s.Fields, func(n Node) { + switch n := n.(type) { + case *Field: + if expr, ok := n.Expr.(*Distinct); ok { + n.Expr = expr.NewCall() + s.IsRawQuery = false + } + case *Call: + for i, arg := range n.Args { + if arg, ok := arg.(*Distinct); ok { + n.Args[i] = arg.NewCall() + } + } + } + }) +} + +// RewriteTimeFields removes any "time" field references. +func (s *SelectStatement) RewriteTimeFields() { + for i := 0; i < len(s.Fields); i++ { + switch expr := s.Fields[i].Expr.(type) { + case *VarRef: + if expr.Val == "time" { + s.Fields = append(s.Fields[:i], s.Fields[i+1:]...) + } + } + } +} + +// ColumnNames will walk all fields and functions and return the appropriate field names for the select statement +// while maintaining order of the field names +func (s *SelectStatement) ColumnNames() []string { + // First walk each field to determine the number of columns. 
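+	// Clarifying note (added here, not in the upstream file): top() and bottom()
+	// contribute one extra column per trailing tag argument, e.g.
+	// SELECT top(value, host, 3) yields the columns time, top, host, which is why
+	// the loop below appends a Field for every *VarRef argument after the first.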
+ columnFields := Fields{} + for _, field := range s.Fields { + columnFields = append(columnFields, field) + + switch f := field.Expr.(type) { + case *Call: + if f.Name == "top" || f.Name == "bottom" { + for _, arg := range f.Args[1:] { + ref, ok := arg.(*VarRef) + if ok { + columnFields = append(columnFields, &Field{Expr: ref}) + } + } + } + } + } + + // Determine if we should add an extra column for an implicit time. + offset := 0 + if !s.OmitTime { + offset++ + } + + columnNames := make([]string, len(columnFields)+offset) + if !s.OmitTime { + // Add the implicit time if requested. + columnNames[0] = "time" + } + + // Keep track of the encountered column names. + names := make(map[string]int) + + // Resolve aliases first. + for i, col := range columnFields { + if col.Alias != "" { + columnNames[i+offset] = col.Alias + names[col.Alias] = 1 + } + } + + // Resolve any generated names and resolve conflicts. + for i, col := range columnFields { + if columnNames[i+offset] != "" { + continue + } + + name := col.Name() + count, conflict := names[name] + if conflict { + for { + resolvedName := fmt.Sprintf("%s_%d", name, count) + _, conflict = names[resolvedName] + if !conflict { + names[name] = count + 1 + name = resolvedName + break + } + count++ + } + } + names[name] += 1 + columnNames[i+offset] = name + } + return columnNames +} + +// HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time +// This is needed to determine re-write behaviors for functions like TOP and BOTTOM +func (s *SelectStatement) HasTimeFieldSpecified() bool { + for _, f := range s.Fields { + if f.Name() == "time" { + return true + } + } + return false +} + +// String returns a string representation of the select statement. +func (s *SelectStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SELECT ") + _, _ = buf.WriteString(s.Fields.String()) + + if s.Target != nil { + _, _ = buf.WriteString(" ") + _, _ = buf.WriteString(s.Target.String()) + } + if len(s.Sources) > 0 { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.Dimensions) > 0 { + _, _ = buf.WriteString(" GROUP BY ") + _, _ = buf.WriteString(s.Dimensions.String()) + } + switch s.Fill { + case NoFill: + _, _ = buf.WriteString(" fill(none)") + case NumberFill: + _, _ = buf.WriteString(fmt.Sprintf(" fill(%v)", s.FillValue)) + case PreviousFill: + _, _ = buf.WriteString(" fill(previous)") + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + if s.SLimit > 0 { + _, _ = fmt.Fprintf(&buf, " SLIMIT %d", s.SLimit) + } + if s.SOffset > 0 { + _, _ = fmt.Fprintf(&buf, " SOFFSET %d", s.SOffset) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute the SelectStatement. 
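+// Hedged example of the resulting privilege set (not from the upstream file):
+// a plain SELECT needs read access only, while SELECT ... INTO additionally
+// needs write access on the target database:
+//
+//	SELECT value FROM cpu                        -> read
+//	SELECT value INTO "db2".."cpu_copy" FROM cpu -> read, plus write on db2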
+func (s *SelectStatement) RequiredPrivileges() ExecutionPrivileges { + ep := ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} + + if s.Target != nil { + p := ExecutionPrivilege{Admin: false, Name: s.Target.Measurement.Database, Privilege: WritePrivilege} + ep = append(ep, p) + } + return ep +} + +// HasWildcard returns whether or not the select statement has at least 1 wildcard +func (s *SelectStatement) HasWildcard() bool { + return s.HasFieldWildcard() || s.HasDimensionWildcard() +} + +// HasFieldWildcard returns whether or not the select statement has at least 1 wildcard in the fields +func (s *SelectStatement) HasFieldWildcard() bool { + for _, f := range s.Fields { + _, ok := f.Expr.(*Wildcard) + if ok { + return true + } + } + + return false +} + +// HasDimensionWildcard returns whether or not the select statement has +// at least 1 wildcard in the dimensions aka `GROUP BY` +func (s *SelectStatement) HasDimensionWildcard() bool { + for _, d := range s.Dimensions { + _, ok := d.Expr.(*Wildcard) + if ok { + return true + } + } + + return false +} + +func (s *SelectStatement) validate(tr targetRequirement) error { + if err := s.validateFields(); err != nil { + return err + } + + if err := s.validateDimensions(); err != nil { + return err + } + + if err := s.validateDistinct(); err != nil { + return err + } + + if err := s.validateCountDistinct(); err != nil { + return err + } + + if err := s.validateAggregates(tr); err != nil { + return err + } + + if err := s.validateDerivative(); err != nil { + return err + } + + return nil +} + +func (s *SelectStatement) validateFields() error { + ns := s.NamesInSelect() + if len(ns) == 1 && ns[0] == "time" { + return fmt.Errorf("at least 1 non-time field must be queried") + } + return nil +} + +func (s *SelectStatement) validateDimensions() error { + var dur time.Duration + for _, dim := range s.Dimensions { + switch expr := dim.Expr.(type) { + case *Call: + // Ensure the call is time() and it only has one duration argument. + // If we already have a duration + if expr.Name != "time" { + return errors.New("only time() calls allowed in dimensions") + } else if len(expr.Args) != 1 { + return errors.New("time dimension expected one argument") + } else if lit, ok := expr.Args[0].(*DurationLiteral); !ok { + return errors.New("time dimension must have one duration argument") + } else if dur != 0 { + return errors.New("multiple time dimensions not allowed") + } else { + dur = lit.Val + } + case *VarRef: + if strings.ToLower(expr.Val) == "time" { + return errors.New("time() is a function and expects at least one argument") + } + case *Wildcard: + default: + return errors.New("only time and tag dimensions allowed") + } + } + return nil +} + +// validSelectWithAggregate determines if a SELECT statement has the correct +// combination of aggregate functions combined with selected fields and tags +// Currently we don't have support for all aggregates, but aggregates that +// can be combined with fields/tags are: +// TOP, BOTTOM, MAX, MIN, FIRST, LAST +func (s *SelectStatement) validSelectWithAggregate() error { + calls := map[string]struct{}{} + numAggregates := 0 + for _, f := range s.Fields { + fieldCalls := walkFunctionCalls(f.Expr) + for _, c := range fieldCalls { + calls[c.Name] = struct{}{} + } + if len(fieldCalls) != 0 { + numAggregates++ + } + } + // For TOP, BOTTOM, MAX, MIN, FIRST, LAST (selector functions) it is ok to ask for fields and tags + // but only if one function is specified. 
Combining multiple functions and fields and tags is not currently supported + onlySelectors := true + for k := range calls { + switch k { + case "top", "bottom", "max", "min", "first", "last": + default: + onlySelectors = false + break + } + } + if onlySelectors { + // If they only have one selector, they can have as many fields or tags as they want + if numAggregates == 1 { + return nil + } + // If they have multiple selectors, they are not allowed to have any other fields or tags specified + if numAggregates > 1 && len(s.Fields) != numAggregates { + return fmt.Errorf("mixing multiple selector functions with tags or fields is not supported") + } + } + + if numAggregates != 0 && numAggregates != len(s.Fields) { + return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported") + } + return nil +} + +// validTopBottomAggr determines if TOP or BOTTOM aggregates have valid arguments. +func (s *SelectStatement) validTopBottomAggr(expr *Call) error { + if exp, got := 2, len(expr.Args); got < exp { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", expr.Name, exp, got) + } + if len(expr.Args) > 1 { + callLimit, ok := expr.Args[len(expr.Args)-1].(*NumberLiteral) + if !ok { + return fmt.Errorf("expected integer as last argument in %s(), found %s", expr.Name, expr.Args[len(expr.Args)-1]) + } + // Check if they asked for a limit smaller than what they passed into the call + if int64(callLimit.Val) > int64(s.Limit) && s.Limit != 0 { + return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", int64(callLimit.Val), expr.Name, int64(s.Limit)) + } + + for _, v := range expr.Args[:len(expr.Args)-1] { + if _, ok := v.(*VarRef); !ok { + return fmt.Errorf("only fields or tags are allowed in %s(), found %s", expr.Name, v) + } + } + } + return nil +} + +// validPercentileAggr determines if PERCENTILE have valid arguments. +func (s *SelectStatement) validPercentileAggr(expr *Call) error { + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if exp, got := 2, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + _, ok := expr.Args[1].(*NumberLiteral) + if !ok { + return fmt.Errorf("expected float argument in percentile()") + } + return nil +} + +func (s *SelectStatement) validateAggregates(tr targetRequirement) error { + for _, f := range s.Fields { + for _, expr := range walkFunctionCalls(f.Expr) { + switch expr.Name { + case "derivative", "non_negative_derivative": + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // Validate that if they have grouping by time, they need a sub-call like min/max, etc. 
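+			// Clarifying example (added here, not in the upstream file):
+			//   SELECT derivative(mean(value), 1h) FROM cpu WHERE time > now() - 1d GROUP BY time(10m)
+			// is accepted, whereas
+			//   SELECT derivative(value, 1h) FROM cpu WHERE time > now() - 1d GROUP BY time(10m)
+			// is rejected below because the first argument is not an aggregate call.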
+ groupByInterval, err := s.GroupByInterval() + if err != nil { + return fmt.Errorf("invalid group interval: %v", err) + } + if groupByInterval > 0 { + c, ok := expr.Args[0].(*Call) + if !ok { + return fmt.Errorf("aggregate function required inside the call to %s", expr.Name) + } + switch c.Name { + case "top", "bottom": + if err := s.validTopBottomAggr(c); err != nil { + return err + } + case "percentile": + if err := s.validPercentileAggr(c); err != nil { + return err + } + default: + if exp, got := 1, len(c.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", c.Name, exp, got) + } + } + } + case "top", "bottom": + if err := s.validTopBottomAggr(expr); err != nil { + return err + } + case "percentile": + if err := s.validPercentileAggr(expr); err != nil { + return err + } + default: + if err := s.validSelectWithAggregate(); err != nil { + return err + } + if exp, got := 1, len(expr.Args); got != exp { + return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) + } + switch fc := expr.Args[0].(type) { + case *VarRef: + // do nothing + case *Call: + if fc.Name != "distinct" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + case *Distinct: + if expr.Name != "count" { + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + default: + return fmt.Errorf("expected field argument in %s()", expr.Name) + } + } + } + } + + // Check that we have valid duration and where clauses for aggregates + + // fetch the group by duration + groupByDuration, _ := s.GroupByInterval() + + // If we have a group by interval, but no aggregate function, it's an invalid statement + if s.IsRawQuery && groupByDuration > 0 { + return fmt.Errorf("GROUP BY requires at least one aggregate function") + } + + // If we have an aggregate function with a group by time without a where clause, it's an invalid statement + if tr == targetNotRequired { // ignore create continuous query statements + if !s.IsRawQuery && groupByDuration > 0 && !HasTimeExpr(s.Condition) { + return fmt.Errorf("aggregate functions with GROUP BY time require a WHERE time clause") + } + } + return nil +} + +// HasDistinct checks if a select statement contains DISTINCT +func (s *SelectStatement) HasDistinct() bool { + // determine if we have a call named distinct + for _, f := range s.Fields { + switch c := f.Expr.(type) { + case *Call: + if c.Name == "distinct" { + return true + } + case *Distinct: + return true + } + } + return false +} + +func (s *SelectStatement) validateDistinct() error { + if !s.HasDistinct() { + return nil + } + + if len(s.Fields) > 1 { + return fmt.Errorf("aggregate function distinct() can not be combined with other functions or fields") + } + + switch c := s.Fields[0].Expr.(type) { + case *Call: + if len(c.Args) == 0 { + return fmt.Errorf("distinct function requires at least one argument") + } + + if len(c.Args) != 1 { + return fmt.Errorf("distinct function can only have one argument") + } + } + return nil +} + +// HasCountDistinct checks if a select statement contains COUNT and DISTINCT +func (s *SelectStatement) HasCountDistinct() bool { + for _, f := range s.Fields { + if c, ok := f.Expr.(*Call); ok { + if c.Name == "count" { + for _, a := range c.Args { + if _, ok := a.(*Distinct); ok { + return true + } + if c, ok := a.(*Call); ok { + if c.Name == "distinct" { + return true + } + } + } + } + } + } + return false +} + +func (s *SelectStatement) validateCountDistinct() error { + if !s.HasCountDistinct() { + 
return nil
+	}
+
+	valid := func(e Expr) bool {
+		c, ok := e.(*Call)
+		if !ok {
+			return true
+		}
+		if c.Name != "count" {
+			return true
+		}
+		for _, a := range c.Args {
+			if _, ok := a.(*Distinct); ok {
+				return len(c.Args) == 1
+			}
+			if d, ok := a.(*Call); ok {
+				if d.Name == "distinct" {
+					return len(d.Args) == 1
+				}
+			}
+		}
+		return true
+	}
+
+	for _, f := range s.Fields {
+		if !valid(f.Expr) {
+			return fmt.Errorf("count(distinct ) can only have one argument")
+		}
+	}
+
+	return nil
+}
+
+func (s *SelectStatement) validateDerivative() error {
+	if !s.HasDerivative() {
+		return nil
+	}
+
+	// If a derivative is requested, it must be the only field in the query. We don't support
+	// multiple fields in combination w/ derivatives yet.
+	if len(s.Fields) != 1 {
+		return fmt.Errorf("derivative cannot be used with other fields")
+	}
+
+	aggr := s.FunctionCalls()
+	if len(aggr) != 1 {
+		return fmt.Errorf("derivative cannot be used with other fields")
+	}
+
+	// Derivative requires two arguments
+	derivativeCall := aggr[0]
+	if len(derivativeCall.Args) == 0 {
+		return fmt.Errorf("derivative requires a field argument")
+	}
+
+	// First arg must be a field or aggr over a field e.g. (mean(field))
+	_, callOk := derivativeCall.Args[0].(*Call)
+	_, varOk := derivativeCall.Args[0].(*VarRef)
+
+	if !(callOk || varOk) {
+		return fmt.Errorf("derivative requires a field argument")
+	}
+
+	// If a duration arg is passed, make sure it's a duration
+	if len(derivativeCall.Args) == 2 {
+		// Second must be a duration, e.g. (1h)
+		if _, ok := derivativeCall.Args[1].(*DurationLiteral); !ok {
+			return fmt.Errorf("derivative requires a duration argument")
+		}
+	}
+
+	return nil
+}
+
+// GroupByInterval extracts the time interval, if specified.
+func (s *SelectStatement) GroupByInterval() (time.Duration, error) {
+	// return if we've already pulled it out
+	if s.groupByInterval != 0 {
+		return s.groupByInterval, nil
+	}
+
+	// Ignore if there are no dimensions.
+	if len(s.Dimensions) == 0 {
+		return 0, nil
+	}
+
+	for _, d := range s.Dimensions {
+		if call, ok := d.Expr.(*Call); ok && call.Name == "time" {
+			// Make sure there is exactly one argument.
+			if len(call.Args) != 1 {
+				return 0, errors.New("time dimension expected one argument")
+			}
+
+			// Ensure the argument is a duration.
+			lit, ok := call.Args[0].(*DurationLiteral)
+			if !ok {
+				return 0, errors.New("time dimension must have one duration argument")
+			}
+			s.groupByInterval = lit.Val
+			return lit.Val, nil
+		}
+	}
+	return 0, nil
+}
+
+// SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive.
+// This is used commonly for continuous queries so the start and end are in buckets.
+func (s *SelectStatement) SetTimeRange(start, end time.Time) error {
+	cond := fmt.Sprintf("time >= '%s' AND time < '%s'", start.UTC().Format(time.RFC3339Nano), end.UTC().Format(time.RFC3339Nano))
+	if s.Condition != nil {
+		cond = fmt.Sprintf("%s AND %s", s.rewriteWithoutTimeDimensions(), cond)
+	}
+
+	expr, err := NewParser(strings.NewReader(cond)).ParseExpr()
+	if err != nil {
+		return err
+	}
+
+	// fold out any previously replaced time dimensions and set the condition
+	s.Condition = Reduce(expr, nil)
+
+	return nil
+}
+
+// rewriteWithoutTimeDimensions will remove any WHERE time... clauses from the select statement
+// This is necessary when setting an explicit time range to override any that previously existed.
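+// For example (illustrative, not from the upstream file), a condition such as
+//
+//	time > now() - 1h AND host = 'serverA'
+//
+// becomes
+//
+//	true AND host = 'serverA'
+//
+// and the leftover boolean literal is folded away by Reduce in SetTimeRange.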
+func (s *SelectStatement) rewriteWithoutTimeDimensions() string { + n := RewriteFunc(s.Condition, func(n Node) Node { + switch n := n.(type) { + case *BinaryExpr: + if n.LHS.String() == "time" { + return &BooleanLiteral{Val: true} + } + return n + case *Call: + return &BooleanLiteral{Val: true} + default: + return n + } + }) + + return n.String() +} + +/* + +BinaryExpr + +SELECT mean(xxx.value) + avg(yyy.value) FROM xxx JOIN yyy WHERE xxx.host = 123 + +from xxx where host = 123 +select avg(value) from yyy where host = 123 + +SELECT xxx.value FROM xxx WHERE xxx.host = 123 +SELECT yyy.value FROM yyy + +--- + +SELECT MEAN(xxx.value) + MEAN(cpu.load.value) +FROM xxx JOIN yyy +GROUP BY host +WHERE (xxx.region == "uswest" OR yyy.region == "uswest") AND xxx.otherfield == "XXX" + +select * from ( + select mean + mean from xxx join yyy + group by time(5m), host +) (xxx.region == "uswest" OR yyy.region == "uswest") AND xxx.otherfield == "XXX" + +(seriesIDS for xxx.region = 'uswest' union seriesIDs for yyy.regnion = 'uswest') | seriesIDS xxx.otherfield = 'XXX' + +WHERE xxx.region == "uswest" AND xxx.otherfield == "XXX" +WHERE yyy.region == "uswest" + + +*/ + +// Substatement returns a single-series statement for a given variable reference. +func (s *SelectStatement) Substatement(ref *VarRef) (*SelectStatement, error) { + // Copy dimensions and properties to new statement. + other := &SelectStatement{ + Fields: Fields{{Expr: ref}}, + Dimensions: s.Dimensions, + Limit: s.Limit, + Offset: s.Offset, + SortFields: s.SortFields, + } + + // If there is only one series source then return it with the whole condition. + if len(s.Sources) == 1 { + other.Sources = s.Sources + other.Condition = s.Condition + return other, nil + } + + // Find the matching source. + name := MatchSource(s.Sources, ref.Val) + if name == "" { + return nil, fmt.Errorf("field source not found: %s", ref.Val) + } + other.Sources = append(other.Sources, &Measurement{Name: name}) + + // Filter out conditions. + if s.Condition != nil { + other.Condition = filterExprBySource(name, s.Condition) + } + + return other, nil +} + +// NamesInWhere returns the field and tag names (idents) referenced in the where clause +func (s *SelectStatement) NamesInWhere() []string { + var a []string + if s.Condition != nil { + a = walkNames(s.Condition) + } + return a +} + +// NamesInSelect returns the field and tag names (idents) in the select clause +func (s *SelectStatement) NamesInSelect() []string { + var a []string + + for _, f := range s.Fields { + a = append(a, walkNames(f.Expr)...) + } + + return a +} + +// NamesInDimension returns the field and tag names (idents) in the group by +func (s *SelectStatement) NamesInDimension() []string { + var a []string + + for _, d := range s.Dimensions { + a = append(a, walkNames(d.Expr)...) + } + + return a +} + +// LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied. +func LimitTagSets(a []*TagSet, slimit, soffset int) []*TagSet { + // Ignore if no limit or offset is specified. + if slimit == 0 && soffset == 0 { + return a + } + + // If offset is beyond the number of tag sets then return nil. + if soffset > len(a) { + return nil + } + + // Clamp limit to the max number of tag sets. 
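+	// Worked example (added for clarity, not in the upstream file): with
+	// len(a) == 10, slimit == 5 and soffset == 8, slimit is clamped to 2 and
+	// a[8:10] is returned.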
+ if soffset+slimit > len(a) { + slimit = len(a) - soffset + } + return a[soffset : soffset+slimit] +} + +// walkNames will walk the Expr and return the database fields +func walkNames(exp Expr) []string { + switch expr := exp.(type) { + case *VarRef: + return []string{expr.Val} + case *Call: + if len(expr.Args) == 0 { + return nil + } + lit, ok := expr.Args[0].(*VarRef) + if !ok { + return nil + } + + return []string{lit.Val} + case *BinaryExpr: + var ret []string + ret = append(ret, walkNames(expr.LHS)...) + ret = append(ret, walkNames(expr.RHS)...) + return ret + case *ParenExpr: + return walkNames(expr.Expr) + } + + return nil +} + +// ExprNames returns a list of non-"time" field names from an expression. +func ExprNames(expr Expr) []string { + m := make(map[string]struct{}) + for _, name := range walkNames(expr) { + if name == "time" { + continue + } + m[name] = struct{}{} + } + + a := make([]string, 0, len(m)) + for k := range m { + a = append(a, k) + } + sort.Strings(a) + + return a +} + +// FunctionCalls returns the Call objects from the query +func (s *SelectStatement) FunctionCalls() []*Call { + var a []*Call + for _, f := range s.Fields { + a = append(a, walkFunctionCalls(f.Expr)...) + } + return a +} + +// FunctionCallsByPosition returns the Call objects from the query in the order they appear in the select statement +func (s *SelectStatement) FunctionCallsByPosition() [][]*Call { + var a [][]*Call + for _, f := range s.Fields { + a = append(a, walkFunctionCalls(f.Expr)) + } + return a +} + +// walkFunctionCalls walks the Field of a query for any function calls made +func walkFunctionCalls(exp Expr) []*Call { + switch expr := exp.(type) { + case *VarRef: + return nil + case *Call: + return []*Call{expr} + case *BinaryExpr: + var ret []*Call + ret = append(ret, walkFunctionCalls(expr.LHS)...) + ret = append(ret, walkFunctionCalls(expr.RHS)...) + return ret + case *ParenExpr: + return walkFunctionCalls(expr.Expr) + } + + return nil +} + +// filters an expression to exclude expressions unrelated to a source. +func filterExprBySource(name string, expr Expr) Expr { + switch expr := expr.(type) { + case *VarRef: + if !strings.HasPrefix(expr.Val, name) { + return nil + } + + case *BinaryExpr: + lhs := filterExprBySource(name, expr.LHS) + rhs := filterExprBySource(name, expr.RHS) + + // If an expr is logical then return either LHS/RHS or both. + // If an expr is arithmetic or comparative then require both sides. + if expr.Op == AND || expr.Op == OR { + if lhs == nil && rhs == nil { + return nil + } else if lhs != nil && rhs == nil { + return lhs + } else if lhs == nil && rhs != nil { + return rhs + } + } else { + if lhs == nil || rhs == nil { + return nil + } + } + return &BinaryExpr{Op: expr.Op, LHS: lhs, RHS: rhs} + + case *ParenExpr: + exp := filterExprBySource(name, expr.Expr) + if exp == nil { + return nil + } + return &ParenExpr{Expr: exp} + } + return expr +} + +// MatchSource returns the source name that matches a field name. +// Returns a blank string if no sources match. +func MatchSource(sources Sources, name string) string { + for _, src := range sources { + switch src := src.(type) { + case *Measurement: + if strings.HasPrefix(name, src.Name) { + return src.Name + } + } + } + return "" +} + +// Target represents a target (destination) policy, measurement, and DB. +type Target struct { + // Measurement to write into. + Measurement *Measurement +} + +// String returns a string representation of the Target. 
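+// Illustrative rendering sketch (not from the upstream file):
+//
+//	t := &Target{Measurement: &Measurement{Database: "db", RetentionPolicy: "rp", Name: "m"}}
+//	_ = t.String() // roughly: INTO db.rp.m
+//
+// An empty measurement name renders the :MEASUREMENT placeholder instead.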
+func (t *Target) String() string { + if t == nil { + return "" + } + + var buf bytes.Buffer + _, _ = buf.WriteString("INTO ") + _, _ = buf.WriteString(t.Measurement.String()) + if t.Measurement.Name == "" { + _, _ = buf.WriteString(":MEASUREMENT") + } + + return buf.String() +} + +// DeleteStatement represents a command for removing data from the database. +type DeleteStatement struct { + // Data source that values are removed from. + Source Source + + // An expression evaluated on data point. + Condition Expr +} + +// String returns a string representation of the delete statement. +func (s *DeleteStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DELETE FROM ") + _, _ = buf.WriteString(s.Source.String()) + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DeleteStatement. +func (s *DeleteStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} +} + +// ShowSeriesStatement represents a command for listing series in the database. +type ShowSeriesStatement struct { + // Measurement(s) the series are listed for. + Sources Sources + + // An expression evaluated on a series name or tag. + Condition Expr + + // Fields to sort results by + SortFields SortFields + + // Maximum number of rows to be returned. + // Unlimited if zero. + Limit int + + // Returns rows starting at an offset from the first row. + Offset int +} + +// String returns a string representation of the list series statement. +func (s *ShowSeriesStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW SERIES") + + if s.Sources != nil { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a ShowSeriesStatement. +func (s *ShowSeriesStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +} + +// DropSeriesStatement represents a command for removing a series from the database. +type DropSeriesStatement struct { + // Data source that fields are extracted from (optional) + Sources Sources + + // An expression evaluated on data point (optional) + Condition Expr +} + +// String returns a string representation of the drop series statement. +func (s *DropSeriesStatement) String() string { + var buf bytes.Buffer + buf.WriteString("DROP SERIES") + + if s.Sources != nil { + buf.WriteString(" FROM ") + buf.WriteString(s.Sources.String()) + } + if s.Condition != nil { + buf.WriteString(" WHERE ") + buf.WriteString(s.Condition.String()) + } + + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a DropSeriesStatement. 
+func (s DropSeriesStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}
+}
+
+// DropServerStatement represents a command for removing a server from the cluster.
+type DropServerStatement struct {
+	// ID of the node to be dropped.
+	NodeID uint64
+
+	// Meta indicates if the server being dropped is a meta or data node
+	Meta bool
+}
+
+// String returns a string representation of the drop server statement.
+func (s *DropServerStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("DROP ")
+	if s.Meta {
+		_, _ = buf.WriteString("META SERVER ")
+	} else {
+		_, _ = buf.WriteString("DATA SERVER ")
+	}
+	_, _ = buf.WriteString(strconv.FormatUint(s.NodeID, 10))
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute a DropServerStatement.
+func (s *DropServerStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Name: "", Privilege: AllPrivileges}}
+}
+
+// ShowContinuousQueriesStatement represents a command for listing continuous queries.
+type ShowContinuousQueriesStatement struct{}
+
+// String returns a string representation of the list continuous queries statement.
+func (s *ShowContinuousQueriesStatement) String() string { return "SHOW CONTINUOUS QUERIES" }
+
+// RequiredPrivileges returns the privilege required to execute a ShowContinuousQueriesStatement.
+func (s *ShowContinuousQueriesStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}
+}
+
+// ShowGrantsForUserStatement represents a command for listing user privileges.
+type ShowGrantsForUserStatement struct {
+	// Name of the user to display privileges.
+	Name string
+}
+
+// String returns a string representation of the show grants for user.
+func (s *ShowGrantsForUserStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("SHOW GRANTS FOR ")
+	_, _ = buf.WriteString(QuoteIdent(s.Name))
+
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege required to execute a ShowGrantsForUserStatement
+func (s *ShowGrantsForUserStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// ShowServersStatement represents a command for listing all servers.
+type ShowServersStatement struct{}
+
+// String returns a string representation of the show servers command.
+func (s *ShowServersStatement) String() string { return "SHOW SERVERS" }
+
+// RequiredPrivileges returns the privilege required to execute a ShowServersStatement
+func (s *ShowServersStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// ShowDatabasesStatement represents a command for listing all databases in the cluster.
+type ShowDatabasesStatement struct{}
+
+// String returns a string representation of the list databases command.
+func (s *ShowDatabasesStatement) String() string { return "SHOW DATABASES" }
+
+// RequiredPrivileges returns the privilege required to execute a ShowDatabasesStatement
+func (s *ShowDatabasesStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// CreateContinuousQueryStatement represents a command for creating a continuous query.
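+// A hedged sketch of the text such a statement renders back to (not from the
+// upstream file):
+//
+//	CREATE CONTINUOUS QUERY cq ON mydb RESAMPLE EVERY 10m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(30m) END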
+type CreateContinuousQueryStatement struct { + // Name of the continuous query to be created. + Name string + + // Name of the database to create the continuous query on. + Database string + + // Source of data (SELECT statement). + Source *SelectStatement + + // Interval to resample previous queries + ResampleEvery time.Duration + + // Maximum duration to resample previous queries + ResampleFor time.Duration +} + +// String returns a string representation of the statement. +func (s *CreateContinuousQueryStatement) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "CREATE CONTINUOUS QUERY %s ON %s ", QuoteIdent(s.Name), QuoteIdent(s.Database)) + + if s.ResampleEvery > 0 || s.ResampleFor > 0 { + buf.WriteString("RESAMPLE ") + if s.ResampleEvery > 0 { + fmt.Fprintf(&buf, "EVERY %s ", FormatDuration(s.ResampleEvery)) + } + if s.ResampleFor > 0 { + fmt.Fprintf(&buf, "FOR %s ", FormatDuration(s.ResampleFor)) + } + } + fmt.Fprintf(&buf, "BEGIN %s END", s.Source.String()) + return buf.String() +} + +// DefaultDatabase returns the default database from the statement. +func (s *CreateContinuousQueryStatement) DefaultDatabase() string { + return s.Database +} + +// RequiredPrivileges returns the privilege required to execute a CreateContinuousQueryStatement. +func (s *CreateContinuousQueryStatement) RequiredPrivileges() ExecutionPrivileges { + ep := ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}} + + // Selecting into a database that's different from the source? + if s.Source.Target.Measurement.Database != "" { + // Change source database privilege requirement to read. + ep[0].Privilege = ReadPrivilege + + // Add destination database privilege requirement and set it to write. + p := ExecutionPrivilege{ + Admin: false, + Name: s.Source.Target.Measurement.Database, + Privilege: WritePrivilege, + } + ep = append(ep, p) + } + + return ep +} + +func (s *CreateContinuousQueryStatement) validate() error { + interval, err := s.Source.GroupByInterval() + if err != nil { + return err + } + + if s.ResampleFor != 0 { + if s.ResampleEvery != 0 && s.ResampleEvery > interval { + interval = s.ResampleEvery + } + if interval > s.ResampleFor { + return fmt.Errorf("FOR duration must be >= GROUP BY time duration: must be a minimum of %s, got %s", FormatDuration(interval), FormatDuration(s.ResampleFor)) + } + } + return nil +} + +// DropContinuousQueryStatement represents a command for removing a continuous query. +type DropContinuousQueryStatement struct { + Name string + Database string +} + +// String returns a string representation of the statement. +func (s *DropContinuousQueryStatement) String() string { + return fmt.Sprintf("DROP CONTINUOUS QUERY %s ON %s", QuoteIdent(s.Name), QuoteIdent(s.Database)) +} + +// RequiredPrivileges returns the privilege(s) required to execute a DropContinuousQueryStatement +func (s *DropContinuousQueryStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}} +} + +// ShowMeasurementsStatement represents a command for listing measurements. +type ShowMeasurementsStatement struct { + // Measurement name or regex. + Source Source + + // An expression evaluated on data point. + Condition Expr + + // Fields to sort results by + SortFields SortFields + + // Maximum number of rows to be returned. + // Unlimited if zero. + Limit int + + // Returns rows starting at an offset from the first row. + Offset int +} + +// String returns a string representation of the statement. 
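+// Illustrative output sketch (not from the upstream file):
+//
+//	SHOW MEASUREMENTS WITH MEASUREMENT =~ /cpu.*/ WHERE host = 'serverA' LIMIT 10
+//
+// A non-regex source renders with "= " in place of "=~ ".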
+func (s *ShowMeasurementsStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW MEASUREMENTS") + + if s.Source != nil { + _, _ = buf.WriteString(" WITH MEASUREMENT ") + if m, ok := s.Source.(*Measurement); ok && m.Regex != nil { + _, _ = buf.WriteString("=~ ") + } else { + _, _ = buf.WriteString("= ") + } + _, _ = buf.WriteString(s.Source.String()) + } + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowMeasurementsStatement +func (s *ShowMeasurementsStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +} + +// DropMeasurementStatement represents a command to drop a measurement. +type DropMeasurementStatement struct { + // Name of the measurement to be dropped. + Name string +} + +// String returns a string representation of the drop measurement statement. +func (s *DropMeasurementStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("DROP MEASUREMENT ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a DropMeasurementStatement +func (s *DropMeasurementStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// ShowRetentionPoliciesStatement represents a command for listing retention policies. +type ShowRetentionPoliciesStatement struct { + // Name of the database to list policies for. + Database string +} + +// String returns a string representation of a ShowRetentionPoliciesStatement. +func (s *ShowRetentionPoliciesStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW RETENTION POLICIES ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowRetentionPoliciesStatement +func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +} + +// ShowStatsStatement displays statistics for a given module. +type ShowStatsStatement struct { + // Module + Module string +} + +// String returns a string representation of a ShowStatsStatement. +func (s *ShowStatsStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW STATS ") + if s.Module != "" { + _, _ = buf.WriteString("FOR ") + _, _ = buf.WriteString(s.Module) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowStatsStatement +func (s *ShowStatsStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// ShowShardGroupsStatement represents a command for displaying shard groups in the cluster. +type ShowShardGroupsStatement struct{} + +// String returns a string representation of the SHOW SHARD GROUPS command. 
+func (s *ShowShardGroupsStatement) String() string { return "SHOW SHARD GROUPS" } + +// RequiredPrivileges returns the privileges required to execute the statement. +func (s *ShowShardGroupsStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// ShowShardsStatement represents a command for displaying shards in the cluster. +type ShowShardsStatement struct{} + +// String returns a string representation. +func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" } + +// RequiredPrivileges returns the privileges required to execute the statement. +func (s *ShowShardsStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// ShowDiagnosticsStatement represents a command for show node diagnostics. +type ShowDiagnosticsStatement struct { + // Module + Module string +} + +// String returns a string representation of the ShowDiagnosticsStatement. +func (s *ShowDiagnosticsStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW DIAGNOSTICS ") + if s.Module != "" { + _, _ = buf.WriteString("FOR ") + _, _ = buf.WriteString(s.Module) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement +func (s *ShowDiagnosticsStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// CreateSubscriptionStatement represents a command to add a subscription to the incoming data stream +type CreateSubscriptionStatement struct { + Name string + Database string + RetentionPolicy string + Destinations []string + Mode string +} + +// String returns a string representation of the CreateSubscriptionStatement. +func (s *CreateSubscriptionStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("CREATE SUBSCRIPTION ") + _, _ = buf.WriteString(QuoteIdent(s.Name)) + _, _ = buf.WriteString(" ON ") + _, _ = buf.WriteString(QuoteIdent(s.Database)) + _, _ = buf.WriteString(".") + _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicy)) + _, _ = buf.WriteString(" DESTINATIONS ") + _, _ = buf.WriteString(s.Mode) + _, _ = buf.WriteString(" ") + for i, dest := range s.Destinations { + if i != 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(QuoteString(dest)) + } + + return buf.String() +} + +// RequiredPrivileges returns the privilege required to execute a CreateSubscriptionStatement +func (s *CreateSubscriptionStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// DropSubscriptionStatement represents a command to drop a subscription to the incoming data stream. +type DropSubscriptionStatement struct { + Name string + Database string + RetentionPolicy string +} + +// String returns a string representation of the DropSubscriptionStatement. +func (s *DropSubscriptionStatement) String() string { + return fmt.Sprintf(`DROP SUBSCRIPTION %s ON %s.%s`, QuoteIdent(s.Name), QuoteIdent(s.Database), QuoteIdent(s.RetentionPolicy)) +} + +// RequiredPrivileges returns the privilege required to execute a DropSubscriptionStatement +func (s *DropSubscriptionStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// ShowSubscriptionsStatement represents a command to show a list of subscriptions. 
+type ShowSubscriptionsStatement struct {
+}
+
+// String returns a string representation of the ShowSubscriptionsStatement.
+func (s *ShowSubscriptionsStatement) String() string {
+	return "SHOW SUBSCRIPTIONS"
+}
+
+// RequiredPrivileges returns the privilege required to execute a ShowSubscriptionsStatement
+func (s *ShowSubscriptionsStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
+}
+
+// ShowTagKeysStatement represents a command for listing tag keys.
+type ShowTagKeysStatement struct {
+	// Data sources that fields are extracted from.
+	Sources Sources
+
+	// An expression evaluated on each data point.
+	Condition Expr
+
+	// Fields to sort results by
+	SortFields SortFields
+
+	// Maximum number of tag keys per measurement. Unlimited if zero.
+	Limit int
+
+	// Returns tag keys starting at an offset from the first row.
+	Offset int
+
+	// Maximum number of series to be returned. Unlimited if zero.
+	SLimit int
+
+	// Returns series starting at an offset from the first one.
+	SOffset int
+}
+
+// String returns a string representation of the statement.
+func (s *ShowTagKeysStatement) String() string {
+	var buf bytes.Buffer
+	_, _ = buf.WriteString("SHOW TAG KEYS")
+
+	if s.Sources != nil {
+		_, _ = buf.WriteString(" FROM ")
+		_, _ = buf.WriteString(s.Sources.String())
+	}
+	if s.Condition != nil {
+		_, _ = buf.WriteString(" WHERE ")
+		_, _ = buf.WriteString(s.Condition.String())
+	}
+	if len(s.SortFields) > 0 {
+		_, _ = buf.WriteString(" ORDER BY ")
+		_, _ = buf.WriteString(s.SortFields.String())
+	}
+	if s.Limit > 0 {
+		_, _ = buf.WriteString(" LIMIT ")
+		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
+	}
+	if s.Offset > 0 {
+		_, _ = buf.WriteString(" OFFSET ")
+		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
+	}
+	if s.SLimit > 0 {
+		_, _ = buf.WriteString(" SLIMIT ")
+		_, _ = buf.WriteString(strconv.Itoa(s.SLimit))
+	}
+	if s.SOffset > 0 {
+		_, _ = buf.WriteString(" SOFFSET ")
+		_, _ = buf.WriteString(strconv.Itoa(s.SOffset))
+	}
+	return buf.String()
+}
+
+// RequiredPrivileges returns the privilege(s) required to execute a ShowTagKeysStatement
+func (s *ShowTagKeysStatement) RequiredPrivileges() ExecutionPrivileges {
+	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}
+}
+
+// ShowTagValuesStatement represents a command for listing tag values.
+type ShowTagValuesStatement struct {
+	// Data sources that fields are extracted from.
+	Sources Sources
+
+	// Tag key(s) to pull values from.
+	TagKeys []string
+
+	// An expression evaluated on each data point.
+	Condition Expr
+
+	// Fields to sort results by
+	SortFields SortFields
+
+	// Maximum number of rows to be returned.
+	// Unlimited if zero.
+	Limit int
+
+	// Returns rows starting at an offset from the first row.
+	Offset int
+}
+
+// String returns a string representation of the statement.
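+// For example (an illustrative sketch, assuming QuoteIdent leaves plain
+// identifiers unquoted), a statement whose TagKeys are "a long name" and
+// "short", with no other clauses set, would render roughly as:
+//
+//	SHOW TAG VALUES WITH KEY IN ("a long name", short)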
+func (s *ShowTagValuesStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW TAG VALUES") + + if s.Sources != nil { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + _, _ = buf.WriteString(" WITH KEY IN (") + for idx, tagKey := range s.TagKeys { + if idx != 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(QuoteIdent(tagKey)) + } + _, _ = buf.WriteString(")") + if s.Condition != nil { + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(s.Condition.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowTagValuesStatement +func (s *ShowTagValuesStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +} + +// ShowUsersStatement represents a command for listing users. +type ShowUsersStatement struct{} + +// String returns a string representation of the ShowUsersStatement. +func (s *ShowUsersStatement) String() string { + return "SHOW USERS" +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowUsersStatement +func (s *ShowUsersStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}} +} + +// ShowFieldKeysStatement represents a command for listing field keys. +type ShowFieldKeysStatement struct { + // Data sources that fields are extracted from. + Sources Sources + + // Fields to sort results by + SortFields SortFields + + // Maximum number of rows to be returned. + // Unlimited if zero. + Limit int + + // Returns rows starting at an offset from the first row. + Offset int +} + +// String returns a string representation of the statement. +func (s *ShowFieldKeysStatement) String() string { + var buf bytes.Buffer + _, _ = buf.WriteString("SHOW FIELD KEYS") + + if s.Sources != nil { + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(s.Sources.String()) + } + if len(s.SortFields) > 0 { + _, _ = buf.WriteString(" ORDER BY ") + _, _ = buf.WriteString(s.SortFields.String()) + } + if s.Limit > 0 { + _, _ = buf.WriteString(" LIMIT ") + _, _ = buf.WriteString(strconv.Itoa(s.Limit)) + } + if s.Offset > 0 { + _, _ = buf.WriteString(" OFFSET ") + _, _ = buf.WriteString(strconv.Itoa(s.Offset)) + } + return buf.String() +} + +// RequiredPrivileges returns the privilege(s) required to execute a ShowFieldKeysStatement +func (s *ShowFieldKeysStatement) RequiredPrivileges() ExecutionPrivileges { + return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}} +} + +// Fields represents a list of fields. +type Fields []*Field + +// AliasNames returns a list of calculated field names in +// order of alias, function name, then field. +func (a Fields) AliasNames() []string { + names := []string{} + for _, f := range a { + names = append(names, f.Name()) + } + return names +} + +// Names returns a list of field names. 
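+// For example (illustrative; the measurement and field names below are
+// hypothetical), the fields of SELECT count(errors), value FROM requests
+// yield []string{"count", "value"}: calls contribute their function name and
+// variable references contribute their identifier. Compare AliasNames above,
+// which goes through Field.Name and therefore prefers an explicit alias.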
+func (a Fields) Names() []string { + names := []string{} + for _, f := range a { + switch expr := f.Expr.(type) { + case *Call: + names = append(names, expr.Name) + case *VarRef: + names = append(names, expr.Val) + case *BinaryExpr: + names = append(names, walkNames(expr)...) + case *ParenExpr: + names = append(names, walkNames(expr)...) + } + } + return names +} + +// String returns a string representation of the fields. +func (a Fields) String() string { + var str []string + for _, f := range a { + str = append(str, f.String()) + } + return strings.Join(str, ", ") +} + +// Field represents an expression retrieved from a select statement. +type Field struct { + Expr Expr + Alias string +} + +// Name returns the name of the field. Returns alias, if set. +// Otherwise uses the function name or variable name. +func (f *Field) Name() string { + // Return alias, if set. + if f.Alias != "" { + return f.Alias + } + + // Return the function name or variable name, if available. + switch expr := f.Expr.(type) { + case *Call: + return expr.Name + case *BinaryExpr: + return BinaryExprName(expr) + case *ParenExpr: + f := Field{Expr: expr.Expr} + return f.Name() + case *VarRef: + return expr.Val + } + + // Otherwise return a blank name. + return "" +} + +// String returns a string representation of the field. +func (f *Field) String() string { + str := f.Expr.String() + + if f.Alias == "" { + return str + } + return fmt.Sprintf("%s AS %s", str, QuoteIdent(f.Alias)) +} + +// Sort Interface for Fields +func (a Fields) Len() int { return len(a) } +func (a Fields) Less(i, j int) bool { return a[i].Name() < a[j].Name() } +func (a Fields) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Dimensions represents a list of dimensions. +type Dimensions []*Dimension + +// String returns a string representation of the dimensions. +func (a Dimensions) String() string { + var str []string + for _, d := range a { + str = append(str, d.String()) + } + return strings.Join(str, ", ") +} + +// Normalize returns the interval and tag dimensions separately. +// Returns 0 if no time interval is specified. +func (a Dimensions) Normalize() (time.Duration, []string) { + var dur time.Duration + var tags []string + + for _, dim := range a { + switch expr := dim.Expr.(type) { + case *Call: + lit, _ := expr.Args[0].(*DurationLiteral) + dur = lit.Val + case *VarRef: + tags = append(tags, expr.Val) + } + } + + return dur, tags +} + +// Dimension represents an expression that a select statement is grouped by. +type Dimension struct { + Expr Expr +} + +// String returns a string representation of the dimension. +func (d *Dimension) String() string { return d.Expr.String() } + +// Measurements represents a list of measurements. +type Measurements []*Measurement + +// String returns a string representation of the measurements. +func (a Measurements) String() string { + var str []string + for _, m := range a { + str = append(str, m.String()) + } + return strings.Join(str, ", ") +} + +// Measurement represents a single measurement used as a datasource. +type Measurement struct { + Database string + RetentionPolicy string + Name string + Regex *RegexLiteral + IsTarget bool +} + +// String returns a string representation of the measurement. 
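+// For example (an illustrative sketch, assuming QuoteIdent leaves plain
+// identifiers unquoted): {Database: "mydb", RetentionPolicy: "myrp", Name: "cpu"}
+// renders as mydb.myrp.cpu, while {Database: "mydb", Name: "cpu"} renders as
+// mydb..cpu, keeping the empty retention-policy slot between the two dots.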
+func (m *Measurement) String() string { + var buf bytes.Buffer + if m.Database != "" { + _, _ = buf.WriteString(QuoteIdent(m.Database)) + _, _ = buf.WriteString(".") + } + + if m.RetentionPolicy != "" { + _, _ = buf.WriteString(QuoteIdent(m.RetentionPolicy)) + } + + if m.Database != "" || m.RetentionPolicy != "" { + _, _ = buf.WriteString(`.`) + } + + if m.Name != "" { + _, _ = buf.WriteString(QuoteIdent(m.Name)) + } else if m.Regex != nil { + _, _ = buf.WriteString(m.Regex.String()) + } + + return buf.String() +} + +func encodeMeasurement(mm *Measurement) *internal.Measurement { + pb := &internal.Measurement{ + Database: proto.String(mm.Database), + RetentionPolicy: proto.String(mm.RetentionPolicy), + Name: proto.String(mm.Name), + IsTarget: proto.Bool(mm.IsTarget), + } + if mm.Regex != nil { + pb.Regex = proto.String(mm.Regex.String()) + } + return pb +} + +func decodeMeasurement(pb *internal.Measurement) (*Measurement, error) { + mm := &Measurement{ + Database: pb.GetDatabase(), + RetentionPolicy: pb.GetRetentionPolicy(), + Name: pb.GetName(), + IsTarget: pb.GetIsTarget(), + } + + if pb.Regex != nil { + regex, err := regexp.Compile(pb.GetRegex()) + if err != nil { + return nil, fmt.Errorf("invalid binary measurement regex: value=%q, err=%s", pb.GetRegex(), err) + } + mm.Regex = &RegexLiteral{Val: regex} + } + + return mm, nil +} + +// VarRef represents a reference to a variable. +type VarRef struct { + Val string +} + +// String returns a string representation of the variable reference. +func (r *VarRef) String() string { + return QuoteIdent(r.Val) +} + +// Call represents a function call. +type Call struct { + Name string + Args []Expr +} + +// String returns a string representation of the call. +func (c *Call) String() string { + // Join arguments. + var str []string + for _, arg := range c.Args { + str = append(str, arg.String()) + } + + // Write function name and args. + return fmt.Sprintf("%s(%s)", c.Name, strings.Join(str, ", ")) +} + +// Fields will extract any field names from the call. Only specific calls support this. +func (c *Call) Fields() []string { + switch c.Name { + case "top", "bottom": + // maintain the order the user specified in the query + keyMap := make(map[string]struct{}) + keys := []string{} + for i, a := range c.Args { + if i == 0 { + // special case, first argument is always the name of the function regardless of the field name + keys = append(keys, c.Name) + continue + } + switch v := a.(type) { + case *VarRef: + if _, ok := keyMap[v.Val]; !ok { + keyMap[v.Val] = struct{}{} + keys = append(keys, v.Val) + } + } + } + return keys + case "min", "max", "first", "last", "sum", "mean": + // maintain the order the user specified in the query + keyMap := make(map[string]struct{}) + keys := []string{} + for _, a := range c.Args { + switch v := a.(type) { + case *VarRef: + if _, ok := keyMap[v.Val]; !ok { + keyMap[v.Val] = struct{}{} + keys = append(keys, v.Val) + } + } + } + return keys + default: + panic(fmt.Sprintf("*call.Fields is unable to provide information on %s", c.Name)) + } +} + +// Distinct represents a DISTINCT expression. +type Distinct struct { + // Identifier following DISTINCT + Val string +} + +// String returns a string representation of the expression. +func (d *Distinct) String() string { + return fmt.Sprintf("DISTINCT %s", d.Val) +} + +// NewCall returns a new call expression from this expressions. 
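+// For example (illustrative), &Distinct{Val: "host"} yields a call that
+// renders as distinct(host), so code that only understands the call form can
+// treat DISTINCT host and distinct(host) uniformly.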
+func (d *Distinct) NewCall() *Call { + return &Call{ + Name: "distinct", + Args: []Expr{ + &VarRef{Val: d.Val}, + }, + } +} + +// NumberLiteral represents a numeric literal. +type NumberLiteral struct { + Val float64 +} + +// String returns a string representation of the literal. +func (l *NumberLiteral) String() string { return strconv.FormatFloat(l.Val, 'f', 3, 64) } + +// BooleanLiteral represents a boolean literal. +type BooleanLiteral struct { + Val bool +} + +// String returns a string representation of the literal. +func (l *BooleanLiteral) String() string { + if l.Val { + return "true" + } + return "false" +} + +// isTrueLiteral returns true if the expression is a literal "true" value. +func isTrueLiteral(expr Expr) bool { + if expr, ok := expr.(*BooleanLiteral); ok { + return expr.Val == true + } + return false +} + +// isFalseLiteral returns true if the expression is a literal "false" value. +func isFalseLiteral(expr Expr) bool { + if expr, ok := expr.(*BooleanLiteral); ok { + return expr.Val == false + } + return false +} + +// StringLiteral represents a string literal. +type StringLiteral struct { + Val string +} + +// String returns a string representation of the literal. +func (l *StringLiteral) String() string { return QuoteString(l.Val) } + +// TimeLiteral represents a point-in-time literal. +type TimeLiteral struct { + Val time.Time +} + +// String returns a string representation of the literal. +func (l *TimeLiteral) String() string { + return `'` + l.Val.UTC().Format(time.RFC3339Nano) + `'` +} + +// DurationLiteral represents a duration literal. +type DurationLiteral struct { + Val time.Duration +} + +// String returns a string representation of the literal. +func (l *DurationLiteral) String() string { return FormatDuration(l.Val) } + +// nilLiteral represents a nil literal. +// This is not available to the query language itself. It's only used internally. +type nilLiteral struct{} + +// String returns a string representation of the literal. +func (l *nilLiteral) String() string { return `nil` } + +// BinaryExpr represents an operation between two expressions. +type BinaryExpr struct { + Op Token + LHS Expr + RHS Expr +} + +// String returns a string representation of the binary expression. +func (e *BinaryExpr) String() string { + return fmt.Sprintf("%s %s %s", e.LHS.String(), e.Op.String(), e.RHS.String()) +} + +func BinaryExprName(expr *BinaryExpr) string { + v := binaryExprNameVisitor{} + Walk(&v, expr) + return strings.Join(v.names, "_") +} + +type binaryExprNameVisitor struct { + names []string +} + +func (v *binaryExprNameVisitor) Visit(n Node) Visitor { + switch n := n.(type) { + case *VarRef: + v.names = append(v.names, n.Val) + case *Call: + v.names = append(v.names, n.Name) + return nil + } + return v +} + +// ParenExpr represents a parenthesized expression. +type ParenExpr struct { + Expr Expr +} + +// String returns a string representation of the parenthesized expression. +func (e *ParenExpr) String() string { return fmt.Sprintf("(%s)", e.Expr.String()) } + +// RegexLiteral represents a regular expression. +type RegexLiteral struct { + Val *regexp.Regexp +} + +// String returns a string representation of the literal. +func (r *RegexLiteral) String() string { + if r.Val != nil { + return fmt.Sprintf("/%s/", strings.Replace(r.Val.String(), `/`, `\/`, -1)) + } + return "" +} + +// CloneRegexLiteral returns a clone of the RegexLiteral. 
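+// Note (editorial): the clone recompiles the pattern with regexp.MustCompile,
+// so the copy owns its own *regexp.Regexp; by contrast, CloneExpr below copies
+// a *RegexLiteral by sharing the same compiled value.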
+func CloneRegexLiteral(r *RegexLiteral) *RegexLiteral {
+	if r == nil {
+		return nil
+	}
+
+	clone := &RegexLiteral{}
+	if r.Val != nil {
+		clone.Val = regexp.MustCompile(r.Val.String())
+	}
+
+	return clone
+}
+
+// Wildcard represents a wildcard expression.
+type Wildcard struct{}
+
+// String returns a string representation of the wildcard.
+func (e *Wildcard) String() string { return "*" }
+
+// CloneExpr returns a deep copy of the expression.
+func CloneExpr(expr Expr) Expr {
+	if expr == nil {
+		return nil
+	}
+	switch expr := expr.(type) {
+	case *BinaryExpr:
+		return &BinaryExpr{Op: expr.Op, LHS: CloneExpr(expr.LHS), RHS: CloneExpr(expr.RHS)}
+	case *BooleanLiteral:
+		return &BooleanLiteral{Val: expr.Val}
+	case *Call:
+		args := make([]Expr, len(expr.Args))
+		for i, arg := range expr.Args {
+			args[i] = CloneExpr(arg)
+		}
+		return &Call{Name: expr.Name, Args: args}
+	case *Distinct:
+		return &Distinct{Val: expr.Val}
+	case *DurationLiteral:
+		return &DurationLiteral{Val: expr.Val}
+	case *NumberLiteral:
+		return &NumberLiteral{Val: expr.Val}
+	case *ParenExpr:
+		return &ParenExpr{Expr: CloneExpr(expr.Expr)}
+	case *RegexLiteral:
+		return &RegexLiteral{Val: expr.Val}
+	case *StringLiteral:
+		return &StringLiteral{Val: expr.Val}
+	case *TimeLiteral:
+		return &TimeLiteral{Val: expr.Val}
+	case *VarRef:
+		return &VarRef{Val: expr.Val}
+	case *Wildcard:
+		return &Wildcard{}
+	}
+	panic("unreachable")
+}
+
+// HasTimeExpr returns true if the expression has a time term.
+func HasTimeExpr(expr Expr) bool {
+	switch n := expr.(type) {
+	case *BinaryExpr:
+		if n.Op == AND || n.Op == OR {
+			return HasTimeExpr(n.LHS) || HasTimeExpr(n.RHS)
+		}
+		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
+			return true
+		}
+		return false
+	case *ParenExpr:
+		// walk down the tree
+		return HasTimeExpr(n.Expr)
+	default:
+		return false
+	}
+}
+
+// OnlyTimeExpr returns true if the expression only has time constraints.
+func OnlyTimeExpr(expr Expr) bool {
+	if expr == nil {
+		return false
+	}
+	switch n := expr.(type) {
+	case *BinaryExpr:
+		if n.Op == AND || n.Op == OR {
+			return OnlyTimeExpr(n.LHS) && OnlyTimeExpr(n.RHS)
+		}
+		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
+			return true
+		}
+		return false
+	case *ParenExpr:
+		// walk down the tree
+		return OnlyTimeExpr(n.Expr)
+	default:
+		return false
+	}
+}
+
+// TimeRange returns the minimum and maximum times specified by an expression.
+// Returns zero times if there is no bound.
+func TimeRange(expr Expr) (min, max time.Time) {
+	WalkFunc(expr, func(n Node) {
+		if n, ok := n.(*BinaryExpr); ok {
+			// Extract literal expression & operator on LHS.
+			// Check for "time" on the left-hand side first.
+			// Otherwise check for "time" on the right-hand side and flip the operator.
+			value, op := timeExprValue(n.LHS, n.RHS), n.Op
+			if value.IsZero() {
+				if value = timeExprValue(n.RHS, n.LHS); value.IsZero() {
+					return
+				} else if op == LT {
+					op = GT
+				} else if op == LTE {
+					op = GTE
+				} else if op == GT {
+					op = LT
+				} else if op == GTE {
+					op = LTE
+				}
+			}
+
+			// Update the min/max depending on the operator.
+			// The GT & LT cases adjust the value by +/- 1ns so the bound stays exclusive ("not equal").
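+			// For example (mirroring the cases exercised in ast_test.go):
+			// time > '2000-01-01 00:00:00' sets min to 2000-01-01T00:00:00.000000001Z and
+			// leaves max at the zero time, while time = '2000-01-01 00:00:00' yields a
+			// window of exactly one nanosecond starting at the given time.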
+ switch op { + case GT: + if min.IsZero() || value.After(min) { + min = value.Add(time.Nanosecond) + } + case GTE: + if min.IsZero() || value.After(min) { + min = value + } + case LT: + if max.IsZero() || value.Before(max) { + max = value.Add(-time.Nanosecond) + } + case LTE: + if max.IsZero() || value.Before(max) { + max = value + } + case EQ: + if min.IsZero() || value.After(min) { + min = value + } + if max.IsZero() || value.Add(1*time.Nanosecond).Before(max) { + max = value.Add(1 * time.Nanosecond) + } + } + } + }) + return +} + +// TimeRangeAsEpochNano returns the minimum and maximum times, as epoch nano, specified by +// and expression. If there is no lower bound, the start of the epoch is returned +// for minimum. If there is no higher bound, now is returned for maximum. +func TimeRangeAsEpochNano(expr Expr) (min, max int64) { + tmin, tmax := TimeRange(expr) + if tmin.IsZero() { + min = time.Unix(0, 0).UnixNano() + } else { + min = tmin.UnixNano() + } + if tmax.IsZero() { + max = time.Now().UnixNano() + } else { + max = tmax.UnixNano() + } + return +} + +// timeExprValue returns the time literal value of a "time == " expression. +// Returns zero time if the expression is not a time expression. +func timeExprValue(ref Expr, lit Expr) time.Time { + if ref, ok := ref.(*VarRef); ok && strings.ToLower(ref.Val) == "time" { + switch lit := lit.(type) { + case *TimeLiteral: + return lit.Val + case *DurationLiteral: + return time.Unix(0, int64(lit.Val)).UTC() + case *NumberLiteral: + return time.Unix(0, int64(lit.Val)).UTC() + } + } + return time.Time{} +} + +// Visitor can be called by Walk to traverse an AST hierarchy. +// The Visit() function is called once per node. +type Visitor interface { + Visit(Node) Visitor +} + +// Walk traverses a node hierarchy in depth-first order. +func Walk(v Visitor, node Node) { + if node == nil { + return + } + + if v = v.Visit(node); v == nil { + return + } + + switch n := node.(type) { + case *BinaryExpr: + Walk(v, n.LHS) + Walk(v, n.RHS) + + case *Call: + for _, expr := range n.Args { + Walk(v, expr) + } + + case *CreateContinuousQueryStatement: + Walk(v, n.Source) + + case *Dimension: + Walk(v, n.Expr) + + case Dimensions: + for _, c := range n { + Walk(v, c) + } + + case *DropSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + + case *Field: + Walk(v, n.Expr) + + case Fields: + for _, c := range n { + Walk(v, c) + } + + case *ParenExpr: + Walk(v, n.Expr) + + case *Query: + Walk(v, n.Statements) + + case *SelectStatement: + Walk(v, n.Fields) + Walk(v, n.Target) + Walk(v, n.Dimensions) + Walk(v, n.Sources) + Walk(v, n.Condition) + Walk(v, n.SortFields) + + case *ShowSeriesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + + case *ShowTagKeysStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + Walk(v, n.SortFields) + + case *ShowTagValuesStatement: + Walk(v, n.Sources) + Walk(v, n.Condition) + Walk(v, n.SortFields) + + case *ShowFieldKeysStatement: + Walk(v, n.Sources) + Walk(v, n.SortFields) + + case SortFields: + for _, sf := range n { + Walk(v, sf) + } + + case Sources: + for _, s := range n { + Walk(v, s) + } + + case Statements: + for _, s := range n { + Walk(v, s) + } + + case *Target: + if n != nil { + Walk(v, n.Measurement) + } + } +} + +// WalkFunc traverses a node hierarchy in depth-first order. 
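+// For example (illustrative), WalkFunc can collect every variable referenced
+// by an expression:
+//
+//	var refs []string
+//	WalkFunc(expr, func(n Node) {
+//		if ref, ok := n.(*VarRef); ok {
+//			refs = append(refs, ref.Val)
+//		}
+//	})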
+func WalkFunc(node Node, fn func(Node)) { + Walk(walkFuncVisitor(fn), node) +} + +type walkFuncVisitor func(Node) + +func (fn walkFuncVisitor) Visit(n Node) Visitor { fn(n); return fn } + +// Rewriter can be called by Rewrite to replace nodes in the AST hierarchy. +// The Rewrite() function is called once per node. +type Rewriter interface { + Rewrite(Node) Node +} + +// Rewrite recursively invokes the rewriter to replace each node. +// Nodes are traversed depth-first and rewritten from leaf to root. +func Rewrite(r Rewriter, node Node) Node { + switch n := node.(type) { + case *Query: + n.Statements = Rewrite(r, n.Statements).(Statements) + + case Statements: + for i, s := range n { + n[i] = Rewrite(r, s).(Statement) + } + + case *SelectStatement: + n.Fields = Rewrite(r, n.Fields).(Fields) + n.Dimensions = Rewrite(r, n.Dimensions).(Dimensions) + n.Sources = Rewrite(r, n.Sources).(Sources) + n.Condition = Rewrite(r, n.Condition).(Expr) + + case Fields: + for i, f := range n { + n[i] = Rewrite(r, f).(*Field) + } + + case *Field: + n.Expr = Rewrite(r, n.Expr).(Expr) + + case Dimensions: + for i, d := range n { + n[i] = Rewrite(r, d).(*Dimension) + } + + case *Dimension: + n.Expr = Rewrite(r, n.Expr).(Expr) + + case *BinaryExpr: + n.LHS = Rewrite(r, n.LHS).(Expr) + n.RHS = Rewrite(r, n.RHS).(Expr) + + case *ParenExpr: + n.Expr = Rewrite(r, n.Expr).(Expr) + + case *Call: + for i, expr := range n.Args { + n.Args[i] = Rewrite(r, expr).(Expr) + } + } + + return r.Rewrite(node) +} + +// RewriteFunc rewrites a node hierarchy. +func RewriteFunc(node Node, fn func(Node) Node) Node { + return Rewrite(rewriterFunc(fn), node) +} + +type rewriterFunc func(Node) Node + +func (fn rewriterFunc) Rewrite(n Node) Node { return fn(n) } + +// RewriteExpr recursively invokes the function to replace each expr. +// Nodes are traversed depth-first and rewritten from leaf to root. +func RewriteExpr(expr Expr, fn func(Expr) Expr) Expr { + switch e := expr.(type) { + case *BinaryExpr: + e.LHS = RewriteExpr(e.LHS, fn) + e.RHS = RewriteExpr(e.RHS, fn) + if e.LHS != nil && e.RHS == nil { + expr = e.LHS + } else if e.RHS != nil && e.LHS == nil { + expr = e.RHS + } else if e.LHS == nil && e.RHS == nil { + return nil + } + + case *ParenExpr: + e.Expr = RewriteExpr(e.Expr, fn) + if e.Expr == nil { + return nil + } + + case *Call: + for i, expr := range e.Args { + e.Args[i] = RewriteExpr(expr, fn) + } + } + + return fn(expr) +} + +// Eval evaluates expr against a map. +func Eval(expr Expr, m map[string]interface{}) interface{} { + if expr == nil { + return nil + } + + switch expr := expr.(type) { + case *BinaryExpr: + return evalBinaryExpr(expr, m) + case *BooleanLiteral: + return expr.Val + case *NumberLiteral: + return expr.Val + case *ParenExpr: + return Eval(expr.Expr, m) + case *StringLiteral: + return expr.Val + case *VarRef: + return m[expr.Val] + default: + return nil + } +} + +func evalBinaryExpr(expr *BinaryExpr, m map[string]interface{}) interface{} { + lhs := Eval(expr.LHS, m) + rhs := Eval(expr.RHS, m) + + // Evaluate if both sides are simple types. 
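+	// For example (mirroring TestEval in ast_test.go): evaluating foo / 2 with
+	// foo bound to float64(4) yields float64(2); division by zero yields 0 rather
+	// than an error, and mismatched operand types (e.g. 4 AND 5) fall through to nil.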
+ switch lhs := lhs.(type) { + case bool: + rhs, _ := rhs.(bool) + switch expr.Op { + case AND: + return lhs && rhs + case OR: + return lhs || rhs + case EQ: + return lhs == rhs + case NEQ: + return lhs != rhs + } + case float64: + rhs, _ := rhs.(float64) + switch expr.Op { + case EQ: + return lhs == rhs + case NEQ: + return lhs != rhs + case LT: + return lhs < rhs + case LTE: + return lhs <= rhs + case GT: + return lhs > rhs + case GTE: + return lhs >= rhs + case ADD: + return lhs + rhs + case SUB: + return lhs - rhs + case MUL: + return lhs * rhs + case DIV: + if rhs == 0 { + return float64(0) + } + return lhs / rhs + } + case int64: + // we parse all number literals as float 64, so we have to convert from + // an interface to the float64, then cast to an int64 for comparison + rhsf, _ := rhs.(float64) + rhs := int64(rhsf) + switch expr.Op { + case EQ: + return lhs == rhs + case NEQ: + return lhs != rhs + case LT: + return lhs < rhs + case LTE: + return lhs <= rhs + case GT: + return lhs > rhs + case GTE: + return lhs >= rhs + case ADD: + return lhs + rhs + case SUB: + return lhs - rhs + case MUL: + return lhs * rhs + case DIV: + if rhs == 0 { + return int64(0) + } + return lhs / rhs + } + case string: + rhs, _ := rhs.(string) + switch expr.Op { + case EQ: + return lhs == rhs + case NEQ: + return lhs != rhs + } + } + return nil +} + +// EvalBool evaluates expr and returns true if result is a boolean true. +// Otherwise returns false. +func EvalBool(expr Expr, m map[string]interface{}) bool { + v, _ := Eval(expr, m).(bool) + return v +} + +// Reduce evaluates expr using the available values in valuer. +// References that don't exist in valuer are ignored. +func Reduce(expr Expr, valuer Valuer) Expr { + expr = reduce(expr, valuer) + + // Unwrap parens at top level. + if expr, ok := expr.(*ParenExpr); ok { + return expr.Expr + } + return expr +} + +func reduce(expr Expr, valuer Valuer) Expr { + if expr == nil { + return nil + } + + switch expr := expr.(type) { + case *BinaryExpr: + return reduceBinaryExpr(expr, valuer) + case *Call: + return reduceCall(expr, valuer) + case *ParenExpr: + return reduceParenExpr(expr, valuer) + case *VarRef: + return reduceVarRef(expr, valuer) + default: + return CloneExpr(expr) + } +} + +func reduceBinaryExpr(expr *BinaryExpr, valuer Valuer) Expr { + // Reduce both sides first. + op := expr.Op + lhs := reduce(expr.LHS, valuer) + rhs := reduce(expr.RHS, valuer) + + // Do not evaluate if one side is nil. + if lhs == nil || rhs == nil { + return &BinaryExpr{LHS: lhs, RHS: rhs, Op: expr.Op} + } + + // If we have a logical operator (AND, OR) and one side is a boolean literal + // then we need to have special handling. + if op == AND { + if isFalseLiteral(lhs) || isFalseLiteral(rhs) { + return &BooleanLiteral{Val: false} + } else if isTrueLiteral(lhs) { + return rhs + } else if isTrueLiteral(rhs) { + return lhs + } + } else if op == OR { + if isTrueLiteral(lhs) || isTrueLiteral(rhs) { + return &BooleanLiteral{Val: true} + } else if isFalseLiteral(lhs) { + return rhs + } else if isFalseLiteral(rhs) { + return lhs + } + } + + // Evaluate if both sides are simple types. 
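+	// For example (illustrative): 1 + 2 folds to the number literal 3.000, and
+	// with a NowValuer supplied, now() - 1h first reduces the call to a
+	// TimeLiteral and then folds the subtraction into a single TimeLiteral.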
+ switch lhs := lhs.(type) { + case *BooleanLiteral: + return reduceBinaryExprBooleanLHS(op, lhs, rhs) + case *DurationLiteral: + return reduceBinaryExprDurationLHS(op, lhs, rhs) + case *nilLiteral: + return reduceBinaryExprNilLHS(op, lhs, rhs) + case *NumberLiteral: + return reduceBinaryExprNumberLHS(op, lhs, rhs) + case *StringLiteral: + return reduceBinaryExprStringLHS(op, lhs, rhs) + case *TimeLiteral: + return reduceBinaryExprTimeLHS(op, lhs, rhs) + default: + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} + } +} + +func reduceBinaryExprBooleanLHS(op Token, lhs *BooleanLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *BooleanLiteral: + switch op { + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case AND: + return &BooleanLiteral{Val: lhs.Val && rhs.Val} + case OR: + return &BooleanLiteral{Val: lhs.Val || rhs.Val} + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprDurationLHS(op Token, lhs *DurationLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *DurationLiteral: + switch op { + case ADD: + return &DurationLiteral{Val: lhs.Val + rhs.Val} + case SUB: + return &DurationLiteral{Val: lhs.Val - rhs.Val} + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case GT: + return &BooleanLiteral{Val: lhs.Val > rhs.Val} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= rhs.Val} + case LT: + return &BooleanLiteral{Val: lhs.Val < rhs.Val} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= rhs.Val} + } + case *NumberLiteral: + switch op { + case MUL: + return &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)} + case DIV: + if rhs.Val == 0 { + return &DurationLiteral{Val: 0} + } + return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)} + } + case *TimeLiteral: + switch op { + case ADD: + return &TimeLiteral{Val: rhs.Val.Add(lhs.Val)} + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprNilLHS(op Token, lhs *nilLiteral, rhs Expr) Expr { + switch op { + case EQ, NEQ: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprNumberLHS(op Token, lhs *NumberLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *NumberLiteral: + switch op { + case ADD: + return &NumberLiteral{Val: lhs.Val + rhs.Val} + case SUB: + return &NumberLiteral{Val: lhs.Val - rhs.Val} + case MUL: + return &NumberLiteral{Val: lhs.Val * rhs.Val} + case DIV: + if rhs.Val == 0 { + return &NumberLiteral{Val: 0} + } + return &NumberLiteral{Val: lhs.Val / rhs.Val} + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case GT: + return &BooleanLiteral{Val: lhs.Val > rhs.Val} + case GTE: + return &BooleanLiteral{Val: lhs.Val >= rhs.Val} + case LT: + return &BooleanLiteral{Val: lhs.Val < rhs.Val} + case LTE: + return &BooleanLiteral{Val: lhs.Val <= rhs.Val} + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprStringLHS(op Token, lhs *StringLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *StringLiteral: + switch op { + case EQ: + return &BooleanLiteral{Val: lhs.Val == rhs.Val} + case NEQ: + return &BooleanLiteral{Val: lhs.Val != rhs.Val} + case 
ADD: + return &StringLiteral{Val: lhs.Val + rhs.Val} + } + case *nilLiteral: + switch op { + case EQ, NEQ: + return &BooleanLiteral{Val: false} + } + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceBinaryExprTimeLHS(op Token, lhs *TimeLiteral, rhs Expr) Expr { + switch rhs := rhs.(type) { + case *DurationLiteral: + switch op { + case ADD: + return &TimeLiteral{Val: lhs.Val.Add(rhs.Val)} + case SUB: + return &TimeLiteral{Val: lhs.Val.Add(-rhs.Val)} + } + case *TimeLiteral: + switch op { + case SUB: + return &DurationLiteral{Val: lhs.Val.Sub(rhs.Val)} + case EQ: + return &BooleanLiteral{Val: lhs.Val.Equal(rhs.Val)} + case NEQ: + return &BooleanLiteral{Val: !lhs.Val.Equal(rhs.Val)} + case GT: + return &BooleanLiteral{Val: lhs.Val.After(rhs.Val)} + case GTE: + return &BooleanLiteral{Val: lhs.Val.After(rhs.Val) || lhs.Val.Equal(rhs.Val)} + case LT: + return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val)} + case LTE: + return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val) || lhs.Val.Equal(rhs.Val)} + } + case *nilLiteral: + return &BooleanLiteral{Val: false} + } + return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs} +} + +func reduceCall(expr *Call, valuer Valuer) Expr { + // Evaluate "now()" if valuer is set. + if expr.Name == "now" && len(expr.Args) == 0 && valuer != nil { + if v, ok := valuer.Value("now()"); ok { + v, _ := v.(time.Time) + return &TimeLiteral{Val: v} + } + } + + // Otherwise reduce arguments. + args := make([]Expr, len(expr.Args)) + for i, arg := range expr.Args { + args[i] = reduce(arg, valuer) + } + return &Call{Name: expr.Name, Args: args} +} + +func reduceParenExpr(expr *ParenExpr, valuer Valuer) Expr { + subexpr := reduce(expr.Expr, valuer) + if subexpr, ok := subexpr.(*BinaryExpr); ok { + return &ParenExpr{Expr: subexpr} + } + return subexpr +} + +func reduceVarRef(expr *VarRef, valuer Valuer) Expr { + // Ignore if there is no valuer. + if valuer == nil { + return &VarRef{Val: expr.Val} + } + + // Retrieve the value of the ref. + // Ignore if the value doesn't exist. + v, ok := valuer.Value(expr.Val) + if !ok { + return &VarRef{Val: expr.Val} + } + + // Return the value as a literal. + switch v := v.(type) { + case bool: + return &BooleanLiteral{Val: v} + case time.Duration: + return &DurationLiteral{Val: v} + case float64: + return &NumberLiteral{Val: v} + case string: + return &StringLiteral{Val: v} + case time.Time: + return &TimeLiteral{Val: v} + default: + return &nilLiteral{} + } +} + +// Valuer is the interface that wraps the Value() method. +// +// Value returns the value and existence flag for a given key. +type Valuer interface { + Value(key string) (interface{}, bool) +} + +// NowValuer returns only the value for "now()". +type NowValuer struct { + Now time.Time +} + +// Value is a method that returns the value and existence flag for a given key. +func (v *NowValuer) Value(key string) (interface{}, bool) { + if key == "now()" { + return v.Now, true + } + return nil, false +} + +// ContainsVarRef returns true if expr is a VarRef or contains one. 
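+// For example (illustrative): value + 1 contains a variable reference, while
+// max(value) does not count as one here because the visitor stops descending
+// at a *Call (its Visit returns nil for calls).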
+func ContainsVarRef(expr Expr) bool { + var v containsVarRefVisitor + Walk(&v, expr) + return v.contains +} + +type containsVarRefVisitor struct { + contains bool +} + +func (v *containsVarRefVisitor) Visit(n Node) Visitor { + switch n.(type) { + case *Call: + return nil + case *VarRef: + v.contains = true + } + return v +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/ast_test.go b/vendor/github.com/influxdata/influxdb/influxql/ast_test.go new file mode 100644 index 0000000000..592218e897 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/ast_test.go @@ -0,0 +1,1298 @@ +package influxql_test + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/influxdata/influxdb/influxql" +) + +// Ensure a value's data type can be retrieved. +func TestInspectDataType(t *testing.T) { + for i, tt := range []struct { + v interface{} + typ influxql.DataType + }{ + {float64(100), influxql.Float}, + {int64(100), influxql.Integer}, + {int32(100), influxql.Integer}, + {100, influxql.Integer}, + {true, influxql.Boolean}, + {"string", influxql.String}, + {time.Now(), influxql.Time}, + {time.Second, influxql.Duration}, + {nil, influxql.Unknown}, + } { + if typ := influxql.InspectDataType(tt.v); tt.typ != typ { + t.Errorf("%d. %v (%s): unexpected type: %s", i, tt.v, tt.typ, typ) + continue + } + } +} + +func TestDataType_String(t *testing.T) { + for i, tt := range []struct { + typ influxql.DataType + v string + }{ + {influxql.Float, "float"}, + {influxql.Integer, "integer"}, + {influxql.Boolean, "boolean"}, + {influxql.String, "string"}, + {influxql.Time, "time"}, + {influxql.Duration, "duration"}, + {influxql.Unknown, "unknown"}, + } { + if v := tt.typ.String(); tt.v != v { + t.Errorf("%d. %v (%s): unexpected string: %s", i, tt.typ, tt.v, v) + } + } +} + +// Ensure the SELECT statement can extract substatements. +func TestSelectStatement_Substatement(t *testing.T) { + var tests = []struct { + stmt string + expr *influxql.VarRef + sub string + err string + }{ + // 0. Single series + { + stmt: `SELECT value FROM myseries WHERE value > 1`, + expr: &influxql.VarRef{Val: "value"}, + sub: `SELECT value FROM myseries WHERE value > 1.000`, + }, + + // 1. Simple join + { + stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb`, + expr: &influxql.VarRef{Val: "aa.value"}, + sub: `SELECT "aa.value" FROM aa`, + }, + + // 2. Simple merge + { + stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb`, + expr: &influxql.VarRef{Val: "bb.value"}, + sub: `SELECT "bb.value" FROM bb`, + }, + + // 3. Join with condition + { + stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE aa.host = 'servera' AND bb.host = 'serverb'`, + expr: &influxql.VarRef{Val: "bb.value"}, + sub: `SELECT "bb.value" FROM bb WHERE "bb.host" = 'serverb'`, + }, + + // 4. Join with complex condition + { + stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE aa.host = 'servera' AND (bb.host = 'serverb' OR bb.host = 'serverc') AND 1 = 2`, + expr: &influxql.VarRef{Val: "bb.value"}, + sub: `SELECT "bb.value" FROM bb WHERE ("bb.host" = 'serverb' OR "bb.host" = 'serverc') AND 1.000 = 2.000`, + }, + + // 5. 
4 with different condition order + { + stmt: `SELECT sum(aa.value) + sum(bb.value) FROM aa, bb WHERE ((bb.host = 'serverb' OR bb.host = 'serverc') AND aa.host = 'servera') AND 1 = 2`, + expr: &influxql.VarRef{Val: "bb.value"}, + sub: `SELECT "bb.value" FROM bb WHERE (("bb.host" = 'serverb' OR "bb.host" = 'serverc')) AND 1.000 = 2.000`, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Extract substatement. + sub, err := stmt.(*influxql.SelectStatement).Substatement(tt.expr) + if err != nil { + t.Errorf("%d. %q: unexpected error: %s", i, tt.stmt, err) + continue + } + if substr := sub.String(); tt.sub != substr { + t.Errorf("%d. %q: unexpected substatement:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.sub, substr) + continue + } + } +} + +// Ensure the SELECT statement can extract GROUP BY interval. +func TestSelectStatement_GroupByInterval(t *testing.T) { + q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" + stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s := stmt.(*influxql.SelectStatement) + d, err := s.GroupByInterval() + if d != 10*time.Minute { + t.Fatalf("group by interval not equal:\nexp=%s\ngot=%s", 10*time.Minute, d) + } + if err != nil { + t.Fatalf("error parsing group by interval: %s", err.Error()) + } +} + +// Ensure the SELECT statement can have its start and end time set +func TestSelectStatement_SetTimeRange(t *testing.T) { + q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" + stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s := stmt.(*influxql.SelectStatement) + min, max := influxql.TimeRange(s.Condition) + start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() + end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC() + s.SetTimeRange(start, end) + min, max = influxql.TimeRange(s.Condition) + + if min != start { + t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) + } + // the end range is actually one nanosecond before the given one since end is exclusive + end = end.Add(-time.Nanosecond) + if max != end { + t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) + } + + // ensure we can set a time on a select that already has one set + start = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() + end = time.Now().Add(10 * time.Hour).Round(time.Second).UTC() + q = fmt.Sprintf("SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)", start.Unix(), end.Unix()) + stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s = stmt.(*influxql.SelectStatement) + min, max = influxql.TimeRange(s.Condition) + if start != min || end != max { + t.Fatalf("start and end times weren't equal:\n exp: %s\n got: %s\n exp: %s\n got:%s\n", start, min, end, max) + } + + // update and ensure it saves it + start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() + end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() + s.SetTimeRange(start, end) + min, max = influxql.TimeRange(s.Condition) + + // TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're 
trying to set it to. + // shouldn't matter for our purposes with continuous queries, but fix this later + + if min != start { + t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) + } + // the end range is actually one nanosecond before the given one since end is exclusive + end = end.Add(-time.Nanosecond) + if max != end { + t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) + } + + // ensure that when we set a time range other where clause conditions are still there + q = "SELECT sum(value) from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)" + stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", stmt, err) + } + + s = stmt.(*influxql.SelectStatement) + + // update and ensure it saves it + start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() + end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() + s.SetTimeRange(start, end) + min, max = influxql.TimeRange(s.Condition) + + if min != start { + t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) + } + // the end range is actually one nanosecond before the given one since end is exclusive + end = end.Add(-time.Nanosecond) + if max != end { + t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) + } + + // ensure the where clause is there + hasWhere := false + influxql.WalkFunc(s.Condition, func(n influxql.Node) { + if ex, ok := n.(*influxql.BinaryExpr); ok { + if lhs, ok := ex.LHS.(*influxql.VarRef); ok { + if lhs.Val == "foo" { + if rhs, ok := ex.RHS.(*influxql.StringLiteral); ok { + if rhs.Val == "bar" { + hasWhere = true + } + } + } + } + } + }) + if !hasWhere { + t.Fatal("set time range cleared out the where clause") + } +} + +// Ensure the idents from the select clause can come out +func TestSelect_NamesInSelect(t *testing.T) { + s := MustParseSelectStatement("select count(asdf), count(bar) from cpu") + a := s.NamesInSelect() + if !reflect.DeepEqual(a, []string{"asdf", "bar"}) { + t.Fatal("expected names asdf and bar") + } +} + +// Ensure the idents from the where clause can come out +func TestSelect_NamesInWhere(t *testing.T) { + s := MustParseSelectStatement("select * from cpu where time > 23s AND (asdf = 'jkl' OR (foo = 'bar' AND baz = 'bar'))") + a := s.NamesInWhere() + if !reflect.DeepEqual(a, []string{"time", "asdf", "foo", "baz"}) { + t.Fatalf("exp: time,asdf,foo,baz\ngot: %s\n", strings.Join(a, ",")) + } +} + +func TestSelectStatement_HasWildcard(t *testing.T) { + var tests = []struct { + stmt string + wildcard bool + }{ + // No wildcards + { + stmt: `SELECT value FROM cpu`, + wildcard: false, + }, + + // Query wildcard + { + stmt: `SELECT * FROM cpu`, + wildcard: true, + }, + + // No GROUP BY wildcards + { + stmt: `SELECT value FROM cpu GROUP BY host`, + wildcard: false, + }, + + // No GROUP BY wildcards, time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + wildcard: false, + }, + + // GROUP BY wildcard + { + stmt: `SELECT value FROM cpu GROUP BY *`, + wildcard: true, + }, + + // GROUP BY wildcard with time + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, + wildcard: true, + }, + + // GROUP BY wildcard with explicit + { + stmt: `SELECT value FROM cpu GROUP BY *,host`, + wildcard: true, + }, + + // GROUP BY multiple wildcards + { + stmt: `SELECT value FROM cpu GROUP BY *,*`, + wildcard: true, + }, + + // Combo + { + stmt: `SELECT * FROM cpu GROUP BY *`, + wildcard: 
true, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test wildcard detection. + if w := stmt.(*influxql.SelectStatement).HasWildcard(); tt.wildcard != w { + t.Errorf("%d. %q: unexpected wildcard detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.wildcard, w) + continue + } + } +} + +// Test SELECT statement wildcard rewrite. +func TestSelectStatement_RewriteWildcards(t *testing.T) { + var tests = []struct { + stmt string + rewrite string + }{ + // No wildcards + { + stmt: `SELECT value FROM cpu`, + rewrite: `SELECT value FROM cpu`, + }, + + // Query wildcard + { + stmt: `SELECT * FROM cpu`, + rewrite: `SELECT host, region, value1, value2 FROM cpu`, + }, + + // Parser fundamentally prohibits multiple query sources + + // Query wildcard with explicit + { + stmt: `SELECT *,value1 FROM cpu`, + rewrite: `SELECT host, region, value1, value2, value1 FROM cpu`, + }, + + // Query multiple wildcards + { + stmt: `SELECT *,* FROM cpu`, + rewrite: `SELECT host, region, value1, value2, host, region, value1, value2 FROM cpu`, + }, + + // Query wildcards with group by + { + stmt: `SELECT * FROM cpu GROUP BY host`, + rewrite: `SELECT region, value1, value2 FROM cpu GROUP BY host`, + }, + + // No GROUP BY wildcards + { + stmt: `SELECT value FROM cpu GROUP BY host`, + rewrite: `SELECT value FROM cpu GROUP BY host`, + }, + + // No GROUP BY wildcards, time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5ms)`, + }, + + // GROUP BY wildcard + { + stmt: `SELECT value FROM cpu GROUP BY *`, + rewrite: `SELECT value FROM cpu GROUP BY host, region`, + }, + + // GROUP BY wildcard with time + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, + rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`, + }, + + // GROUP BY wildarde with fill + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`, + rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`, + }, + + // GROUP BY wildcard with explicit + { + stmt: `SELECT value FROM cpu GROUP BY *,host`, + rewrite: `SELECT value FROM cpu GROUP BY host, region, host`, + }, + + // GROUP BY multiple wildcards + { + stmt: `SELECT value FROM cpu GROUP BY *,*`, + rewrite: `SELECT value FROM cpu GROUP BY host, region, host, region`, + }, + + // Combo + { + stmt: `SELECT * FROM cpu GROUP BY *`, + rewrite: `SELECT value1, value2 FROM cpu GROUP BY host, region`, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + var ic IteratorCreator + ic.FieldDimensionsFn = func(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) { + fields = map[string]struct{}{"value1": struct{}{}, "value2": struct{}{}} + dimensions = map[string]struct{}{"host": struct{}{}, "region": struct{}{}} + return + } + + // Rewrite statement. + rw, err := stmt.(*influxql.SelectStatement).RewriteWildcards(&ic) + if err != nil { + t.Errorf("%d. %q: error: %s", i, tt.stmt, err) + } else if rw == nil { + t.Errorf("%d. 
%q: unexpected nil statement", i, tt.stmt) + } else if rw := rw.String(); tt.rewrite != rw { + t.Errorf("%d. %q: unexpected rewrite:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.rewrite, rw) + } + } +} + +// Ensure that the IsRawQuery flag gets set properly +func TestSelectStatement_IsRawQuerySet(t *testing.T) { + var tests = []struct { + stmt string + isRaw bool + }{ + { + stmt: "select * from foo", + isRaw: true, + }, + { + stmt: "select value1,value2 from foo", + isRaw: true, + }, + { + stmt: "select value1,value2 from foo, time(10m)", + isRaw: true, + }, + { + stmt: "select mean(value) from foo where time < now() group by time(5m)", + isRaw: false, + }, + { + stmt: "select mean(value) from foo group by bar", + isRaw: false, + }, + { + stmt: "select mean(value) from foo group by *", + isRaw: false, + }, + { + stmt: "select mean(value) from foo group by *", + isRaw: false, + }, + } + + for _, tt := range tests { + s := MustParseSelectStatement(tt.stmt) + if s.IsRawQuery != tt.isRaw { + t.Errorf("'%s', IsRawQuery should be %v", tt.stmt, tt.isRaw) + } + } +} + +func TestSelectStatement_HasDerivative(t *testing.T) { + var tests = []struct { + stmt string + derivative bool + }{ + // No derivatives + { + stmt: `SELECT value FROM cpu`, + derivative: false, + }, + + // Query derivative + { + stmt: `SELECT derivative(value) FROM cpu`, + derivative: true, + }, + + // No GROUP BY time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: false, + }, + + // No GROUP BY derivatives, time only + { + stmt: `SELECT derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: true, + }, + + { + stmt: `SELECT value FROM cpu`, + derivative: false, + }, + + // Query derivative + { + stmt: `SELECT non_negative_derivative(value) FROM cpu`, + derivative: true, + }, + + // No GROUP BY derivatives, time only + { + stmt: `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: true, + }, + + // Invalid derivative function name + { + stmt: `SELECT typoDerivative(value) FROM cpu where time < now()`, + derivative: false, + }, + } + + for i, tt := range tests { + // Parse statement. + t.Logf("index: %d, statement: %s", i, tt.stmt) + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test derivative detection. + if d := stmt.(*influxql.SelectStatement).HasDerivative(); tt.derivative != d { + t.Errorf("%d. 
%q: unexpected derivative detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.derivative, d) + continue + } + } +} + +func TestSelectStatement_IsSimpleDerivative(t *testing.T) { + var tests = []struct { + stmt string + derivative bool + }{ + // No derivatives + { + stmt: `SELECT value FROM cpu`, + derivative: false, + }, + + // Query derivative + { + stmt: `SELECT derivative(value) FROM cpu`, + derivative: true, + }, + + // Query derivative + { + stmt: `SELECT non_negative_derivative(value) FROM cpu`, + derivative: true, + }, + + // No GROUP BY time only + { + stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: false, + }, + + // No GROUP BY derivatives, time only + { + stmt: `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + derivative: false, + }, + + // Invalid derivative function name + { + stmt: `SELECT typoDerivative(value) FROM cpu where time < now()`, + derivative: false, + }, + } + + for i, tt := range tests { + // Parse statement. + t.Logf("index: %d, statement: %s", i, tt.stmt) + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test derivative detection. + if d := stmt.(*influxql.SelectStatement).IsSimpleDerivative(); tt.derivative != d { + t.Errorf("%d. %q: unexpected derivative detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.derivative, d) + continue + } + } +} + +func TestSelectStatement_HasSimpleCount(t *testing.T) { + var tests = []struct { + stmt string + count bool + }{ + // No counts + { + stmt: `SELECT value FROM cpu`, + count: false, + }, + + // Query count + { + stmt: `SELECT count(value) FROM cpu`, + count: true, + }, + + // No GROUP BY time only + { + stmt: `SELECT count(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + count: false, + }, + + // Query count + { + stmt: `SELECT typoCount(value) FROM cpu`, + count: false, + }, + + // No GROUP BY time only + { + stmt: `SELECT typoCount(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + count: false, + }, + } + + for i, tt := range tests { + // Parse statement. + t.Logf("index: %d, statement: %s", i, tt.stmt) + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test count detection. + if c := stmt.(*influxql.SelectStatement).HasSimpleCount(); tt.count != c { + t.Errorf("%d. %q: unexpected count detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.count, c) + continue + } + } +} + +func TestSelectStatement_HasCountDistinct(t *testing.T) { + var tests = []struct { + stmt string + count bool + }{ + // No counts + { + stmt: `SELECT value FROM cpu`, + count: false, + }, + + // Query count + { + stmt: `SELECT count(value) FROM cpu`, + count: false, + }, + + // No GROUP BY time only + { + stmt: `SELECT count(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + count: true, + }, + + // Query count + { + stmt: `SELECT typoCount(value) FROM cpu`, + count: false, + }, + + // No GROUP BY time only + { + stmt: `SELECT typoCount(distinct(value)) FROM cpu where time < now() GROUP BY time(5ms)`, + count: false, + }, + } + + for i, tt := range tests { + // Parse statement. 
+ t.Logf("index: %d, statement: %s", i, tt.stmt) + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + // Test count detection. + if c := stmt.(*influxql.SelectStatement).HasCountDistinct(); tt.count != c { + t.Errorf("%d. %q: unexpected count detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.count, c) + continue + } + } +} + +// Ensure binary expression names can be evaluated. +func TestBinaryExprName(t *testing.T) { + for i, tt := range []struct { + expr string + name string + }{ + {expr: `value + 1`, name: `value`}, + {expr: `"user" / total`, name: `user_total`}, + {expr: `("user" + total) / total`, name: `user_total_total`}, + } { + expr := influxql.MustParseExpr(tt.expr) + switch expr := expr.(type) { + case *influxql.BinaryExpr: + name := influxql.BinaryExprName(expr) + if name != tt.name { + t.Errorf("%d. unexpected name %s, got %s", i, name, tt.name) + } + default: + t.Errorf("%d. unexpected expr type: %T", i, expr) + } + } +} + +// Ensure the time range of an expression can be extracted. +func TestTimeRange(t *testing.T) { + for i, tt := range []struct { + expr string + min, max string + }{ + // LHS VarRef + {expr: `time > '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time >= '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time < '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`}, + {expr: `time <= '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`}, + + // RHS VarRef + {expr: `'2000-01-01 00:00:00' > time`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`}, + {expr: `'2000-01-01 00:00:00' >= time`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`}, + {expr: `'2000-01-01 00:00:00' < time`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `'2000-01-01 00:00:00' <= time`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + + // number literal + {expr: `time < 10`, min: `0001-01-01T00:00:00Z`, max: `1970-01-01T00:00:00.000000009Z`}, + + // Equality + {expr: `time = '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T00:00:00.000000001Z`}, + + // Multiple time expressions. + {expr: `time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T23:59:59.999999999Z`}, + + // Min/max crossover + {expr: `time >= '2000-01-01 00:00:00' AND time <= '1999-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `1999-01-01T00:00:00Z`}, + + // Absolute time + {expr: `time = 1388534400s`, min: `2014-01-01T00:00:00Z`, max: `2014-01-01T00:00:00.000000001Z`}, + + // Non-comparative expressions. + {expr: `time`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time + 2`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time - '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + {expr: `time AND '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`}, + } { + // Extract time range. + expr := MustParseExpr(tt.expr) + min, max := influxql.TimeRange(expr) + + // Compare with expected min/max. + if min := min.Format(time.RFC3339Nano); tt.min != min { + t.Errorf("%d. 
%s: unexpected min:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.min, min) + continue + } + if max := max.Format(time.RFC3339Nano); tt.max != max { + t.Errorf("%d. %s: unexpected max:\n\nexp=%s\n\ngot=%s\n\n", i, tt.expr, tt.max, max) + continue + } + } +} + +// Ensure that we see if a where clause has only time limitations +func TestOnlyTimeExpr(t *testing.T) { + var tests = []struct { + stmt string + exp bool + }{ + { + stmt: `SELECT value FROM myseries WHERE value > 1`, + exp: false, + }, + { + stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z'`, + exp: true, + }, + { + stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z'`, + exp: true, + }, + { + stmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND asdf = 'bar'`, + exp: false, + }, + { + stmt: `SELECT value FROM foo WHERE asdf = 'jkl' AND (time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z')`, + exp: false, + }, + } + + for i, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + if influxql.OnlyTimeExpr(stmt.(*influxql.SelectStatement).Condition) != tt.exp { + t.Fatalf("%d. expected statement to return only time dimension to be %t: %s", i, tt.exp, tt.stmt) + } + } +} + +// Ensure an AST node can be rewritten. +func TestRewrite(t *testing.T) { + expr := MustParseExpr(`time > 1 OR foo = 2`) + + // Flip LHS & RHS in all binary expressions. + act := influxql.RewriteFunc(expr, func(n influxql.Node) influxql.Node { + switch n := n.(type) { + case *influxql.BinaryExpr: + return &influxql.BinaryExpr{Op: n.Op, LHS: n.RHS, RHS: n.LHS} + default: + return n + } + }) + + // Verify that everything is flipped. + if act := act.String(); act != `2.000 = foo OR 1.000 > time` { + t.Fatalf("unexpected result: %s", act) + } +} + +// Ensure an Expr can be rewritten handling nils. +func TestRewriteExpr(t *testing.T) { + expr := MustParseExpr(`(time > 1 AND time < 10) OR foo = 2`) + + // Remove all time expressions. + act := influxql.RewriteExpr(expr, func(e influxql.Expr) influxql.Expr { + switch e := e.(type) { + case *influxql.BinaryExpr: + if lhs, ok := e.LHS.(*influxql.VarRef); ok && lhs.Val == "time" { + return nil + } + } + return e + }) + + // Verify that everything is flipped. 
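+	// (More precisely: confirm the time clauses were removed and only the tag
+	// comparison `foo = 2.000` remains.)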
+ if act := act.String(); act != `foo = 2.000` { + t.Fatalf("unexpected result: %s", act) + } +} + +// Ensure that the String() value of a statement is parseable +func TestParseString(t *testing.T) { + var tests = []struct { + stmt string + }{ + { + stmt: `SELECT "cpu load" FROM myseries`, + }, + { + stmt: `SELECT "cpu load" FROM "my series"`, + }, + { + stmt: `SELECT "cpu\"load" FROM myseries`, + }, + { + stmt: `SELECT "cpu'load" FROM myseries`, + }, + { + stmt: `SELECT "cpu load" FROM "my\"series"`, + }, + { + stmt: `SELECT "field with spaces" FROM "\"ugly\" db"."\"ugly\" rp"."\"ugly\" measurement"`, + }, + { + stmt: `SELECT * FROM myseries`, + }, + { + stmt: `DROP DATABASE "!"`, + }, + { + stmt: `DROP RETENTION POLICY "my rp" ON "a database"`, + }, + { + stmt: `CREATE RETENTION POLICY "my rp" ON "a database" DURATION 1d REPLICATION 1`, + }, + { + stmt: `ALTER RETENTION POLICY "my rp" ON "a database" DEFAULT`, + }, + { + stmt: `SHOW RETENTION POLICIES ON "a database"`, + }, + { + stmt: `SHOW TAG VALUES WITH KEY IN ("a long name", short)`, + }, + { + stmt: `DROP CONTINUOUS QUERY "my query" ON "my database"`, + }, + // See issues https://github.com/influxdata/influxdb/issues/1647 + // and https://github.com/influxdata/influxdb/issues/4404 + //{ + // stmt: `DELETE FROM "my db"."my rp"."my measurement"`, + //}, + { + stmt: `DROP SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp"`, + }, + { + stmt: `CREATE SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp" DESTINATIONS ALL 'my host', 'my other host'`, + }, + { + stmt: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /foo/`, + }, + { + stmt: `SHOW MEASUREMENTS WITH MEASUREMENT = "and/or"`, + }, + { + stmt: `DROP USER "user with spaces"`, + }, + { + stmt: `GRANT ALL PRIVILEGES ON "db with spaces" TO "user with spaces"`, + }, + { + stmt: `GRANT ALL PRIVILEGES TO "user with spaces"`, + }, + { + stmt: `SHOW GRANTS FOR "user with spaces"`, + }, + { + stmt: `REVOKE ALL PRIVILEGES ON "db with spaces" FROM "user with spaces"`, + }, + { + stmt: `REVOKE ALL PRIVILEGES FROM "user with spaces"`, + }, + { + stmt: `CREATE DATABASE "db with spaces"`, + }, + } + + for _, tt := range tests { + // Parse statement. + stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() + if err != nil { + t.Fatalf("invalid statement: %q: %s", tt.stmt, err) + } + + stmtCopy, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement() + if err != nil { + t.Fatalf("failed to parse string: %v\norig: %v\ngot: %v", err, tt.stmt, stmt.String()) + } + + if !reflect.DeepEqual(stmt, stmtCopy) { + t.Fatalf("statement changed after stringifying and re-parsing:\noriginal : %v\nre-parsed: %v\n", tt.stmt, stmtCopy.String()) + } + } +} + +// Ensure an expression can be reduced. +func TestEval(t *testing.T) { + for i, tt := range []struct { + in string + out interface{} + data map[string]interface{} + }{ + // Number literals. + {in: `1 + 2`, out: float64(3)}, + {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: float64(26.5), data: map[string]interface{}{"foo": float64(5)}}, + {in: `foo / 2`, out: float64(2), data: map[string]interface{}{"foo": float64(4)}}, + {in: `4 = 4`, out: true}, + {in: `4 <> 4`, out: false}, + {in: `6 > 4`, out: true}, + {in: `4 >= 4`, out: true}, + {in: `4 < 6`, out: true}, + {in: `4 <= 4`, out: true}, + {in: `4 AND 5`, out: nil}, + + // Boolean literals. + {in: `true AND false`, out: false}, + {in: `true OR false`, out: true}, + + // String literals. 
+ {in: `'foo' = 'bar'`, out: false}, + {in: `'foo' = 'foo'`, out: true}, + + // Variable references. + {in: `foo`, out: "bar", data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: true, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: nil, data: map[string]interface{}{"foo": nil}}, + {in: `foo <> 'bar'`, out: true, data: map[string]interface{}{"foo": "xxx"}}, + } { + // Evaluate expression. + out := influxql.Eval(MustParseExpr(tt.in), tt.data) + + // Compare with expected output. + if !reflect.DeepEqual(tt.out, out) { + t.Errorf("%d. %s: unexpected output:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.in, tt.out, out) + continue + } + } +} + +// Ensure an expression can be reduced. +func TestReduce(t *testing.T) { + now := mustParseTime("2000-01-01T00:00:00Z") + + for i, tt := range []struct { + in string + out string + data Valuer + }{ + // Number literals. + {in: `1 + 2`, out: `3.000`}, + {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2.000) + 16.500`}, + {in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5.000), 4.000)`}, + {in: `4 / 0`, out: `0.000`}, + {in: `4 = 4`, out: `true`}, + {in: `4 <> 4`, out: `false`}, + {in: `6 > 4`, out: `true`}, + {in: `4 >= 4`, out: `true`}, + {in: `4 < 6`, out: `true`}, + {in: `4 <= 4`, out: `true`}, + {in: `4 AND 5`, out: `4.000 AND 5.000`}, + + // Boolean literals. + {in: `true AND false`, out: `false`}, + {in: `true OR false`, out: `true`}, + {in: `true OR (foo = bar AND 1 > 2)`, out: `true`}, + {in: `(foo = bar AND 1 > 2) OR true`, out: `true`}, + {in: `false OR (foo = bar AND 1 > 2)`, out: `false`}, + {in: `(foo = bar AND 1 > 2) OR false`, out: `false`}, + {in: `true = false`, out: `false`}, + {in: `true <> false`, out: `true`}, + {in: `true + false`, out: `true + false`}, + + // Time literals. + {in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`, data: map[string]interface{}{"now()": now}}, + {in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now() = now()`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() <> now()`, out: `false`, data: map[string]interface{}{"now()": now}}, + {in: `now() < now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() <= now() + 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() >= now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() > now() - 1h`, out: `true`, data: map[string]interface{}{"now()": now}}, + {in: `now() - (now() - 60s)`, out: `1m`, data: map[string]interface{}{"now()": now}}, + {in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, data: map[string]interface{}{"now()": now}}, + {in: `now()`, out: `now()`}, + + // Duration literals. + {in: `10m + 1h - 60s`, out: `69m`}, + {in: `(10m / 2) * 5`, out: `25m`}, + {in: `60s = 1m`, out: `true`}, + {in: `60s <> 1m`, out: `false`}, + {in: `60s < 1h`, out: `true`}, + {in: `60s <= 1h`, out: `true`}, + {in: `60s > 12s`, out: `true`}, + {in: `60s >= 1m`, out: `true`}, + {in: `60s AND 1m`, out: `1m AND 1m`}, + {in: `60m / 0`, out: `0s`}, + {in: `60m + 50`, out: `1h + 50.000`}, + + // String literals. + {in: `'foo' + 'bar'`, out: `'foobar'`}, + + // Variable references. 
+ {in: `foo`, out: `'bar'`, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{"foo": "bar"}}, + {in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, + {in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, + } { + // Fold expression. + expr := influxql.Reduce(MustParseExpr(tt.in), tt.data) + + // Compare with expected output. + if out := expr.String(); tt.out != out { + t.Errorf("%d. %s: unexpected expr:\n\nexp=%s\n\ngot=%s\n\n", i, tt.in, tt.out, out) + continue + } + } +} + +func Test_fieldsNames(t *testing.T) { + for _, test := range []struct { + in []string + out []string + alias []string + }{ + { //case: binary expr(valRef) + in: []string{"value+value"}, + out: []string{"value", "value"}, + alias: []string{"value_value"}, + }, + { //case: binary expr + valRef + in: []string{"value+value", "temperature"}, + out: []string{"value", "value", "temperature"}, + alias: []string{"value_value", "temperature"}, + }, + { //case: aggregate expr + in: []string{"mean(value)"}, + out: []string{"mean"}, + alias: []string{"mean"}, + }, + { //case: binary expr(aggregate expr) + in: []string{"mean(value) + max(value)"}, + out: []string{"value", "value"}, + alias: []string{"mean_max"}, + }, + { //case: binary expr(aggregate expr) + valRef + in: []string{"mean(value) + max(value)", "temperature"}, + out: []string{"value", "value", "temperature"}, + alias: []string{"mean_max", "temperature"}, + }, + { //case: mixed aggregate and varRef + in: []string{"mean(value) + temperature"}, + out: []string{"value", "temperature"}, + alias: []string{"mean_temperature"}, + }, + { //case: ParenExpr(varRef) + in: []string{"(value)"}, + out: []string{"value"}, + alias: []string{"value"}, + }, + { //case: ParenExpr(varRef + varRef) + in: []string{"(value + value)"}, + out: []string{"value", "value"}, + alias: []string{"value_value"}, + }, + { //case: ParenExpr(aggregate) + in: []string{"(mean(value))"}, + out: []string{"value"}, + alias: []string{"mean"}, + }, + { //case: ParenExpr(aggregate + aggregate) + in: []string{"(mean(value) + max(value))"}, + out: []string{"value", "value"}, + alias: []string{"mean_max"}, + }, + } { + fields := influxql.Fields{} + for _, s := range test.in { + expr := MustParseExpr(s) + fields = append(fields, &influxql.Field{Expr: expr}) + } + got := fields.Names() + if !reflect.DeepEqual(got, test.out) { + t.Errorf("get fields name:\nexp=%v\ngot=%v\n", test.out, got) + } + alias := fields.AliasNames() + if !reflect.DeepEqual(alias, test.alias) { + t.Errorf("get fields alias name:\nexp=%v\ngot=%v\n", test.alias, alias) + } + } + +} + +func TestSelect_ColumnNames(t *testing.T) { + for i, tt := range []struct { + stmt *influxql.SelectStatement + columns []string + }{ + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value_1"}}, + }), + }, + columns: []string{"time", "value", "value_1", "value_1_1"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "value_1"}}, + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value", "value_1", 
"value_2"}, + }, + { + stmt: &influxql.SelectStatement{ + Fields: influxql.Fields([]*influxql.Field{ + {Expr: &influxql.VarRef{Val: "value"}}, + {Expr: &influxql.VarRef{Val: "total"}, Alias: "value"}, + {Expr: &influxql.VarRef{Val: "value"}}, + }), + }, + columns: []string{"time", "value_1", "value", "value_2"}, + }, + } { + columns := tt.stmt.ColumnNames() + if !reflect.DeepEqual(columns, tt.columns) { + t.Errorf("%d. expected %s, got %s", i, tt.columns, columns) + } + } +} + +func TestSources_Names(t *testing.T) { + sources := influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "cpu", + }, + &influxql.Measurement{ + Name: "mem", + }, + }) + + names := sources.Names() + if names[0] != "cpu" { + t.Errorf("expected cpu, got %s", names[0]) + } + if names[1] != "mem" { + t.Errorf("expected mem, got %s", names[1]) + } +} + +func TestSources_HasSystemSource(t *testing.T) { + sources := influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "_measurements", + }, + }) + + ok := sources.HasSystemSource() + if !ok { + t.Errorf("expected to find a system source, found none") + } + + sources = influxql.Sources([]influxql.Source{ + &influxql.Measurement{ + Name: "cpu", + }, + }) + + ok = sources.HasSystemSource() + if ok { + t.Errorf("expected to find no system source, found one") + } +} + +// Valuer represents a simple wrapper around a map to implement the influxql.Valuer interface. +type Valuer map[string]interface{} + +// Value returns the value and existence of a key. +func (o Valuer) Value(key string) (v interface{}, ok bool) { + v, ok = o[key] + return +} + +// mustParseTime parses an IS0-8601 string. Panic on error. +func mustParseTime(s string) time.Time { + t, err := time.Parse(time.RFC3339, s) + if err != nil { + panic(err.Error()) + } + return t +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go new file mode 100644 index 0000000000..19f9d17206 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go @@ -0,0 +1,1093 @@ +package influxql + +import ( + "bytes" + "container/heap" + "fmt" + "math" + "sort" +) + +/* +This file contains iterator implementations for each function call available +in InfluxQL. Call iterators are separated into two groups: + +1. Map/reduce-style iterators - these are passed to IteratorCreator so that + processing can be at the low-level storage and aggregates are returned. + +2. Raw aggregate iterators - these require the full set of data for a window. + These are handled by the select() function and raw points are streamed in + from the low-level storage. + +There are helpers to aid in building aggregate iterators. For simple map/reduce +iterators, you can use the reduceIterator types and pass a reduce function. This +reduce function is passed a previous and current value and the new timestamp, +value, and auxilary fields are returned from it. + +For raw aggregate iterators, you can use the reduceSliceIterators which pass +in a slice of all points to the function and return a point. For more complex +iterator types, you may need to create your own iterators by hand. + +Once your iterator is complete, you'll need to add it to the NewCallIterator() +function if it is to be available to IteratorCreators and add it to the select() +function to allow it to be included during planning. +*/ + +// NewCallIterator returns a new iterator for a Call. 
+func NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + name := opt.Expr.(*Call).Name + switch name { + case "count": + return newCountIterator(input, opt) + case "min": + return newMinIterator(input, opt) + case "max": + return newMaxIterator(input, opt) + case "sum": + return newSumIterator(input, opt) + case "first": + return newFirstIterator(input, opt) + case "last": + return newLastIterator(input, opt) + case "mean": + return newMeanIterator(input, opt) + default: + return nil, fmt.Errorf("unsupported function call: %s", name) + } +} + +// newCountIterator returns an iterator for operating on a count() call. +func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + // FIXME: Wrap iterator in int-type iterator and always output int value. + + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatFuncIntegerReducer(FloatCountReduce) + return fn, fn + } + return &floatReduceIntegerIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerCountReduce) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringFuncIntegerReducer(StringCountReduce) + return fn, fn + } + return &stringReduceIntegerIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanFuncIntegerReducer(BooleanCountReduce) + return fn, fn + } + return &booleanReduceIntegerIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported count iterator type: %T", input) + } +} + +// FloatCountReduce returns the count of points. +func FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// IntegerCountReduce returns the count of points. +func IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// StringCountReduce returns the count of points. +func StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// BooleanCountReduce returns the count of points. +func BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, 1, nil + } + return ZeroTime, prev.Value + 1, nil +} + +// newMinIterator returns an iterator for operating on a min() call. 
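+// The min reducers below keep the smallest value seen in the window; on equal
+// values the point with the earlier timestamp wins, and the winning point's own
+// time and auxiliary fields are carried through. This differs from the count
+// reducers above, which emit ZeroTime; the count tests later in this patch show
+// those points surfacing with the window start time instead.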
+func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMinReduce) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMinReduce) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported min iterator type: %T", input) + } +} + +// FloatMinReduce returns the minimum value between prev & curr. +func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMinReduce returns the minimum value between prev & curr. +func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// newMaxIterator returns an iterator for operating on a max() call. +func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatMaxReduce) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerMaxReduce) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported max iterator type: %T", input) + } +} + +// FloatMaxReduce returns the maximum value between prev & curr. +func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerMaxReduce returns the maximum value between prev & curr. +func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// newSumIterator returns an iterator for operating on a sum() call. 
+func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatSumReduce) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerSumReduce) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported sum iterator type: %T", input) + } +} + +// FloatSumReduce returns the sum prev value & curr value. +func FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// IntegerSumReduce returns the sum prev value & curr value. +func IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil { + return ZeroTime, curr.Value, nil + } + return prev.Time, prev.Value + curr.Value, nil +} + +// newFirstIterator returns an iterator for operating on a first() call. +func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatFirstReduce) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerFirstReduce) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringFirstReduce) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanFirstReduce) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported first iterator type: %T", input) + } +} + +// FloatFirstReduce returns the first point sorted by time. +func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerFirstReduce returns the first point sorted by time. +func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// StringFirstReduce returns the first point sorted by time. 
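+// As with the float and integer variants above, a tie on the timestamp is
+// broken by taking the larger value, so first() stays deterministic when
+// several points share the earliest time.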
+func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanFirstReduce returns the first point sorted by time. +func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// newLastIterator returns an iterator for operating on a last() call. +func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatFuncReducer(FloatLastReduce) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerFuncReducer(IntegerLastReduce) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringFuncReducer(StringLastReduce) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { + fn := NewBooleanFuncReducer(BooleanLastReduce) + return fn, fn + } + return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported last iterator type: %T", input) + } +} + +// FloatLastReduce returns the last point sorted by time. +func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// IntegerLastReduce returns the last point sorted by time. +func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// StringLastReduce returns the first point sorted by time. +func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// BooleanLastReduce returns the first point sorted by time. +func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { + if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) { + return curr.Time, curr.Value, curr.Aux + } + return prev.Time, prev.Value, prev.Aux +} + +// NewDistinctIterator returns an iterator for operating on a distinct() call. 
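+// The distinct reducers below keep the first point seen for each unique value
+// in the window and then sort the result for a deterministic order (map
+// iteration order in Go is randomized). For example, float values [2, 1, 2, 3]
+// reduce to one point each for 1, 2 and 3.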
+func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatDistinctReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerDistinctReduceSlice) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSliceFuncReducer(StringDistinctReduceSlice) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported distinct iterator type: %T", input) + } +} + +// FloatDistinctReduceSlice returns the distinct value within a window. +func FloatDistinctReduceSlice(a []FloatPoint) []FloatPoint { + m := make(map[float64]FloatPoint) + for _, p := range a { + if _, ok := m[p.Value]; !ok { + m[p.Value] = p + } + } + + points := make([]FloatPoint, 0, len(m)) + for _, p := range m { + points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(floatPoints(points)) + return points +} + +// IntegerDistinctReduceSlice returns the distinct value within a window. +func IntegerDistinctReduceSlice(a []IntegerPoint) []IntegerPoint { + m := make(map[int64]IntegerPoint) + for _, p := range a { + if _, ok := m[p.Value]; !ok { + m[p.Value] = p + } + } + + points := make([]IntegerPoint, 0, len(m)) + for _, p := range m { + points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(integerPoints(points)) + return points +} + +// StringDistinctReduceSlice returns the distinct value within a window. +func StringDistinctReduceSlice(a []StringPoint) []StringPoint { + m := make(map[string]StringPoint) + for _, p := range a { + if _, ok := m[p.Value]; !ok { + m[p.Value] = p + } + } + + points := make([]StringPoint, 0, len(m)) + for _, p := range m { + points = append(points, StringPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(stringPoints(points)) + return points +} + +// newMeanIterator returns an iterator for operating on a mean() call. +func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatMeanReducer() + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerMeanReducer() + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported mean iterator type: %T", input) + } +} + +// newMedianIterator returns an iterator for operating on a median() call. 
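+// The median reducers below sort the window by value and return the middle
+// point, or the mean of the two middle points for an even-sized window: for
+// float values [1, 2, 4, 8] the result is 2 + (4-2)/2 = 3. Integer inputs
+// therefore emit float output.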
+func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice) + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported median iterator type: %T", input) + } +} + +// FloatMedianReduceSlice returns the median value within a window. +func FloatMedianReduceSlice(a []FloatPoint) []FloatPoint { + if len(a) == 1 { + return a + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(floatPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: lo.Value + (hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: a[len(a)/2].Value}} +} + +// IntegerMedianReduceSlice returns the median value within a window. +func IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint { + if len(a) == 1 { + return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} + } + + // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. + + // Return the middle value from the points. + // If there are an even number of points then return the mean of the two middle points. + sort.Sort(integerPointsByValue(a)) + if len(a)%2 == 0 { + lo, hi := a[len(a)/2-1], a[(len(a)/2)] + return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} + } + return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} +} + +// newStddevIterator returns an iterator for operating on a stddev() call. +func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice) + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + case StringIterator: + createFn := func() (StringPointAggregator, StringPointEmitter) { + fn := NewStringSliceFuncReducer(StringStddevReduceSlice) + return fn, fn + } + return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported stddev iterator type: %T", input) + } +} + +// FloatStddevReduceSlice returns the stddev value within a window. +func FloatStddevReduceSlice(a []FloatPoint) []FloatPoint { + // If there is only one point then return 0. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Nil: true}} + } + + // Calculate the mean. 
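+	// The mean is accumulated incrementally (mean += (x-mean)/count) and NaN
+	// values are skipped in both passes, so count only reflects usable points;
+	// dividing by count-1 below yields the sample standard deviation.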
+ var mean float64 + var count int + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + count++ + mean += (p.Value - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + if math.IsNaN(p.Value) { + continue + } + variance += math.Pow(p.Value-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// IntegerStddevReduceSlice returns the stddev value within a window. +func IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint { + // If there is only one point then return 0. + if len(a) < 2 { + return []FloatPoint{{Time: ZeroTime, Nil: true}} + } + + // Calculate the mean. + var mean float64 + var count int + for _, p := range a { + count++ + mean += (float64(p.Value) - mean) / float64(count) + } + + // Calculate the variance. + var variance float64 + for _, p := range a { + variance += math.Pow(float64(p.Value)-mean, 2) + } + return []FloatPoint{{ + Time: ZeroTime, + Value: math.Sqrt(variance / float64(count-1)), + }} +} + +// StringStddevReduceSlice always returns "". +func StringStddevReduceSlice(a []StringPoint) []StringPoint { + return []StringPoint{{Time: ZeroTime, Value: ""}} +} + +// newSpreadIterator returns an iterator for operating on a spread() call. +func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(FloatSpreadReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(IntegerSpreadReduceSlice) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported spread iterator type: %T", input) + } +} + +// FloatSpreadReduceSlice returns the spread value within a window. +func FloatSpreadReduceSlice(a []FloatPoint) []FloatPoint { + // Find min & max values. + min, max := a[0].Value, a[0].Value + for _, p := range a[1:] { + min = math.Min(min, p.Value) + max = math.Max(max, p.Value) + } + return []FloatPoint{{Time: ZeroTime, Value: max - min}} +} + +// IntegerSpreadReduceSlice returns the spread value within a window. +func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint { + // Find min & max values. + min, max := a[0].Value, a[0].Value + for _, p := range a[1:] { + if p.Value < min { + min = p.Value + } + if p.Value > max { + max = p.Value + } + } + return []IntegerPoint{{Time: ZeroTime, Value: max - min}} +} + +// newTopIterator returns an iterator for operating on a top() call. 
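+// The top reducers below first collapse the window to the best point per
+// unique tag set (when tag indexes are supplied), then pop the n largest
+// values off a heap, breaking ties by the earlier timestamp. With a GROUP BY
+// time interval the emitted times are zeroed; without one the points are
+// re-sorted by time.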
+func newTopIterator(input Iterator, opt IteratorOptions, n *NumberLiteral, tags []int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + aggregateFn := NewFloatTopReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(aggregateFn) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + aggregateFn := NewIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(aggregateFn) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported top iterator type: %T", input) + } +} + +// NewFloatTopReduceSliceFunc returns the top values within a window. +func NewFloatTopReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool { + return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring higher values and breaking ties + // based on the earliest time for a point. + h := floatPointsSortBy(a, func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]FloatPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(FloatPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(floatPointsByTime(points)) + } + return points + } +} + +// NewIntegerTopReduceSliceFunc returns the top values within a window. +func NewIntegerTopReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool { + return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring higher values and breaking ties + // based on the earliest time for a point. + h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value > b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]IntegerPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(IntegerPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. 
+ if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(integerPointsByTime(points)) + } + return points + } +} + +// newBottomIterator returns an iterator for operating on a bottom() call. +func newBottomIterator(input Iterator, opt IteratorOptions, n *NumberLiteral, tags []int) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + aggregateFn := NewFloatBottomReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(aggregateFn) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + aggregateFn := NewIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(aggregateFn) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported bottom iterator type: %T", input) + } +} + +// NewFloatBottomReduceSliceFunc returns the bottom values within a window. +func NewFloatBottomReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool { + return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring lower values and breaking ties + // based on the earliest time for a point. + h := floatPointsSortBy(a, func(a, b *FloatPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. + points := make([]FloatPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(FloatPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(floatPointsByTime(points)) + } + return points + } +} + +// NewIntegerBottomReduceSliceFunc returns the bottom values within a window. +func NewIntegerBottomReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + // Filter by tags if they exist. + if len(tags) > 0 { + a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool { + return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time) + }) + } + + // If we ask for more elements than exist, restrict n to be the length of the array. + size := n + if size > len(a) { + size = len(a) + } + + // Construct a heap preferring lower values and breaking ties + // based on the earliest time for a point. + h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool { + if a.Value != b.Value { + return a.Value < b.Value + } + return a.Time < b.Time + }) + heap.Init(h) + + // Pop the first n elements and then sort by time. 
+ points := make([]IntegerPoint, 0, size) + for i := 0; i < size; i++ { + p := heap.Pop(h).(IntegerPoint) + points = append(points, p) + } + + // Either zero out all values or sort the points by time + // depending on if a time interval was given or not. + if !interval.IsZero() { + for i := range points { + points[i].Time = ZeroTime + } + } else { + sort.Stable(integerPointsByTime(points)) + } + return points + } +} + +func filterFloatByUniqueTags(a []FloatPoint, tags []int, cmpFunc func(cur, p *FloatPoint) bool) []FloatPoint { + pointMap := make(map[string]FloatPoint) + for _, p := range a { + keyBuf := bytes.NewBuffer(nil) + for i, index := range tags { + if i > 0 { + keyBuf.WriteString(",") + } + fmt.Fprintf(keyBuf, "%s", p.Aux[index]) + } + key := keyBuf.String() + + cur, ok := pointMap[key] + if ok { + if cmpFunc(&cur, &p) { + pointMap[key] = p + } + } else { + pointMap[key] = p + } + } + + // Recreate the original array with our new filtered list. + points := make([]FloatPoint, 0, len(pointMap)) + for _, p := range pointMap { + points = append(points, p) + } + return points +} + +func filterIntegerByUniqueTags(a []IntegerPoint, tags []int, cmpFunc func(cur, p *IntegerPoint) bool) []IntegerPoint { + pointMap := make(map[string]IntegerPoint) + for _, p := range a { + keyBuf := bytes.NewBuffer(nil) + for i, index := range tags { + if i > 0 { + keyBuf.WriteString(",") + } + fmt.Fprintf(keyBuf, "%s", p.Aux[index]) + } + key := keyBuf.String() + + cur, ok := pointMap[key] + if ok { + if cmpFunc(&cur, &p) { + pointMap[key] = p + } + } else { + pointMap[key] = p + } + } + + // Recreate the original array with our new filtered list. + points := make([]IntegerPoint, 0, len(pointMap)) + for _, p := range pointMap { + points = append(points, p) + } + return points +} + +// newPercentileIterator returns an iterator for operating on a percentile() call. +func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + floatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile) + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice) + return fn, fn + } + return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported percentile iterator type: %T", input) + } +} + +// NewFloatPercentileReduceSliceFunc returns the percentile value within a window. +func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc { + return func(a []FloatPoint) []FloatPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(floatPointsByValue(a)) + return []FloatPoint{{Time: ZeroTime, Value: a[i].Value}} + } +} + +// NewIntegerPercentileReduceSliceFunc returns the percentile value within a window. 
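+// Like the float variant above, this is a nearest-rank percentile over the
+// value-sorted window: with length=10 and percentile=90 the index is
+// floor(10*90/100 + 0.5) - 1 = 8, i.e. the 9th-smallest value. Indexes that
+// fall outside the window (for example very small percentiles) produce no
+// output point.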
+func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc { + return func(a []IntegerPoint) []IntegerPoint { + length := len(a) + i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 + + if i < 0 || i >= length { + return nil + } + + sort.Sort(integerPointsByValue(a)) + return []IntegerPoint{{Time: ZeroTime, Value: a[i].Value}} + } +} + +// newDerivativeIterator returns an iterator for operating on a derivative() call. +func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + floatDerivativeReduceSlice := NewFloatDerivativeReduceSliceFunc(interval, isNonNegative) + createFn := func() (FloatPointAggregator, FloatPointEmitter) { + fn := NewFloatSliceFuncReducer(floatDerivativeReduceSlice) + return fn, fn + } + return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + case IntegerIterator: + integerDerivativeReduceSlice := NewIntegerDerivativeReduceSliceFunc(interval, isNonNegative) + createFn := func() (IntegerPointAggregator, FloatPointEmitter) { + fn := NewIntegerSliceFuncFloatReducer(integerDerivativeReduceSlice) + return fn, fn + } + return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + default: + return nil, fmt.Errorf("unsupported derivative iterator type: %T", input) + } +} + +// NewFloatDerivativeReduceSliceFunc returns the derivative value within a window. +func NewFloatDerivativeReduceSliceFunc(interval Interval, isNonNegative bool) FloatReduceSliceFunc { + prev := FloatPoint{Nil: true} + + return func(a []FloatPoint) []FloatPoint { + if len(a) == 0 { + return a + } else if len(a) == 1 { + return []FloatPoint{{Time: a[0].Time, Nil: true}} + } + + if prev.Nil { + prev = a[0] + } + + output := make([]FloatPoint, 0, len(a)-1) + for i := 1; i < len(a); i++ { + p := &a[i] + + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := p.Value - prev.Value + elapsed := p.Time - prev.Time + + value := 0.0 + if elapsed > 0 { + value = diff / (float64(elapsed) / float64(interval.Duration)) + } + + prev = *p + + // Drop negative values for non-negative derivatives. + if isNonNegative && diff < 0 { + continue + } + + output = append(output, FloatPoint{Time: p.Time, Value: value}) + } + return output + } +} + +// NewIntegerDerivativeReduceSliceFunc returns the derivative value within a window. +func NewIntegerDerivativeReduceSliceFunc(interval Interval, isNonNegative bool) IntegerReduceFloatSliceFunc { + prev := IntegerPoint{Nil: true} + + return func(a []IntegerPoint) []FloatPoint { + if len(a) == 0 { + return []FloatPoint{} + } else if len(a) == 1 { + return []FloatPoint{{Time: a[0].Time, Nil: true}} + } + + if prev.Nil { + prev = a[0] + } + + output := make([]FloatPoint, 0, len(a)-1) + for i := 1; i < len(a); i++ { + p := &a[i] + + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := float64(p.Value - prev.Value) + elapsed := p.Time - prev.Time + + value := 0.0 + if elapsed > 0 { + value = diff / (float64(elapsed) / float64(interval.Duration)) + } + + prev = *p + + // Drop negative values for non-negative derivatives. 
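+			// Note: prev has already advanced to the current point above, so a
+			// skipped negative step still becomes the baseline for the next
+			// sample; the point is omitted rather than emitted as zero.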
+ if isNonNegative && diff < 0 { + continue + } + + output = append(output, FloatPoint{Time: p.Time, Value: value}) + } + return output + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go b/vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go new file mode 100644 index 0000000000..c0f0734061 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go @@ -0,0 +1,599 @@ +package influxql_test + +import ( + "math/rand" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure that a float iterator can be created for a count() call. +func TestCallIterator_Count_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a count() call. 
+func TestCallIterator_Count_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a count() call. +func TestCallIterator_Count_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a count() call. 
+func TestCallIterator_Count_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "cpu", Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, + + {Name: "cpu", Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Name: "cpu", Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + {Name: "mem", Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`count("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a min() call. +func TestCallIterator_Min_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a min() call. 
+func TestCallIterator_Min_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`min("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a max() call. +func TestCallIterator_Max_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a integer iterator can be created for a max() call. 
+func TestCallIterator_Max_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`max("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a sum() call. +func TestCallIterator_Sum_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a sum() call. 
+func TestCallIterator_Sum_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`sum("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a first() call. +func TestCallIterator_First_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a first() call. 
+func TestCallIterator_First_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a first() call. +func TestCallIterator_First_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 0, Value: "d", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a first() call. 
+func TestCallIterator_First_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`first("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a float iterator can be created for a last() call. +func TestCallIterator_Last_Float(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that an integer iterator can be created for a last() call. 
+func TestCallIterator_Last_Integer(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a string iterator can be created for a last() call. +func TestCallIterator_Last_String(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &StringIterator{Points: []influxql.StringPoint{ + {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Time: 2, Value: "b", Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure that a boolean iterator can be created for a last() call. 
+func TestCallIterator_Last_Boolean(t *testing.T) { + itr, _ := influxql.NewCallIterator( + &BooleanIterator{Points: []influxql.BooleanPoint{ + {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, + {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, + {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, + + {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, + + {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, + }}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`last("value")`), + Dimensions: []string{"host"}, + Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, + }, + ) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, + {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, + {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +func TestNewCallIterator_UnsupportedExprName(t *testing.T) { + _, err := influxql.NewCallIterator( + &FloatIterator{}, + influxql.IteratorOptions{ + Expr: MustParseExpr(`foobar("value")`), + }, + ) + + if err == nil || err.Error() != "unsupported function call: foobar" { + t.Errorf("unexpected error: %s", err) + } +} + +func BenchmarkCallIterator_Min_Float(b *testing.B) { + input := GenerateFloatIterator(rand.New(rand.NewSource(0)), b.N) + b.ResetTimer() + b.ReportAllocs() + + itr, err := influxql.NewCallIterator(input, influxql.IteratorOptions{ + Expr: MustParseExpr("min(value)"), + Interval: influxql.Interval{Duration: 1 * time.Hour}, + }) + if err != nil { + b.Fatal(err) + } + + switch itr := itr.(type) { + case influxql.FloatIterator: + for { + if p := itr.Next(); p == nil { + break + } + } + default: + b.Fatalf("incorrect iterator type: %T", itr) + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/cast.go b/vendor/github.com/influxdata/influxdb/influxql/cast.go new file mode 100644 index 0000000000..b993a17dfb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/cast.go @@ -0,0 +1,41 @@ +package influxql + +func castToFloat(v interface{}) float64 { + switch v := v.(type) { + case float64: + return v + case int64: + return float64(v) + default: + return float64(0) + } +} + +func castToInteger(v interface{}) int64 { + switch v := v.(type) { + case float64: + return int64(v) + case int64: + return v + default: + return int64(0) + } +} + +func castToString(v interface{}) string { + switch v := v.(type) { + case string: + return v + default: + return "" + } +} + +func castToBoolean(v interface{}) bool { + switch v := v.(type) { + case bool: + return v + default: + return false + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/doc.go b/vendor/github.com/influxdata/influxdb/influxql/doc.go new file mode 100644 index 0000000000..f93da4b45f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/doc.go @@ -0,0 +1,64 @@ +/* +Package influxql implements a parser for the InfluxDB query language. + +InfluxQL is a DML and DDL language for the InfluxDB time series database. 
+It provides the ability to query for aggregate statistics as well as create +and configure the InfluxDB server. + +Selecting data + +The SELECT query is used for retrieving data from one or more series. It allows +for a list of columns followed by a list of series to select from. + + SELECT value FROM cpu_load + +You can also add a a conditional expression to limit the results of the query: + + SELECT value FROM cpu_load WHERE host = 'influxdb.com' + +Two or more series can be combined into a single query and executed together: + + SELECT cpu0.value + cpu1.value + FROM cpu_load AS cpu0 INNER JOIN cpu_load cpu1 ON cpu0.host = cpu1.host + +Limits and ordering can be set on selection queries as well: + + SELECT value FROM cpu_load LIMIT 100 ORDER DESC; + + +Removing data + +The DELETE query is available to remove time series data points from the +database. This query will delete "cpu_load" values older than an hour: + + DELETE FROM cpu_load WHERE time < now() - 1h + + +Continuous Queries + +Queries can be run indefinitely on the server in order to generate new series. +This is done by running a "SELECT INTO" query. For example, this query computes +the hourly mean for cpu_load and stores it into a "cpu_load" series in the +"daily" shard space. + + SELECT mean(value) AS value FROM cpu_load GROUP BY 1h + INTO daily.cpu_load + +If there is existing data on the source series then this query will be run for +all historic data. To only execute the query on new incoming data you can append +"NO BACKFILL" to the end of the query: + + SELECT mean(value) AS value FROM cpu_load GROUP BY 1h + INTO daily.cpu_load NO BACKFILL + +Continuous queries will return an id that can be used to remove them in the +future. To remove a continous query, use the DROP CONTINUOUS QUERY statement: + + DROP CONTINUOUS QUERY 12 + +You can also list all continuous queries by running: + + LIST CONTINUOUS QUERIES + +*/ +package influxql diff --git a/vendor/github.com/influxdata/influxdb/influxql/emitter.go b/vendor/github.com/influxdata/influxdb/influxql/emitter.go new file mode 100644 index 0000000000..84ba7e3b7f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/emitter.go @@ -0,0 +1,199 @@ +package influxql + +import ( + "fmt" + "time" + + "github.com/influxdata/influxdb/models" +) + +// Emitter groups values together by name, +type Emitter struct { + buf []Point + itrs []Iterator + ascending bool + + tags Tags + row *models.Row + + // The columns to attach to each row. + Columns []string + + // Removes the "time" column from output. + // Used for meta queries where time does not apply. + OmitTime bool +} + +// NewEmitter returns a new instance of Emitter that pulls from itrs. +func NewEmitter(itrs []Iterator, ascending bool) *Emitter { + return &Emitter{ + buf: make([]Point, len(itrs)), + itrs: itrs, + ascending: ascending, + } +} + +// Close closes the underlying iterators. +func (e *Emitter) Close() error { + return Iterators(e.itrs).Close() +} + +// Emit returns the next row from the iterators. +func (e *Emitter) Emit() *models.Row { + // Immediately end emission if there are no iterators. + if len(e.itrs) == 0 { + return nil + } + + // Continually read from iterators until they are exhausted. + for { + // Fill buffer. Return row if no more points remain. + t, name, tags := e.loadBuf() + if t == ZeroTime { + row := e.row + e.row = nil + return row + } + + // Read next set of values from all iterators at a given time/name/tags. + // If no values are returned then return row. 
+ values := e.readAt(t, name, tags) + if values == nil { + row := e.row + e.row = nil + return row + } + + // If there's no row yet then create one. + // If the name and tags match the existing row, append to that row. + // Otherwise return existing row and add values to next emitted row. + if e.row == nil { + e.createRow(name, tags, values) + } else if e.row.Name == name && e.tags.Equals(&tags) { + e.row.Values = append(e.row.Values, values) + } else { + row := e.row + e.createRow(name, tags, values) + return row + } + } +} + +// loadBuf reads in points into empty buffer slots. +// Returns the next time/name/tags to emit for. +func (e *Emitter) loadBuf() (t int64, name string, tags Tags) { + t = ZeroTime + + for i := range e.itrs { + // Load buffer, if empty. + if e.buf[i] == nil { + e.buf[i] = e.readIterator(e.itrs[i]) + } + + // Skip if buffer is empty. + p := e.buf[i] + if p == nil { + continue + } + itrTime, itrName, itrTags := p.time(), p.name(), p.tags() + + // Initialize range values if not set. + if t == ZeroTime { + t, name, tags = itrTime, itrName, itrTags + continue + } + + // Update range values if lower and emitter is in time ascending order. + if e.ascending { + if (itrTime < t) || (itrTime == t && itrName < name) || (itrTime == t && itrName == name && itrTags.ID() < tags.ID()) { + t, name, tags = itrTime, itrName, itrTags + } + continue + } + + // Update range values if higher and emitter is in time descending order. + if (itrTime > t) || (itrTime == t && itrName > name) || (itrTime == t && itrName == name && itrTags.ID() > tags.ID()) { + t, name, tags = itrTime, itrName, itrTags + } + } + + return +} + +// createRow creates a new row attached to the emitter. +func (e *Emitter) createRow(name string, tags Tags, values []interface{}) { + e.tags = tags + e.row = &models.Row{ + Name: name, + Tags: tags.KeyValues(), + Columns: e.Columns, + Values: [][]interface{}{values}, + } +} + +// readAt returns the next slice of values from the iterators at time/name/tags. +// Returns nil values once the iterators are exhausted. +func (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} { + // If time is included then move colums over by one. + offset := 1 + if e.OmitTime { + offset = 0 + } + + values := make([]interface{}, len(e.itrs)+offset) + if !e.OmitTime { + values[0] = time.Unix(0, t).UTC() + } + + for i, p := range e.buf { + // Skip if buffer is empty. + if p == nil { + values[i+offset] = nil + continue + } + + // Skip point if it doesn't match time/name/tags. + pTags := p.tags() + if p.time() != t || p.name() != name || !pTags.Equals(&tags) { + values[i+offset] = nil + continue + } + + // Read point value. + values[i+offset] = p.value() + + // Clear buffer. + e.buf[i] = nil + } + + return values +} + +// readIterator reads the next point from itr. 
+func (e *Emitter) readIterator(itr Iterator) Point { + if itr == nil { + return nil + } + + switch itr := itr.(type) { + case FloatIterator: + if p := itr.Next(); p != nil { + return p + } + case IntegerIterator: + if p := itr.Next(); p != nil { + return p + } + case StringIterator: + if p := itr.Next(); p != nil { + return p + } + case BooleanIterator: + if p := itr.Next(); p != nil { + return p + } + default: + panic(fmt.Sprintf("unsupported iterator: %T", itr)) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/emitter_test.go b/vendor/github.com/influxdata/influxdb/influxql/emitter_test.go new file mode 100644 index 0000000000..436dfce3fa --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/emitter_test.go @@ -0,0 +1,69 @@ +package influxql_test + +import ( + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure the emitter can group iterators together into rows. +func TestEmitter_Emit(t *testing.T) { + // Build an emitter that pulls from two iterators. + e := influxql.NewEmitter([]influxql.Iterator{ + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 4}, + {Name: "cpu", Tags: ParseTags("region=north"), Time: 0, Value: 4}, + {Name: "mem", Time: 4, Value: 5}, + }}, + }, true) + e.Columns = []string{"col1", "col2"} + + // Verify the cpu region=west is emitted first. + if row := e.Emit(); !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "west"}, + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), float64(1), nil}, + {time.Unix(0, 1).UTC(), float64(2), float64(4)}, + }, + }) { + t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) + } + + // Verify the cpu region=north is emitted next. + if row := e.Emit(); !deep.Equal(row, &models.Row{ + Name: "cpu", + Tags: map[string]string{"region": "north"}, + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 0).UTC(), nil, float64(4)}, + }, + }) { + t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) + } + + // Verify the mem series is emitted last. + if row := e.Emit(); !deep.Equal(row, &models.Row{ + Name: "mem", + Columns: []string{"col1", "col2"}, + Values: [][]interface{}{ + {time.Unix(0, 4).UTC(), nil, float64(5)}, + }, + }) { + t.Fatalf("unexpected row(2): %s", spew.Sdump(row)) + } + + // Verify EOF. + if row := e.Emit(); row != nil { + t.Fatalf("unexpected eof: %s", spew.Sdump(row)) + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go new file mode 100644 index 0000000000..717efc84f9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go @@ -0,0 +1,1003 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: functions.gen.go.tmpl + +package influxql + +// FloatPointAggregator aggregates points to produce a single point. +type FloatPointAggregator interface { + AggregateFloat(p *FloatPoint) +} + +// FloatBulkPointAggregator aggregates multiple points at a time. 
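The emitter test above exercises the default time-prefixed rows but not the OmitTime flag documented on the Emitter struct. The sketch below shows OmitTime in the same style, reusing the FloatIterator helper from the influxql_test package; it is an illustrative addition, not part of this patch:

    package influxql_test

    import (
    	"testing"

    	"github.com/influxdata/influxdb/influxql"
    )

    // Sketch only: exercises Emitter.OmitTime, which the existing test leaves untouched.
    func TestEmitter_Emit_OmitTime(t *testing.T) {
    	e := influxql.NewEmitter([]influxql.Iterator{
    		&FloatIterator{Points: []influxql.FloatPoint{
    			{Name: "cpu", Time: 0, Value: 1},
    		}},
    	}, true)
    	e.Columns = []string{"value"}
    	e.OmitTime = true // rows carry only the "value" column, no timestamp

    	row := e.Emit()
    	if row == nil || len(row.Values) != 1 || len(row.Values[0]) != 1 {
    		t.Fatalf("unexpected row: %#v", row)
    	}
    	if v := row.Values[0][0]; v != float64(1) {
    		t.Fatalf("unexpected value: %v", v)
    	}
    }
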
+type FloatBulkPointAggregator interface { + AggregateFloatBulk(points []FloatPoint) +} + +// AggregateFloatPoints feeds a slice of FloatPoint into an +// aggregator. If the aggregator is a FloatBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateFloatPoints(a FloatPointAggregator, points []FloatPoint) { + switch a := a.(type) { + case FloatBulkPointAggregator: + a.AggregateFloatBulk(points) + default: + for _, p := range points { + a.AggregateFloat(&p) + } + } +} + +// FloatPointEmitter produces a single point from an aggregate. +type FloatPointEmitter interface { + Emit() []FloatPoint +} + +// FloatReduceFunc is the function called by a FloatPoint reducer. +type FloatReduceFunc func(prev *FloatPoint, curr *FloatPoint) (t int64, v float64, aux []interface{}) + +type FloatFuncReducer struct { + prev *FloatPoint + fn FloatReduceFunc +} + +func NewFloatFuncReducer(fn FloatReduceFunc) *FloatFuncReducer { + return &FloatFuncReducer{fn: fn} +} + +func (r *FloatFuncReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *FloatFuncReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// FloatReduceSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceSliceFunc func(a []FloatPoint) []FloatPoint + +type FloatSliceFuncReducer struct { + points []FloatPoint + fn FloatReduceSliceFunc +} + +func NewFloatSliceFuncReducer(fn FloatReduceSliceFunc) *FloatSliceFuncReducer { + return &FloatSliceFuncReducer{fn: fn} +} + +func (r *FloatSliceFuncReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +func (r *FloatSliceFuncReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +func (r *FloatSliceFuncReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// FloatReduceIntegerFunc is the function called by a FloatPoint reducer. +type FloatReduceIntegerFunc func(prev *IntegerPoint, curr *FloatPoint) (t int64, v int64, aux []interface{}) + +type FloatFuncIntegerReducer struct { + prev *IntegerPoint + fn FloatReduceIntegerFunc +} + +func NewFloatFuncIntegerReducer(fn FloatReduceIntegerFunc) *FloatFuncIntegerReducer { + return &FloatFuncIntegerReducer{fn: fn} +} + +func (r *FloatFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *FloatFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// FloatReduceIntegerSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceIntegerSliceFunc func(a []FloatPoint) []IntegerPoint + +type FloatSliceFuncIntegerReducer struct { + points []FloatPoint + fn FloatReduceIntegerSliceFunc +} + +func NewFloatSliceFuncIntegerReducer(fn FloatReduceIntegerSliceFunc) *FloatSliceFuncIntegerReducer { + return &FloatSliceFuncIntegerReducer{fn: fn} +} + +func (r *FloatSliceFuncIntegerReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +func (r *FloatSliceFuncIntegerReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) 
+} + +func (r *FloatSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// FloatReduceStringFunc is the function called by a FloatPoint reducer. +type FloatReduceStringFunc func(prev *StringPoint, curr *FloatPoint) (t int64, v string, aux []interface{}) + +type FloatFuncStringReducer struct { + prev *StringPoint + fn FloatReduceStringFunc +} + +func NewFloatFuncStringReducer(fn FloatReduceStringFunc) *FloatFuncStringReducer { + return &FloatFuncStringReducer{fn: fn} +} + +func (r *FloatFuncStringReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *FloatFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// FloatReduceStringSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceStringSliceFunc func(a []FloatPoint) []StringPoint + +type FloatSliceFuncStringReducer struct { + points []FloatPoint + fn FloatReduceStringSliceFunc +} + +func NewFloatSliceFuncStringReducer(fn FloatReduceStringSliceFunc) *FloatSliceFuncStringReducer { + return &FloatSliceFuncStringReducer{fn: fn} +} + +func (r *FloatSliceFuncStringReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +func (r *FloatSliceFuncStringReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +func (r *FloatSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// FloatReduceBooleanFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanFunc func(prev *BooleanPoint, curr *FloatPoint) (t int64, v bool, aux []interface{}) + +type FloatFuncBooleanReducer struct { + prev *BooleanPoint + fn FloatReduceBooleanFunc +} + +func NewFloatFuncBooleanReducer(fn FloatReduceBooleanFunc) *FloatFuncBooleanReducer { + return &FloatFuncBooleanReducer{fn: fn} +} + +func (r *FloatFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *FloatFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// FloatReduceBooleanSliceFunc is the function called by a FloatPoint reducer. +type FloatReduceBooleanSliceFunc func(a []FloatPoint) []BooleanPoint + +type FloatSliceFuncBooleanReducer struct { + points []FloatPoint + fn FloatReduceBooleanSliceFunc +} + +func NewFloatSliceFuncBooleanReducer(fn FloatReduceBooleanSliceFunc) *FloatSliceFuncBooleanReducer { + return &FloatSliceFuncBooleanReducer{fn: fn} +} + +func (r *FloatSliceFuncBooleanReducer) AggregateFloat(p *FloatPoint) { + r.points = append(r.points, *p) +} + +func (r *FloatSliceFuncBooleanReducer) AggregateFloatBulk(points []FloatPoint) { + r.points = append(r.points, points...) +} + +func (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// IntegerPointAggregator aggregates points to produce a single point. +type IntegerPointAggregator interface { + AggregateInteger(p *IntegerPoint) +} + +// IntegerBulkPointAggregator aggregates multiple points at a time. 
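The Float reducers above are driven through small reduce functions. As a concrete illustration of the plumbing (NewFloatFuncReducer, AggregateFloatPoints, Emit, and the Aggregated counter), here is a minimal sketch with a hypothetical minimum-keeping reduce function; it is not part of this patch, and the import path simply mirrors the vendored package:

    package main

    import (
    	"fmt"

    	"github.com/influxdata/influxdb/influxql"
    )

    // floatMinReduce keeps the smallest value seen so far (illustrative only).
    func floatMinReduce(prev, curr *influxql.FloatPoint) (int64, float64, []interface{}) {
    	if prev == nil || curr.Value < prev.Value {
    		return curr.Time, curr.Value, nil
    	}
    	return prev.Time, prev.Value, nil
    }

    func main() {
    	r := influxql.NewFloatFuncReducer(floatMinReduce)

    	// AggregateFloatPoints falls back to point-at-a-time aggregation here,
    	// because FloatFuncReducer does not implement FloatBulkPointAggregator.
    	influxql.AggregateFloatPoints(r, []influxql.FloatPoint{
    		{Time: 0, Value: 15},
    		{Time: 1, Value: 10},
    		{Time: 2, Value: 12},
    	})

    	// Emits a single point: Time=1, Value=10, Aggregated=3.
    	fmt.Println(r.Emit())
    }

A FloatSliceFuncReducer, by contrast, would take the bulk branch of AggregateFloatPoints, since it implements AggregateFloatBulk.
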
+type IntegerBulkPointAggregator interface { + AggregateIntegerBulk(points []IntegerPoint) +} + +// AggregateIntegerPoints feeds a slice of IntegerPoint into an +// aggregator. If the aggregator is a IntegerBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateIntegerPoints(a IntegerPointAggregator, points []IntegerPoint) { + switch a := a.(type) { + case IntegerBulkPointAggregator: + a.AggregateIntegerBulk(points) + default: + for _, p := range points { + a.AggregateInteger(&p) + } + } +} + +// IntegerPointEmitter produces a single point from an aggregate. +type IntegerPointEmitter interface { + Emit() []IntegerPoint +} + +// IntegerReduceFloatFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatFunc func(prev *FloatPoint, curr *IntegerPoint) (t int64, v float64, aux []interface{}) + +type IntegerFuncFloatReducer struct { + prev *FloatPoint + fn IntegerReduceFloatFunc +} + +func NewIntegerFuncFloatReducer(fn IntegerReduceFloatFunc) *IntegerFuncFloatReducer { + return &IntegerFuncFloatReducer{fn: fn} +} + +func (r *IntegerFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *IntegerFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// IntegerReduceFloatSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFloatSliceFunc func(a []IntegerPoint) []FloatPoint + +type IntegerSliceFuncFloatReducer struct { + points []IntegerPoint + fn IntegerReduceFloatSliceFunc +} + +func NewIntegerSliceFuncFloatReducer(fn IntegerReduceFloatSliceFunc) *IntegerSliceFuncFloatReducer { + return &IntegerSliceFuncFloatReducer{fn: fn} +} + +func (r *IntegerSliceFuncFloatReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +func (r *IntegerSliceFuncFloatReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +func (r *IntegerSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// IntegerReduceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceFunc func(prev *IntegerPoint, curr *IntegerPoint) (t int64, v int64, aux []interface{}) + +type IntegerFuncReducer struct { + prev *IntegerPoint + fn IntegerReduceFunc +} + +func NewIntegerFuncReducer(fn IntegerReduceFunc) *IntegerFuncReducer { + return &IntegerFuncReducer{fn: fn} +} + +func (r *IntegerFuncReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *IntegerFuncReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// IntegerReduceSliceFunc is the function called by a IntegerPoint reducer. 
+type IntegerReduceSliceFunc func(a []IntegerPoint) []IntegerPoint + +type IntegerSliceFuncReducer struct { + points []IntegerPoint + fn IntegerReduceSliceFunc +} + +func NewIntegerSliceFuncReducer(fn IntegerReduceSliceFunc) *IntegerSliceFuncReducer { + return &IntegerSliceFuncReducer{fn: fn} +} + +func (r *IntegerSliceFuncReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +func (r *IntegerSliceFuncReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +func (r *IntegerSliceFuncReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// IntegerReduceStringFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringFunc func(prev *StringPoint, curr *IntegerPoint) (t int64, v string, aux []interface{}) + +type IntegerFuncStringReducer struct { + prev *StringPoint + fn IntegerReduceStringFunc +} + +func NewIntegerFuncStringReducer(fn IntegerReduceStringFunc) *IntegerFuncStringReducer { + return &IntegerFuncStringReducer{fn: fn} +} + +func (r *IntegerFuncStringReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *IntegerFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// IntegerReduceStringSliceFunc is the function called by a IntegerPoint reducer. +type IntegerReduceStringSliceFunc func(a []IntegerPoint) []StringPoint + +type IntegerSliceFuncStringReducer struct { + points []IntegerPoint + fn IntegerReduceStringSliceFunc +} + +func NewIntegerSliceFuncStringReducer(fn IntegerReduceStringSliceFunc) *IntegerSliceFuncStringReducer { + return &IntegerSliceFuncStringReducer{fn: fn} +} + +func (r *IntegerSliceFuncStringReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +func (r *IntegerSliceFuncStringReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +func (r *IntegerSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// IntegerReduceBooleanFunc is the function called by a IntegerPoint reducer. +type IntegerReduceBooleanFunc func(prev *BooleanPoint, curr *IntegerPoint) (t int64, v bool, aux []interface{}) + +type IntegerFuncBooleanReducer struct { + prev *BooleanPoint + fn IntegerReduceBooleanFunc +} + +func NewIntegerFuncBooleanReducer(fn IntegerReduceBooleanFunc) *IntegerFuncBooleanReducer { + return &IntegerFuncBooleanReducer{fn: fn} +} + +func (r *IntegerFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *IntegerFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// IntegerReduceBooleanSliceFunc is the function called by a IntegerPoint reducer. 
+type IntegerReduceBooleanSliceFunc func(a []IntegerPoint) []BooleanPoint + +type IntegerSliceFuncBooleanReducer struct { + points []IntegerPoint + fn IntegerReduceBooleanSliceFunc +} + +func NewIntegerSliceFuncBooleanReducer(fn IntegerReduceBooleanSliceFunc) *IntegerSliceFuncBooleanReducer { + return &IntegerSliceFuncBooleanReducer{fn: fn} +} + +func (r *IntegerSliceFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { + r.points = append(r.points, *p) +} + +func (r *IntegerSliceFuncBooleanReducer) AggregateIntegerBulk(points []IntegerPoint) { + r.points = append(r.points, points...) +} + +func (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// StringPointAggregator aggregates points to produce a single point. +type StringPointAggregator interface { + AggregateString(p *StringPoint) +} + +// StringBulkPointAggregator aggregates multiple points at a time. +type StringBulkPointAggregator interface { + AggregateStringBulk(points []StringPoint) +} + +// AggregateStringPoints feeds a slice of StringPoint into an +// aggregator. If the aggregator is a StringBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateStringPoints(a StringPointAggregator, points []StringPoint) { + switch a := a.(type) { + case StringBulkPointAggregator: + a.AggregateStringBulk(points) + default: + for _, p := range points { + a.AggregateString(&p) + } + } +} + +// StringPointEmitter produces a single point from an aggregate. +type StringPointEmitter interface { + Emit() []StringPoint +} + +// StringReduceFloatFunc is the function called by a StringPoint reducer. +type StringReduceFloatFunc func(prev *FloatPoint, curr *StringPoint) (t int64, v float64, aux []interface{}) + +type StringFuncFloatReducer struct { + prev *FloatPoint + fn StringReduceFloatFunc +} + +func NewStringFuncFloatReducer(fn StringReduceFloatFunc) *StringFuncFloatReducer { + return &StringFuncFloatReducer{fn: fn} +} + +func (r *StringFuncFloatReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *StringFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// StringReduceFloatSliceFunc is the function called by a StringPoint reducer. +type StringReduceFloatSliceFunc func(a []StringPoint) []FloatPoint + +type StringSliceFuncFloatReducer struct { + points []StringPoint + fn StringReduceFloatSliceFunc +} + +func NewStringSliceFuncFloatReducer(fn StringReduceFloatSliceFunc) *StringSliceFuncFloatReducer { + return &StringSliceFuncFloatReducer{fn: fn} +} + +func (r *StringSliceFuncFloatReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +func (r *StringSliceFuncFloatReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +func (r *StringSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// StringReduceIntegerFunc is the function called by a StringPoint reducer. 
+type StringReduceIntegerFunc func(prev *IntegerPoint, curr *StringPoint) (t int64, v int64, aux []interface{}) + +type StringFuncIntegerReducer struct { + prev *IntegerPoint + fn StringReduceIntegerFunc +} + +func NewStringFuncIntegerReducer(fn StringReduceIntegerFunc) *StringFuncIntegerReducer { + return &StringFuncIntegerReducer{fn: fn} +} + +func (r *StringFuncIntegerReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *StringFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// StringReduceIntegerSliceFunc is the function called by a StringPoint reducer. +type StringReduceIntegerSliceFunc func(a []StringPoint) []IntegerPoint + +type StringSliceFuncIntegerReducer struct { + points []StringPoint + fn StringReduceIntegerSliceFunc +} + +func NewStringSliceFuncIntegerReducer(fn StringReduceIntegerSliceFunc) *StringSliceFuncIntegerReducer { + return &StringSliceFuncIntegerReducer{fn: fn} +} + +func (r *StringSliceFuncIntegerReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +func (r *StringSliceFuncIntegerReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +func (r *StringSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// StringReduceFunc is the function called by a StringPoint reducer. +type StringReduceFunc func(prev *StringPoint, curr *StringPoint) (t int64, v string, aux []interface{}) + +type StringFuncReducer struct { + prev *StringPoint + fn StringReduceFunc +} + +func NewStringFuncReducer(fn StringReduceFunc) *StringFuncReducer { + return &StringFuncReducer{fn: fn} +} + +func (r *StringFuncReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *StringFuncReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// StringReduceSliceFunc is the function called by a StringPoint reducer. +type StringReduceSliceFunc func(a []StringPoint) []StringPoint + +type StringSliceFuncReducer struct { + points []StringPoint + fn StringReduceSliceFunc +} + +func NewStringSliceFuncReducer(fn StringReduceSliceFunc) *StringSliceFuncReducer { + return &StringSliceFuncReducer{fn: fn} +} + +func (r *StringSliceFuncReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +func (r *StringSliceFuncReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +func (r *StringSliceFuncReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// StringReduceBooleanFunc is the function called by a StringPoint reducer. 
+type StringReduceBooleanFunc func(prev *BooleanPoint, curr *StringPoint) (t int64, v bool, aux []interface{}) + +type StringFuncBooleanReducer struct { + prev *BooleanPoint + fn StringReduceBooleanFunc +} + +func NewStringFuncBooleanReducer(fn StringReduceBooleanFunc) *StringFuncBooleanReducer { + return &StringFuncBooleanReducer{fn: fn} +} + +func (r *StringFuncBooleanReducer) AggregateString(p *StringPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *StringFuncBooleanReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// StringReduceBooleanSliceFunc is the function called by a StringPoint reducer. +type StringReduceBooleanSliceFunc func(a []StringPoint) []BooleanPoint + +type StringSliceFuncBooleanReducer struct { + points []StringPoint + fn StringReduceBooleanSliceFunc +} + +func NewStringSliceFuncBooleanReducer(fn StringReduceBooleanSliceFunc) *StringSliceFuncBooleanReducer { + return &StringSliceFuncBooleanReducer{fn: fn} +} + +func (r *StringSliceFuncBooleanReducer) AggregateString(p *StringPoint) { + r.points = append(r.points, *p) +} + +func (r *StringSliceFuncBooleanReducer) AggregateStringBulk(points []StringPoint) { + r.points = append(r.points, points...) +} + +func (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} + +// BooleanPointAggregator aggregates points to produce a single point. +type BooleanPointAggregator interface { + AggregateBoolean(p *BooleanPoint) +} + +// BooleanBulkPointAggregator aggregates multiple points at a time. +type BooleanBulkPointAggregator interface { + AggregateBooleanBulk(points []BooleanPoint) +} + +// AggregateBooleanPoints feeds a slice of BooleanPoint into an +// aggregator. If the aggregator is a BooleanBulkPointAggregator, it will +// use the AggregateBulk method. +func AggregateBooleanPoints(a BooleanPointAggregator, points []BooleanPoint) { + switch a := a.(type) { + case BooleanBulkPointAggregator: + a.AggregateBooleanBulk(points) + default: + for _, p := range points { + a.AggregateBoolean(&p) + } + } +} + +// BooleanPointEmitter produces a single point from an aggregate. +type BooleanPointEmitter interface { + Emit() []BooleanPoint +} + +// BooleanReduceFloatFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFloatFunc func(prev *FloatPoint, curr *BooleanPoint) (t int64, v float64, aux []interface{}) + +type BooleanFuncFloatReducer struct { + prev *FloatPoint + fn BooleanReduceFloatFunc +} + +func NewBooleanFuncFloatReducer(fn BooleanReduceFloatFunc) *BooleanFuncFloatReducer { + return &BooleanFuncFloatReducer{fn: fn} +} + +func (r *BooleanFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &FloatPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *BooleanFuncFloatReducer) Emit() []FloatPoint { + return []FloatPoint{*r.prev} +} + +// BooleanReduceFloatSliceFunc is the function called by a BooleanPoint reducer. 
+type BooleanReduceFloatSliceFunc func(a []BooleanPoint) []FloatPoint + +type BooleanSliceFuncFloatReducer struct { + points []BooleanPoint + fn BooleanReduceFloatSliceFunc +} + +func NewBooleanSliceFuncFloatReducer(fn BooleanReduceFloatSliceFunc) *BooleanSliceFuncFloatReducer { + return &BooleanSliceFuncFloatReducer{fn: fn} +} + +func (r *BooleanSliceFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +func (r *BooleanSliceFuncFloatReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +func (r *BooleanSliceFuncFloatReducer) Emit() []FloatPoint { + return r.fn(r.points) +} + +// BooleanReduceIntegerFunc is the function called by a BooleanPoint reducer. +type BooleanReduceIntegerFunc func(prev *IntegerPoint, curr *BooleanPoint) (t int64, v int64, aux []interface{}) + +type BooleanFuncIntegerReducer struct { + prev *IntegerPoint + fn BooleanReduceIntegerFunc +} + +func NewBooleanFuncIntegerReducer(fn BooleanReduceIntegerFunc) *BooleanFuncIntegerReducer { + return &BooleanFuncIntegerReducer{fn: fn} +} + +func (r *BooleanFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &IntegerPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *BooleanFuncIntegerReducer) Emit() []IntegerPoint { + return []IntegerPoint{*r.prev} +} + +// BooleanReduceIntegerSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceIntegerSliceFunc func(a []BooleanPoint) []IntegerPoint + +type BooleanSliceFuncIntegerReducer struct { + points []BooleanPoint + fn BooleanReduceIntegerSliceFunc +} + +func NewBooleanSliceFuncIntegerReducer(fn BooleanReduceIntegerSliceFunc) *BooleanSliceFuncIntegerReducer { + return &BooleanSliceFuncIntegerReducer{fn: fn} +} + +func (r *BooleanSliceFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +func (r *BooleanSliceFuncIntegerReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +func (r *BooleanSliceFuncIntegerReducer) Emit() []IntegerPoint { + return r.fn(r.points) +} + +// BooleanReduceStringFunc is the function called by a BooleanPoint reducer. +type BooleanReduceStringFunc func(prev *StringPoint, curr *BooleanPoint) (t int64, v string, aux []interface{}) + +type BooleanFuncStringReducer struct { + prev *StringPoint + fn BooleanReduceStringFunc +} + +func NewBooleanFuncStringReducer(fn BooleanReduceStringFunc) *BooleanFuncStringReducer { + return &BooleanFuncStringReducer{fn: fn} +} + +func (r *BooleanFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &StringPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *BooleanFuncStringReducer) Emit() []StringPoint { + return []StringPoint{*r.prev} +} + +// BooleanReduceStringSliceFunc is the function called by a BooleanPoint reducer. 
+type BooleanReduceStringSliceFunc func(a []BooleanPoint) []StringPoint + +type BooleanSliceFuncStringReducer struct { + points []BooleanPoint + fn BooleanReduceStringSliceFunc +} + +func NewBooleanSliceFuncStringReducer(fn BooleanReduceStringSliceFunc) *BooleanSliceFuncStringReducer { + return &BooleanSliceFuncStringReducer{fn: fn} +} + +func (r *BooleanSliceFuncStringReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +func (r *BooleanSliceFuncStringReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +func (r *BooleanSliceFuncStringReducer) Emit() []StringPoint { + return r.fn(r.points) +} + +// BooleanReduceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceFunc func(prev *BooleanPoint, curr *BooleanPoint) (t int64, v bool, aux []interface{}) + +type BooleanFuncReducer struct { + prev *BooleanPoint + fn BooleanReduceFunc +} + +func NewBooleanFuncReducer(fn BooleanReduceFunc) *BooleanFuncReducer { + return &BooleanFuncReducer{fn: fn} +} + +func (r *BooleanFuncReducer) AggregateBoolean(p *BooleanPoint) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &BooleanPoint{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *BooleanFuncReducer) Emit() []BooleanPoint { + return []BooleanPoint{*r.prev} +} + +// BooleanReduceSliceFunc is the function called by a BooleanPoint reducer. +type BooleanReduceSliceFunc func(a []BooleanPoint) []BooleanPoint + +type BooleanSliceFuncReducer struct { + points []BooleanPoint + fn BooleanReduceSliceFunc +} + +func NewBooleanSliceFuncReducer(fn BooleanReduceSliceFunc) *BooleanSliceFuncReducer { + return &BooleanSliceFuncReducer{fn: fn} +} + +func (r *BooleanSliceFuncReducer) AggregateBoolean(p *BooleanPoint) { + r.points = append(r.points, *p) +} + +func (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) { + r.points = append(r.points, points...) +} + +func (r *BooleanSliceFuncReducer) Emit() []BooleanPoint { + return r.fn(r.points) +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl new file mode 100644 index 0000000000..d8f0f13ed2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl @@ -0,0 +1,90 @@ +package influxql + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}PointAggregator aggregates points to produce a single point. +type {{$k.Name}}PointAggregator interface { + Aggregate{{$k.Name}}(p *{{$k.Name}}Point) +} + +// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time. +type {{$k.Name}}BulkPointAggregator interface { + Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) +} + +// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an +// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will +// use the AggregateBulk method. +func Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) { + switch a := a.(type) { + case {{$k.Name}}BulkPointAggregator: + a.Aggregate{{$k.Name}}Bulk(points) + default: + for _, p := range points { + a.Aggregate{{$k.Name}}(&p) + } + } +} + +// {{$k.Name}}PointEmitter produces a single point from an aggregate. 
+type {{$k.Name}}PointEmitter interface { + Emit() []{{$k.Name}}Point +} + +{{range $v := $types}} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{}) + +type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + prev *{{$v.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func +} + +func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn} +} + +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + t, v, aux := r.fn(r.prev, p) + if r.prev == nil { + r.prev = &{{$v.Name}}Point{} + } + r.prev.Time = t + r.prev.Value = v + r.prev.Aux = aux + if p.Aggregated > 1 { + r.prev.Aggregated += p.Aggregated + } else { + r.prev.Aggregated++ + } +} + +func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return []{{$v.Name}}Point{*r.prev} +} + +// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer. +type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point + +type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { + points []{{$k.Name}}Point + fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc +} + +func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { + return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn} +} + +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.points = append(r.points, *p) +} + +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) { + r.points = append(r.points, points...) 
+} + +func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { + return r.fn(r.points) +} +{{end}}{{end}}{{end}} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.go b/vendor/github.com/influxdata/influxdb/influxql/functions.go new file mode 100644 index 0000000000..c90b3b89d9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.go @@ -0,0 +1,55 @@ +package influxql + +type FloatMeanReducer struct { + sum float64 + count uint32 +} + +func NewFloatMeanReducer() *FloatMeanReducer { + return &FloatMeanReducer{} +} + +func (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * float64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +func (r *FloatMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: r.sum / float64(r.count), + Aggregated: r.count, + }} +} + +type IntegerMeanReducer struct { + sum int64 + count uint32 +} + +func NewIntegerMeanReducer() *IntegerMeanReducer { + return &IntegerMeanReducer{} +} + +func (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) { + if p.Aggregated >= 2 { + r.sum += p.Value * int64(p.Aggregated) + r.count += p.Aggregated + } else { + r.sum += p.Value + r.count++ + } +} + +func (r *IntegerMeanReducer) Emit() []FloatPoint { + return []FloatPoint{{ + Time: ZeroTime, + Value: float64(r.sum) / float64(r.count), + Aggregated: r.count, + }} +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/influxql.go b/vendor/github.com/influxdata/influxdb/influxql/influxql.go new file mode 100644 index 0000000000..324e399d97 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/influxql.go @@ -0,0 +1,7 @@ +package influxql // import "github.com/influxdata/influxdb/influxql" + +//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl +//go:generate tmpl -data=@tmpldata point.gen.go.tmpl +//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl + +//go:generate protoc --gogo_out=. internal/internal.proto diff --git a/vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go new file mode 100644 index 0000000000..03c7c5f580 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-gogo. +// source: internal/internal.proto +// DO NOT EDIT! + +/* +Package internal is a generated protocol buffer package. + +It is generated from these files: + internal/internal.proto + +It has these top-level messages: + Point + Aux + IteratorOptions + Measurements + Measurement + Interval + Series + SeriesList +*/ +package internal + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Point struct { + Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` + Tags *string `protobuf:"bytes,2,req,name=Tags" json:"Tags,omitempty"` + Time *int64 `protobuf:"varint,3,req,name=Time" json:"Time,omitempty"` + Nil *bool `protobuf:"varint,4,req,name=Nil" json:"Nil,omitempty"` + Aux []*Aux `protobuf:"bytes,5,rep,name=Aux" json:"Aux,omitempty"` + Aggregated *uint32 `protobuf:"varint,6,opt,name=Aggregated" json:"Aggregated,omitempty"` + FloatValue *float64 `protobuf:"fixed64,7,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,8,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,9,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} + +func (m *Point) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Point) GetTags() string { + if m != nil && m.Tags != nil { + return *m.Tags + } + return "" +} + +func (m *Point) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *Point) GetNil() bool { + if m != nil && m.Nil != nil { + return *m.Nil + } + return false +} + +func (m *Point) GetAux() []*Aux { + if m != nil { + return m.Aux + } + return nil +} + +func (m *Point) GetAggregated() uint32 { + if m != nil && m.Aggregated != nil { + return *m.Aggregated + } + return 0 +} + +func (m *Point) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Point) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Point) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Point) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +type Aux struct { + DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"` + FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"` + IntegerValue *int64 `protobuf:"varint,3,opt,name=IntegerValue" json:"IntegerValue,omitempty"` + StringValue *string `protobuf:"bytes,4,opt,name=StringValue" json:"StringValue,omitempty"` + BooleanValue *bool `protobuf:"varint,5,opt,name=BooleanValue" json:"BooleanValue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Aux) Reset() { *m = Aux{} } +func (m *Aux) String() string { return proto.CompactTextString(m) } +func (*Aux) ProtoMessage() {} + +func (m *Aux) GetDataType() int32 { + if m != nil && m.DataType != nil { + return *m.DataType + } + return 0 +} + +func (m *Aux) GetFloatValue() float64 { + if m != nil && m.FloatValue != nil { + return *m.FloatValue + } + return 0 +} + +func (m *Aux) GetIntegerValue() int64 { + if m != nil && m.IntegerValue != nil { + return *m.IntegerValue + } + return 0 +} + +func (m *Aux) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *Aux) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +type IteratorOptions 
struct { + Expr *string `protobuf:"bytes,1,opt,name=Expr" json:"Expr,omitempty"` + Aux []string `protobuf:"bytes,2,rep,name=Aux" json:"Aux,omitempty"` + Sources []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"` + Interval *Interval `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"` + Dimensions []string `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"` + Fill *int32 `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"` + FillValue *float64 `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"` + Condition *string `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"` + StartTime *int64 `protobuf:"varint,9,opt,name=StartTime" json:"StartTime,omitempty"` + EndTime *int64 `protobuf:"varint,10,opt,name=EndTime" json:"EndTime,omitempty"` + Ascending *bool `protobuf:"varint,11,opt,name=Ascending" json:"Ascending,omitempty"` + Limit *int64 `protobuf:"varint,12,opt,name=Limit" json:"Limit,omitempty"` + Offset *int64 `protobuf:"varint,13,opt,name=Offset" json:"Offset,omitempty"` + SLimit *int64 `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"` + SOffset *int64 `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"` + Dedupe *bool `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IteratorOptions) Reset() { *m = IteratorOptions{} } +func (m *IteratorOptions) String() string { return proto.CompactTextString(m) } +func (*IteratorOptions) ProtoMessage() {} + +func (m *IteratorOptions) GetExpr() string { + if m != nil && m.Expr != nil { + return *m.Expr + } + return "" +} + +func (m *IteratorOptions) GetAux() []string { + if m != nil { + return m.Aux + } + return nil +} + +func (m *IteratorOptions) GetSources() []*Measurement { + if m != nil { + return m.Sources + } + return nil +} + +func (m *IteratorOptions) GetInterval() *Interval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *IteratorOptions) GetDimensions() []string { + if m != nil { + return m.Dimensions + } + return nil +} + +func (m *IteratorOptions) GetFill() int32 { + if m != nil && m.Fill != nil { + return *m.Fill + } + return 0 +} + +func (m *IteratorOptions) GetFillValue() float64 { + if m != nil && m.FillValue != nil { + return *m.FillValue + } + return 0 +} + +func (m *IteratorOptions) GetCondition() string { + if m != nil && m.Condition != nil { + return *m.Condition + } + return "" +} + +func (m *IteratorOptions) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *IteratorOptions) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *IteratorOptions) GetAscending() bool { + if m != nil && m.Ascending != nil { + return *m.Ascending + } + return false +} + +func (m *IteratorOptions) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *IteratorOptions) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *IteratorOptions) GetSLimit() int64 { + if m != nil && m.SLimit != nil { + return *m.SLimit + } + return 0 +} + +func (m *IteratorOptions) GetSOffset() int64 { + if m != nil && m.SOffset != nil { + return *m.SOffset + } + return 0 +} + +func (m *IteratorOptions) GetDedupe() bool { + if m != nil && m.Dedupe != nil { + return *m.Dedupe + } + return false +} + +type Measurements struct { + Items []*Measurement 
`protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurements) Reset() { *m = Measurements{} } +func (m *Measurements) String() string { return proto.CompactTextString(m) } +func (*Measurements) ProtoMessage() {} + +func (m *Measurements) GetItems() []*Measurement { + if m != nil { + return m.Items + } + return nil +} + +type Measurement struct { + Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` + RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` + Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` + Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` + IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Measurement) Reset() { *m = Measurement{} } +func (m *Measurement) String() string { return proto.CompactTextString(m) } +func (*Measurement) ProtoMessage() {} + +func (m *Measurement) GetDatabase() string { + if m != nil && m.Database != nil { + return *m.Database + } + return "" +} + +func (m *Measurement) GetRetentionPolicy() string { + if m != nil && m.RetentionPolicy != nil { + return *m.RetentionPolicy + } + return "" +} + +func (m *Measurement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Measurement) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *Measurement) GetIsTarget() bool { + if m != nil && m.IsTarget != nil { + return *m.IsTarget + } + return false +} + +type Interval struct { + Duration *int64 `protobuf:"varint,1,opt,name=Duration" json:"Duration,omitempty"` + Offset *int64 `protobuf:"varint,2,opt,name=Offset" json:"Offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Interval) Reset() { *m = Interval{} } +func (m *Interval) String() string { return proto.CompactTextString(m) } +func (*Interval) ProtoMessage() {} + +func (m *Interval) GetDuration() int64 { + if m != nil && m.Duration != nil { + return *m.Duration + } + return 0 +} + +func (m *Interval) GetOffset() int64 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +type Series struct { + Name *string `protobuf:"bytes,1,opt,name=Name" json:"Name,omitempty"` + Tags []byte `protobuf:"bytes,2,opt,name=Tags" json:"Tags,omitempty"` + Aux []uint32 `protobuf:"varint,3,rep,name=Aux" json:"Aux,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Series) Reset() { *m = Series{} } +func (m *Series) String() string { return proto.CompactTextString(m) } +func (*Series) ProtoMessage() {} + +func (m *Series) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Series) GetTags() []byte { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Series) GetAux() []uint32 { + if m != nil { + return m.Aux + } + return nil +} + +type SeriesList struct { + Items []*Series `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SeriesList) Reset() { *m = SeriesList{} } +func (m *SeriesList) String() string { return proto.CompactTextString(m) } +func (*SeriesList) ProtoMessage() {} + +func (m *SeriesList) GetItems() []*Series { + if m != nil { + return m.Items + } + return nil +} + +func init() { + proto.RegisterType((*Point)(nil), "internal.Point") + proto.RegisterType((*Aux)(nil), "internal.Aux") + 
proto.RegisterType((*IteratorOptions)(nil), "internal.IteratorOptions") + proto.RegisterType((*Measurements)(nil), "internal.Measurements") + proto.RegisterType((*Measurement)(nil), "internal.Measurement") + proto.RegisterType((*Interval)(nil), "internal.Interval") + proto.RegisterType((*Series)(nil), "internal.Series") + proto.RegisterType((*SeriesList)(nil), "internal.SeriesList") +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto new file mode 100644 index 0000000000..61a81833dd --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto @@ -0,0 +1,69 @@ +package internal; + +message Point { + required string Name = 1; + required string Tags = 2; + required int64 Time = 3; + required bool Nil = 4; + repeated Aux Aux = 5; + optional uint32 Aggregated = 6; + + optional double FloatValue = 7; + optional int64 IntegerValue = 8; + optional string StringValue = 9; + optional bool BooleanValue = 10; +} + +message Aux { + required int32 DataType = 1; + optional double FloatValue = 2; + optional int64 IntegerValue = 3; + optional string StringValue = 4; + optional bool BooleanValue = 5; +} + +message IteratorOptions { + optional string Expr = 1; + repeated string Aux = 2; + repeated Measurement Sources = 3; + optional Interval Interval = 4; + repeated string Dimensions = 5; + optional int32 Fill = 6; + optional double FillValue = 7; + optional string Condition = 8; + optional int64 StartTime = 9; + optional int64 EndTime = 10; + optional bool Ascending = 11; + optional int64 Limit = 12; + optional int64 Offset = 13; + optional int64 SLimit = 14; + optional int64 SOffset = 15; + optional bool Dedupe = 16; +} + +message Measurements { + repeated Measurement Items = 1; +} + +message Measurement { + optional string Database = 1; + optional string RetentionPolicy = 2; + optional string Name = 3; + optional string Regex = 4; + optional bool IsTarget = 5; +} + +message Interval { + optional int64 Duration = 1; + optional int64 Offset = 2; +} + +message Series { + optional string Name = 1; + optional bytes Tags = 2; + repeated uint32 Aux = 3; +} + +message SeriesList { + repeated Series Items = 1; +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go new file mode 100644 index 0000000000..a19ba34581 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go @@ -0,0 +1,4668 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: iterator.gen.go.tmpl + +package influxql + +import ( + "container/heap" + "errors" + "fmt" + "io" + "log" + "sort" + "sync" + + "github.com/gogo/protobuf/proto" +) + +// FloatIterator represents a stream of float points. +type FloatIterator interface { + Iterator + Next() *FloatPoint +} + +// newFloatIterators converts a slice of Iterator to a slice of FloatIterator. +// Drop and closes any iterator in itrs that is not a FloatIterator and cannot +// be cast to a FloatIterator. +func newFloatIterators(itrs []Iterator) []FloatIterator { + a := make([]FloatIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + a = append(a, itr) + + case IntegerIterator: + a = append(a, &integerFloatCastIterator{input: itr}) + + default: + itr.Close() + } + } + return a +} + +// bufFloatIterator represents a buffered FloatIterator. 
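// A hypothetical minimal FloatIterator backed by a slice, assuming the base
// Iterator interface here only requires Close; handy for seeing how the
// buffered and merge iterators below consume their inputs.
type sliceFloatIterator struct {
	points []FloatPoint
	i      int
}

func (itr *sliceFloatIterator) Close() error { return nil }

// Next returns points in order and nil once the slice is exhausted.
func (itr *sliceFloatIterator) Next() *FloatPoint {
	if itr.i >= len(itr.points) {
		return nil
	}
	p := &itr.points[itr.i]
	itr.i++
	return p
}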
+type bufFloatIterator struct { + itr FloatIterator + buf *FloatPoint +} + +// newBufFloatIterator returns a buffered FloatIterator. +func newBufFloatIterator(itr FloatIterator) *bufFloatIterator { + return &bufFloatIterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *bufFloatIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufFloatIterator) peek() *FloatPoint { + p := itr.Next() + itr.unread(p) + return p +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufFloatIterator) peekTime() int64 { + p := itr.peek() + if p == nil { + return ZeroTime + } + return p.Time +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufFloatIterator) Next() *FloatPoint { + if itr.buf != nil { + buf := itr.buf + itr.buf = nil + return buf + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) *FloatPoint { + v := itr.Next() + if v == nil { + return nil + } else if v.Time < startTime || v.Time >= endTime { + itr.unread(v) + return nil + } + return v +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v } + +// floatMergeIterator represents an iterator that combines multiple float iterators. +type floatMergeIterator struct { + inputs []FloatIterator + heap *floatMergeHeap + + // Current iterator and window. + curr *floatMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newFloatMergeIterator returns a new instance of floatMergeIterator. +func newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator { + itr := &floatMergeIterator{ + inputs: inputs, + heap: &floatMergeHeap{ + items: make([]*floatMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufFloatIterator(input) + if bufInput.peek() == nil { + continue + } + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput}) + } + heap.Init(itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *floatMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatMergeIterator) Next() *FloatPoint { + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil + } + itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem) + + // Read point and set current window. + p := itr.curr.itr.Next() + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p + } + + // Read the next point from the current iterator. + p := itr.curr.itr.Next() + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. 
+ inWindow := true + if itr.window.name != p.Name { + inWindow = false + } else if itr.window.tags != p.Tags.ID() { + inWindow = false + } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + inWindow = false + } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p + } +} + +// floatMergeHeap represents a heap of floatMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type floatMergeHeap struct { + opt IteratorOptions + items []*floatMergeHeapItem +} + +func (h floatMergeHeap) Len() int { return len(h.items) } +func (h floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h floatMergeHeap) Less(i, j int) bool { + x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *floatMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*floatMergeHeapItem)) +} + +func (h *floatMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type floatMergeHeapItem struct { + itr *bufFloatIterator +} + +// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type floatSortedMergeIterator struct { + inputs []FloatIterator + opt IteratorOptions + heap floatSortedMergeHeap +} + +// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator. +func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator { + itr := &floatSortedMergeIterator{ + inputs: inputs, + heap: make(floatSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap. + for _, input := range inputs { + // Read next point. + p := input.Next() + if p == nil { + continue + } + + // Append to the heap. + itr.heap = append(itr.heap, &floatSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + } + heap.Init(&itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *floatSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *floatSortedMergeIterator) Next() *FloatPoint { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *floatSortedMergeIterator) pop() *FloatPoint { + if len(itr.heap) == 0 { + return nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*floatSortedMergeHeapItem) + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p +} + +// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. 
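// floatMergeHeap.Less above ultimately compares the window start times that
// opt.Window assigns to each point's timestamp. A hypothetical sketch of that
// bucketing call (assumes Interval.Duration is a time.Duration and that the
// "time" package is imported):
func exampleWindowBucket(t int64) (start, end int64) {
	opt := IteratorOptions{Interval: Interval{Duration: time.Minute}}
	// Window returns the window containing t, used elsewhere in this file as
	// the half-open range [start, end).
	return opt.Window(t)
}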
+type floatSortedMergeHeap []*floatSortedMergeHeapItem + +func (h floatSortedMergeHeap) Len() int { return len(h) } +func (h floatSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h floatSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *floatSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*floatSortedMergeHeapItem)) +} + +func (h *floatSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type floatSortedMergeHeapItem struct { + point *FloatPoint + itr FloatIterator + ascending bool +} + +// floatLimitIterator represents an iterator that limits points per group. +type floatLimitIterator struct { + input FloatIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newFloatLimitIterator returns a new instance of floatLimitIterator. +func newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator { + return &floatLimitIterator{ + input: input, + opt: opt, + } +} + +// Close closes the underlying iterators. +func (itr *floatLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *floatLimitIterator) Next() *FloatPoint { + for { + p := itr.input.Next() + if p == nil { + return nil + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. 
+ if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil + } + continue + } + + return p + } +} + +type floatFillIterator struct { + input *bufFloatIterator + prev *FloatPoint + startTime int64 + endTime int64 + auxFields []interface{} + done bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) *floatFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = float64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + _, endTime = opt.Window(opt.EndTime) + } else { + _, startTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + itr := &floatFillIterator{ + input: newBufFloatIterator(input), + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } + + p := itr.input.peek() + if p != nil { + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + } else { + itr.window.time = itr.endTime + } + return itr +} + +func (itr *floatFillIterator) Close() error { return itr.input.Close() } + +func (itr *floatFillIterator) Next() *FloatPoint { + p := itr.input.Next() + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time < itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = nil + break + } + + // Check if the point is our next expected point. + if p == nil || p.Time > itr.window.time { + if p != nil { + itr.input.unread(p) + } + + p = &FloatPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToFloat(itr.opt.FillValue) + case PreviousFill: + if itr.prev != nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p +} + +// floatIntervalIterator represents a float implementation of IntervalIterator. 
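// The fill iterator above synthesizes a point for every missing window slot;
// the value that point gets depends on opt.Fill. A condensed, hypothetical
// restatement of that switch (not a separate API in this package):
func exampleFillValue(opt IteratorOptions, prev *FloatPoint) (value float64, isNil bool) {
	switch opt.Fill {
	case NullFill:
		return 0, true // point is emitted with Nil set
	case NumberFill:
		return castToFloat(opt.FillValue), false
	case PreviousFill:
		if prev != nil {
			return prev.Value, prev.Nil
		}
		return 0, true // no previous point yet, so fill with null
	}
	return 0, true
}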
+type floatIntervalIterator struct { + input FloatIterator + opt IteratorOptions +} + +func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator { + return &floatIntervalIterator{input: input, opt: opt} +} + +func (itr *floatIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *floatIntervalIterator) Next() *FloatPoint { + p := itr.input.Next() + if p == nil { + return p + } + p.Time, _ = itr.opt.Window(p.Time) + return p +} + +// floatAuxIterator represents a float implementation of AuxIterator. +type floatAuxIterator struct { + input *bufFloatIterator + output chan *FloatPoint + fields auxIteratorFields +} + +func newFloatAuxIterator(input FloatIterator, seriesKeys SeriesList, opt IteratorOptions) *floatAuxIterator { + return &floatAuxIterator{ + input: newBufFloatIterator(input), + output: make(chan *FloatPoint, 1), + fields: newAuxIteratorFields(seriesKeys, opt), + } +} + +func (itr *floatAuxIterator) Start() { go itr.stream() } +func (itr *floatAuxIterator) Close() error { return itr.input.Close() } +func (itr *floatAuxIterator) Next() *FloatPoint { return <-itr.output } +func (itr *floatAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } + +func (itr *floatAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *floatAuxIterator) FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *floatAuxIterator) SeriesKeys(opt IteratorOptions) (SeriesList, error) { + return nil, errors.New("not implemented") +} + +func (itr *floatAuxIterator) stream() { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- p + itr.fields.send(p) + } + + close(itr.output) + itr.fields.close() +} + +// floatChanIterator represents a new instance of floatChanIterator. +type floatChanIterator struct { + c chan *FloatPoint + once sync.Once +} + +func (itr *floatChanIterator) Close() error { + itr.once.Do(func() { close(itr.c) }) + return nil +} + +func (itr *floatChanIterator) Next() *FloatPoint { return <-itr.c } + +// floatReduceFloatIterator executes a reducer for every interval and buffers the result. +type floatReduceFloatIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Close closes the iterator and all child iterators. +func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceFloatIterator) Next() *FloatPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// floatReduceFloatPoint stores the reduced data for a name/tag combination. 
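// Each reduce iterator is parameterized by a create function that returns a
// fresh aggregator/emitter pair for every name/tag group in a window.
// FloatMeanReducer (functions.go) satisfies both interfaces, so a
// mean-per-window iterator can be assembled like this hypothetical helper:
func exampleNewMeanIterator(input FloatIterator, opt IteratorOptions) *floatReduceFloatIterator {
	return &floatReduceFloatIterator{
		input: newBufFloatIterator(input),
		opt:   opt,
		create: func() (FloatPointAggregator, FloatPointEmitter) {
			r := NewFloatMeanReducer()
			return r, r
		},
	}
}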
+type floatReduceFloatPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceFloatIterator) reduce() []FloatPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*floatReduceFloatPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// floatReduceIntegerIterator executes a reducer for every interval and buffers the result. +type floatReduceIntegerIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Close closes the iterator and all child iterators. +func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceIntegerIterator) Next() *IntegerPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// floatReduceIntegerPoint stores the reduced data for a name/tag combination. +type floatReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceIntegerIterator) reduce() []IntegerPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*floatReduceIntegerPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// floatReduceStringIterator executes a reducer for every interval and buffers the result. +type floatReduceStringIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Close closes the iterator and all child iterators. +func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceStringIterator) Next() *StringPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// floatReduceStringPoint stores the reduced data for a name/tag combination. +type floatReduceStringPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceStringIterator) reduce() []StringPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*floatReduceStringPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// floatReduceBooleanIterator executes a reducer for every interval and buffers the result. 
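// Every reduce() above sorts the group keys in reverse and appends results in
// that order; Next then pops from the end of the points slice, so points still
// come out in ascending name/tag order. A small illustration of the
// pop-from-the-back pattern (hypothetical helper):
func examplePopOrder() []string {
	keys := []string{"cpu", "mem", "disk"}
	sort.Sort(sort.Reverse(sort.StringSlice(keys))) // now: mem, disk, cpu
	out := make([]string, 0, len(keys))
	for len(keys) > 0 {
		out = append(out, keys[len(keys)-1]) // pops cpu, then disk, then mem
		keys = keys[:len(keys)-1]
	}
	return out // ascending again: cpu, disk, mem
}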
+type floatReduceBooleanIterator struct { + input *bufFloatIterator + create func() (FloatPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Close closes the iterator and all child iterators. +func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatReduceBooleanIterator) Next() *BooleanPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// floatReduceBooleanPoint stores the reduced data for a name/tag combination. +type floatReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator FloatPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *floatReduceBooleanIterator) reduce() []BooleanPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*floatReduceBooleanPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &floatReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateFloat(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// floatTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type floatTransformIterator struct { + input FloatIterator + fn floatTransformFunc +} + +// Close closes the iterator and all child iterators. +func (itr *floatTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatTransformIterator) Next() *FloatPoint { + p := itr.input.Next() + if p != nil { + p = itr.fn(p) + } + return p +} + +// floatTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type floatTransformFunc func(p *FloatPoint) *FloatPoint + +// floatReduceIterator executes a function to modify an existing point for every +// output of the input iterator. +type floatBoolTransformIterator struct { + input FloatIterator + fn floatBoolTransformFunc +} + +// Close closes the iterator and all child iterators. 
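// A hypothetical floatTransformFunc that rescales values in place, the kind of
// function floatTransformIterator applies to every point it passes through:
func exampleBytesToMegabytes() floatTransformFunc {
	return func(p *FloatPoint) *FloatPoint {
		p.Value /= 1024 * 1024 // reuse the incoming point instead of allocating
		return p
	}
}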
+func (itr *floatBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *floatBoolTransformIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p != nil { + return itr.fn(p) + } + return nil +} + +// floatBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type floatBoolTransformFunc func(p *FloatPoint) *BooleanPoint + +// floatDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type floatDedupeIterator struct { + input FloatIterator + m map[string]struct{} // lookup of points already sent +} + +// newFloatDedupeIterator returns a new instance of floatDedupeIterator. +func newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator { + return &floatDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Close closes the iterator and all child iterators. +func (itr *floatDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *floatDedupeIterator) Next() *FloatPoint { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + return nil + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeFloatPoint(p)) + if err != nil { + log.Println("error marshaling dedupe point:", err) + continue + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p + } +} + +// floatReaderIterator represents an iterator that streams from a reader. +type floatReaderIterator struct { + r io.Reader + dec *FloatPointDecoder + first *FloatPoint +} + +// newFloatReaderIterator returns a new instance of floatReaderIterator. +func newFloatReaderIterator(r io.Reader, first *FloatPoint) *floatReaderIterator { + return &floatReaderIterator{ + r: r, + dec: NewFloatPointDecoder(r), + first: first, + } +} + +// Close closes the underlying reader, if applicable. +func (itr *floatReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *floatReaderIterator) Next() *FloatPoint { + // Send first point if it hasn't been sent yet. + if itr.first != nil { + p := itr.first + itr.first = nil + return p + } + + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &FloatPoint{} + if err := itr.dec.DecodeFloatPoint(p); err == io.EOF { + return nil + } else if err != nil { + log.Printf("error reading iterator point: %s", err) + return nil + } + return p +} + +// IntegerIterator represents a stream of integer points. +type IntegerIterator interface { + Iterator + Next() *IntegerPoint +} + +// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator. +// Drop and closes any iterator in itrs that is not a IntegerIterator and cannot +// be cast to a IntegerIterator. 
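// The dedupe iterator above uses the protobuf encoding of a point as its
// identity: the serialized bytes become a set key. The same idiom in isolation
// (hypothetical helper; marshal errors are ignored for brevity):
func exampleSeenFloatPoint() func(p *FloatPoint) bool {
	seen := make(map[string]struct{})
	return func(p *FloatPoint) bool {
		buf, _ := proto.Marshal(encodeFloatPoint(p))
		if _, ok := seen[string(buf)]; ok {
			return true
		}
		seen[string(buf)] = struct{}{}
		return false
	}
}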
+func newIntegerIterators(itrs []Iterator) []IntegerIterator { + a := make([]IntegerIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case IntegerIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufIntegerIterator represents a buffered IntegerIterator. +type bufIntegerIterator struct { + itr IntegerIterator + buf *IntegerPoint +} + +// newBufIntegerIterator returns a buffered IntegerIterator. +func newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator { + return &bufIntegerIterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufIntegerIterator) peek() *IntegerPoint { + p := itr.Next() + itr.unread(p) + return p +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufIntegerIterator) peekTime() int64 { + p := itr.peek() + if p == nil { + return ZeroTime + } + return p.Time +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufIntegerIterator) Next() *IntegerPoint { + if itr.buf != nil { + buf := itr.buf + itr.buf = nil + return buf + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) *IntegerPoint { + v := itr.Next() + if v == nil { + return nil + } else if v.Time < startTime || v.Time >= endTime { + itr.unread(v) + return nil + } + return v +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v } + +// integerMergeIterator represents an iterator that combines multiple integer iterators. +type integerMergeIterator struct { + inputs []IntegerIterator + heap *integerMergeHeap + + // Current iterator and window. + curr *integerMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newIntegerMergeIterator returns a new instance of integerMergeIterator. +func newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator { + itr := &integerMergeIterator{ + inputs: inputs, + heap: &integerMergeHeap{ + items: make([]*integerMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufIntegerIterator(input) + if bufInput.peek() == nil { + continue + } + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput}) + } + heap.Init(itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *integerMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerMergeIterator) Next() *IntegerPoint { + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil + } + itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem) + + // Read point and set current window. 
+ p := itr.curr.itr.Next() + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p + } + + // Read the next point from the current iterator. + p := itr.curr.itr.Next() + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if itr.window.name != p.Name { + inWindow = false + } else if itr.window.tags != p.Tags.ID() { + inWindow = false + } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + inWindow = false + } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p + } +} + +// integerMergeHeap represents a heap of integerMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type integerMergeHeap struct { + opt IteratorOptions + items []*integerMergeHeapItem +} + +func (h integerMergeHeap) Len() int { return len(h.items) } +func (h integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h integerMergeHeap) Less(i, j int) bool { + x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *integerMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*integerMergeHeapItem)) +} + +func (h *integerMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type integerMergeHeapItem struct { + itr *bufIntegerIterator +} + +// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type integerSortedMergeIterator struct { + inputs []IntegerIterator + opt IteratorOptions + heap integerSortedMergeHeap +} + +// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator. +func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator { + itr := &integerSortedMergeIterator{ + inputs: inputs, + heap: make(integerSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap. + for _, input := range inputs { + // Read next point. + p := input.Next() + if p == nil { + continue + } + + // Append to the heap. + itr.heap = append(itr.heap, &integerSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + } + heap.Init(&itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *integerSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *integerSortedMergeIterator) Next() *IntegerPoint { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
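// The sorted merge iterators (both the float and the integer variants) follow
// the classic k-way merge: pop the smallest head from the heap, advance that
// cursor, and push it back while it still has values. A stripped-down,
// hypothetical version over plain ints:
type intCursor struct{ vals []int }

type intMergeExampleHeap []*intCursor

func (h intMergeExampleHeap) Len() int            { return len(h) }
func (h intMergeExampleHeap) Less(i, j int) bool  { return h[i].vals[0] < h[j].vals[0] }
func (h intMergeExampleHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intMergeExampleHeap) Push(x interface{}) { *h = append(*h, x.(*intCursor)) }
func (h *intMergeExampleHeap) Pop() interface{} {
	old := *h
	item := old[len(old)-1]
	*h = old[:len(old)-1]
	return item
}

func exampleKWayMerge(inputs ...[]int) []int {
	h := make(intMergeExampleHeap, 0, len(inputs))
	for _, in := range inputs {
		if len(in) > 0 {
			h = append(h, &intCursor{vals: in})
		}
	}
	heap.Init(&h)
	var out []int
	for h.Len() > 0 {
		c := heap.Pop(&h).(*intCursor)
		out = append(out, c.vals[0])
		if c.vals = c.vals[1:]; len(c.vals) > 0 {
			heap.Push(&h, c)
		}
	}
	return out
}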
+func (itr *integerSortedMergeIterator) pop() *IntegerPoint { + if len(itr.heap) == 0 { + return nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*integerSortedMergeHeapItem) + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p +} + +// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. +type integerSortedMergeHeap []*integerSortedMergeHeapItem + +func (h integerSortedMergeHeap) Len() int { return len(h) } +func (h integerSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h integerSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *integerSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*integerSortedMergeHeapItem)) +} + +func (h *integerSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type integerSortedMergeHeapItem struct { + point *IntegerPoint + itr IntegerIterator + ascending bool +} + +// integerLimitIterator represents an iterator that limits points per group. +type integerLimitIterator struct { + input IntegerIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newIntegerLimitIterator returns a new instance of integerLimitIterator. +func newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator { + return &integerLimitIterator{ + input: input, + opt: opt, + } +} + +// Close closes the underlying iterators. +func (itr *integerLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *integerLimitIterator) Next() *IntegerPoint { + for { + p := itr.input.Next() + if p == nil { + return nil + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. 
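+ // In that case every remaining point belongs to the same group, so nothing
+ // beyond the limit can ever be returned and the iterator can stop early.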
+ if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil + } + continue + } + + return p + } +} + +type integerFillIterator struct { + input *bufIntegerIterator + prev *IntegerPoint + startTime int64 + endTime int64 + auxFields []interface{} + done bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newIntegerFillIterator(input IntegerIterator, expr Expr, opt IteratorOptions) *integerFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = int64(0) + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + _, endTime = opt.Window(opt.EndTime) + } else { + _, startTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + itr := &integerFillIterator{ + input: newBufIntegerIterator(input), + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } + + p := itr.input.peek() + if p != nil { + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + } else { + itr.window.time = itr.endTime + } + return itr +} + +func (itr *integerFillIterator) Close() error { return itr.input.Close() } + +func (itr *integerFillIterator) Next() *IntegerPoint { + p := itr.input.Next() + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time < itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = nil + break + } + + // Check if the point is our next expected point. + if p == nil || p.Time > itr.window.time { + if p != nil { + itr.input.unread(p) + } + + p = &IntegerPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToInteger(itr.opt.FillValue) + case PreviousFill: + if itr.prev != nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p +} + +// integerIntervalIterator represents a integer implementation of IntervalIterator. 
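+// It truncates the timestamp of each point to the start of the window that
+// contains it.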
+type integerIntervalIterator struct { + input IntegerIterator + opt IteratorOptions +} + +func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator { + return &integerIntervalIterator{input: input, opt: opt} +} + +func (itr *integerIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *integerIntervalIterator) Next() *IntegerPoint { + p := itr.input.Next() + if p == nil { + return p + } + p.Time, _ = itr.opt.Window(p.Time) + return p +} + +// integerAuxIterator represents a integer implementation of AuxIterator. +type integerAuxIterator struct { + input *bufIntegerIterator + output chan *IntegerPoint + fields auxIteratorFields +} + +func newIntegerAuxIterator(input IntegerIterator, seriesKeys SeriesList, opt IteratorOptions) *integerAuxIterator { + return &integerAuxIterator{ + input: newBufIntegerIterator(input), + output: make(chan *IntegerPoint, 1), + fields: newAuxIteratorFields(seriesKeys, opt), + } +} + +func (itr *integerAuxIterator) Start() { go itr.stream() } +func (itr *integerAuxIterator) Close() error { return itr.input.Close() } +func (itr *integerAuxIterator) Next() *IntegerPoint { return <-itr.output } +func (itr *integerAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } + +func (itr *integerAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *integerAuxIterator) FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *integerAuxIterator) SeriesKeys(opt IteratorOptions) (SeriesList, error) { + return nil, errors.New("not implemented") +} + +func (itr *integerAuxIterator) stream() { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- p + itr.fields.send(p) + } + + close(itr.output) + itr.fields.close() +} + +// integerChanIterator represents a new instance of integerChanIterator. +type integerChanIterator struct { + c chan *IntegerPoint + once sync.Once +} + +func (itr *integerChanIterator) Close() error { + itr.once.Do(func() { close(itr.c) }) + return nil +} + +func (itr *integerChanIterator) Next() *IntegerPoint { return <-itr.c } + +// integerReduceFloatIterator executes a reducer for every interval and buffers the result. +type integerReduceFloatIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Close closes the iterator and all child iterators. +func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceFloatIterator) Next() *FloatPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// integerReduceFloatPoint stores the reduced data for a name/tag combination. 
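+// It pairs the aggregator that consumes the input integer points with the emitter
+// that produces the reduced float points.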
+type integerReduceFloatPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceFloatIterator) reduce() []FloatPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*integerReduceFloatPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// integerReduceIntegerIterator executes a reducer for every interval and buffers the result. +type integerReduceIntegerIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Close closes the iterator and all child iterators. +func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceIntegerIterator) Next() *IntegerPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// integerReduceIntegerPoint stores the reduced data for a name/tag combination. +type integerReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceIntegerIterator) reduce() []IntegerPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*integerReduceIntegerPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. 
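+ // The id joins the point name and tag set with a NUL byte so that distinct
+ // series cannot collide in the map.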
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// integerReduceStringIterator executes a reducer for every interval and buffers the result. +type integerReduceStringIterator struct { + input *bufIntegerIterator + create func() (IntegerPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Close closes the iterator and all child iterators. +func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerReduceStringIterator) Next() *StringPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// integerReduceStringPoint stores the reduced data for a name/tag combination. +type integerReduceStringPoint struct { + Name string + Tags Tags + Aggregator IntegerPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *integerReduceStringIterator) reduce() []StringPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*integerReduceStringPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &integerReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateInteger(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// integerReduceBooleanIterator executes a reducer for every interval and buffers the result. 
+type integerReduceBooleanIterator struct {
+ input *bufIntegerIterator
+ create func() (IntegerPointAggregator, BooleanPointEmitter)
+ opt IteratorOptions
+ points []BooleanPoint
+}
+
+// Close closes the iterator and all child iterators.
+func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() }
+
+// Next returns the next reduced boolean point for the next available interval.
+func (itr *integerReduceBooleanIterator) Next() *BooleanPoint {
+ // Calculate next window if we have no more points.
+ if len(itr.points) == 0 {
+ itr.points = itr.reduce()
+ if len(itr.points) == 0 {
+ return nil
+ }
+ }
+
+ // Pop next point off the stack.
+ p := &itr.points[len(itr.points)-1]
+ itr.points = itr.points[:len(itr.points)-1]
+ return p
+}
+
+// integerReduceBooleanPoint stores the reduced data for a name/tag combination.
+type integerReduceBooleanPoint struct {
+ Name string
+ Tags Tags
+ Aggregator IntegerPointAggregator
+ Emitter BooleanPointEmitter
+}
+
+// reduce runs the aggregator over every point in the next window and emits the
+// reduced points, grouped by name and tag set.
+func (itr *integerReduceBooleanIterator) reduce() []BooleanPoint {
+ // Calculate next window.
+ startTime, endTime := itr.opt.Window(itr.input.peekTime())
+
+ // Create points by tags.
+ m := make(map[string]*integerReduceBooleanPoint)
+ for {
+ // Read next point.
+ curr := itr.input.NextInWindow(startTime, endTime)
+ if curr == nil {
+ break
+ } else if curr.Nil {
+ continue
+ }
+ tags := curr.Tags.Subset(itr.opt.Dimensions)
+ id := curr.Name + "\x00" + tags.ID()
+
+ // Retrieve the aggregator for this name/tag combination or create one.
+ rp := m[id]
+ if rp == nil {
+ aggregator, emitter := itr.create()
+ rp = &integerReduceBooleanPoint{
+ Name: curr.Name,
+ Tags: tags,
+ Aggregator: aggregator,
+ Emitter: emitter,
+ }
+ m[id] = rp
+ }
+ rp.Aggregator.AggregateInteger(curr)
+ }
+
+ // Reverse sort points by name & tag.
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(keys)))
+
+ a := make([]BooleanPoint, 0, len(m))
+ for _, k := range keys {
+ rp := m[k]
+ points := rp.Emitter.Emit()
+ for i := len(points) - 1; i >= 0; i-- {
+ points[i].Name = rp.Name
+ points[i].Tags = rp.Tags
+ // Set the point's time to the interval time if the reducer didn't provide one.
+ if points[i].Time == ZeroTime {
+ points[i].Time = startTime
+ }
+ a = append(a, points[i])
+ }
+ }
+
+ return a
+}
+
+// integerTransformIterator executes a function to modify an existing point for every
+// output of the input iterator.
+type integerTransformIterator struct {
+ input IntegerIterator
+ fn integerTransformFunc
+}
+
+// Close closes the iterator and all child iterators.
+func (itr *integerTransformIterator) Close() error { return itr.input.Close() }
+
+// Next returns the next point from the input iterator, transformed by fn.
+func (itr *integerTransformIterator) Next() *IntegerPoint {
+ p := itr.input.Next()
+ if p != nil {
+ p = itr.fn(p)
+ }
+ return p
+}
+
+// integerTransformFunc creates or modifies a point.
+// The point passed in may be modified and returned rather than allocating a
+// new point if possible.
+type integerTransformFunc func(p *IntegerPoint) *IntegerPoint
+
+// integerBoolTransformIterator executes a function to convert each point from the
+// input iterator into a boolean point.
+type integerBoolTransformIterator struct {
+ input IntegerIterator
+ fn integerBoolTransformFunc
+}
+
+// Close closes the iterator and all child iterators.
+func (itr *integerBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerBoolTransformIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p != nil { + return itr.fn(p) + } + return nil +} + +// integerBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerBoolTransformFunc func(p *IntegerPoint) *BooleanPoint + +// integerDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type integerDedupeIterator struct { + input IntegerIterator + m map[string]struct{} // lookup of points already sent +} + +// newIntegerDedupeIterator returns a new instance of integerDedupeIterator. +func newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator { + return &integerDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Close closes the iterator and all child iterators. +func (itr *integerDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *integerDedupeIterator) Next() *IntegerPoint { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + return nil + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeIntegerPoint(p)) + if err != nil { + log.Println("error marshaling dedupe point:", err) + continue + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p + } +} + +// integerReaderIterator represents an iterator that streams from a reader. +type integerReaderIterator struct { + r io.Reader + dec *IntegerPointDecoder + first *IntegerPoint +} + +// newIntegerReaderIterator returns a new instance of integerReaderIterator. +func newIntegerReaderIterator(r io.Reader, first *IntegerPoint) *integerReaderIterator { + return &integerReaderIterator{ + r: r, + dec: NewIntegerPointDecoder(r), + first: first, + } +} + +// Close closes the underlying reader, if applicable. +func (itr *integerReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *integerReaderIterator) Next() *IntegerPoint { + // Send first point if it hasn't been sent yet. + if itr.first != nil { + p := itr.first + itr.first = nil + return p + } + + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &IntegerPoint{} + if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF { + return nil + } else if err != nil { + log.Printf("error reading iterator point: %s", err) + return nil + } + return p +} + +// StringIterator represents a stream of string points. +type StringIterator interface { + Iterator + Next() *StringPoint +} + +// newStringIterators converts a slice of Iterator to a slice of StringIterator. +// Drop and closes any iterator in itrs that is not a StringIterator and cannot +// be cast to a StringIterator. 
+func newStringIterators(itrs []Iterator) []StringIterator { + a := make([]StringIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case StringIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufStringIterator represents a buffered StringIterator. +type bufStringIterator struct { + itr StringIterator + buf *StringPoint +} + +// newBufStringIterator returns a buffered StringIterator. +func newBufStringIterator(itr StringIterator) *bufStringIterator { + return &bufStringIterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *bufStringIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufStringIterator) peek() *StringPoint { + p := itr.Next() + itr.unread(p) + return p +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufStringIterator) peekTime() int64 { + p := itr.peek() + if p == nil { + return ZeroTime + } + return p.Time +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufStringIterator) Next() *StringPoint { + if itr.buf != nil { + buf := itr.buf + itr.buf = nil + return buf + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) *StringPoint { + v := itr.Next() + if v == nil { + return nil + } else if v.Time < startTime || v.Time >= endTime { + itr.unread(v) + return nil + } + return v +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v } + +// stringMergeIterator represents an iterator that combines multiple string iterators. +type stringMergeIterator struct { + inputs []StringIterator + heap *stringMergeHeap + + // Current iterator and window. + curr *stringMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newStringMergeIterator returns a new instance of stringMergeIterator. +func newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator { + itr := &stringMergeIterator{ + inputs: inputs, + heap: &stringMergeHeap{ + items: make([]*stringMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufStringIterator(input) + if bufInput.peek() == nil { + continue + } + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput}) + } + heap.Init(itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *stringMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringMergeIterator) Next() *StringPoint { + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil + } + itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem) + + // Read point and set current window. 
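+ // The window is keyed on this point's name, tag set, and containing interval;
+ // later points from this iterator are returned only while they stay inside it.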
+ p := itr.curr.itr.Next() + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p + } + + // Read the next point from the current iterator. + p := itr.curr.itr.Next() + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if itr.window.name != p.Name { + inWindow = false + } else if itr.window.tags != p.Tags.ID() { + inWindow = false + } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + inWindow = false + } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p + } +} + +// stringMergeHeap represents a heap of stringMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type stringMergeHeap struct { + opt IteratorOptions + items []*stringMergeHeapItem +} + +func (h stringMergeHeap) Len() int { return len(h.items) } +func (h stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h stringMergeHeap) Less(i, j int) bool { + x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *stringMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*stringMergeHeapItem)) +} + +func (h *stringMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type stringMergeHeapItem struct { + itr *bufStringIterator +} + +// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type stringSortedMergeIterator struct { + inputs []StringIterator + opt IteratorOptions + heap stringSortedMergeHeap +} + +// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator. +func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator { + itr := &stringSortedMergeIterator{ + inputs: inputs, + heap: make(stringSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap. + for _, input := range inputs { + // Read next point. + p := input.Next() + if p == nil { + continue + } + + // Append to the heap. + itr.heap = append(itr.heap, &stringSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + } + heap.Init(&itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *stringSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *stringSortedMergeIterator) Next() *StringPoint { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
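+// The returned point is a clone, so callers may retain it after the cursor advances.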
+func (itr *stringSortedMergeIterator) pop() *StringPoint { + if len(itr.heap) == 0 { + return nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*stringSortedMergeHeapItem) + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p +} + +// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. +type stringSortedMergeHeap []*stringSortedMergeHeapItem + +func (h stringSortedMergeHeap) Len() int { return len(h) } +func (h stringSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h stringSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *stringSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*stringSortedMergeHeapItem)) +} + +func (h *stringSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type stringSortedMergeHeapItem struct { + point *StringPoint + itr StringIterator + ascending bool +} + +// stringLimitIterator represents an iterator that limits points per group. +type stringLimitIterator struct { + input StringIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newStringLimitIterator returns a new instance of stringLimitIterator. +func newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator { + return &stringLimitIterator{ + input: input, + opt: opt, + } +} + +// Close closes the underlying iterators. +func (itr *stringLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *stringLimitIterator) Next() *StringPoint { + for { + p := itr.input.Next() + if p == nil { + return nil + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. 
+ if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil + } + continue + } + + return p + } +} + +type stringFillIterator struct { + input *bufStringIterator + prev *StringPoint + startTime int64 + endTime int64 + auxFields []interface{} + done bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) *stringFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = "" + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + _, endTime = opt.Window(opt.EndTime) + } else { + _, startTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + itr := &stringFillIterator{ + input: newBufStringIterator(input), + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } + + p := itr.input.peek() + if p != nil { + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + } else { + itr.window.time = itr.endTime + } + return itr +} + +func (itr *stringFillIterator) Close() error { return itr.input.Close() } + +func (itr *stringFillIterator) Next() *StringPoint { + p := itr.input.Next() + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time < itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = nil + break + } + + // Check if the point is our next expected point. + if p == nil || p.Time > itr.window.time { + if p != nil { + itr.input.unread(p) + } + + p = &StringPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToString(itr.opt.FillValue) + case PreviousFill: + if itr.prev != nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p +} + +// stringIntervalIterator represents a string implementation of IntervalIterator. 
+type stringIntervalIterator struct { + input StringIterator + opt IteratorOptions +} + +func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator { + return &stringIntervalIterator{input: input, opt: opt} +} + +func (itr *stringIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *stringIntervalIterator) Next() *StringPoint { + p := itr.input.Next() + if p == nil { + return p + } + p.Time, _ = itr.opt.Window(p.Time) + return p +} + +// stringAuxIterator represents a string implementation of AuxIterator. +type stringAuxIterator struct { + input *bufStringIterator + output chan *StringPoint + fields auxIteratorFields +} + +func newStringAuxIterator(input StringIterator, seriesKeys SeriesList, opt IteratorOptions) *stringAuxIterator { + return &stringAuxIterator{ + input: newBufStringIterator(input), + output: make(chan *StringPoint, 1), + fields: newAuxIteratorFields(seriesKeys, opt), + } +} + +func (itr *stringAuxIterator) Start() { go itr.stream() } +func (itr *stringAuxIterator) Close() error { return itr.input.Close() } +func (itr *stringAuxIterator) Next() *StringPoint { return <-itr.output } +func (itr *stringAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } + +func (itr *stringAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *stringAuxIterator) FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *stringAuxIterator) SeriesKeys(opt IteratorOptions) (SeriesList, error) { + return nil, errors.New("not implemented") +} + +func (itr *stringAuxIterator) stream() { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- p + itr.fields.send(p) + } + + close(itr.output) + itr.fields.close() +} + +// stringChanIterator represents a new instance of stringChanIterator. +type stringChanIterator struct { + c chan *StringPoint + once sync.Once +} + +func (itr *stringChanIterator) Close() error { + itr.once.Do(func() { close(itr.c) }) + return nil +} + +func (itr *stringChanIterator) Next() *StringPoint { return <-itr.c } + +// stringReduceFloatIterator executes a reducer for every interval and buffers the result. +type stringReduceFloatIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Close closes the iterator and all child iterators. +func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceFloatIterator) Next() *FloatPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// stringReduceFloatPoint stores the reduced data for a name/tag combination. 
+type stringReduceFloatPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceFloatIterator) reduce() []FloatPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*stringReduceFloatPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// stringReduceIntegerIterator executes a reducer for every interval and buffers the result. +type stringReduceIntegerIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Close closes the iterator and all child iterators. +func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceIntegerIterator) Next() *IntegerPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// stringReduceIntegerPoint stores the reduced data for a name/tag combination. +type stringReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceIntegerIterator) reduce() []IntegerPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*stringReduceIntegerPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// stringReduceStringIterator executes a reducer for every interval and buffers the result. +type stringReduceStringIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Close closes the iterator and all child iterators. +func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceStringIterator) Next() *StringPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// stringReduceStringPoint stores the reduced data for a name/tag combination. +type stringReduceStringPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceStringIterator) reduce() []StringPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*stringReduceStringPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// stringReduceBooleanIterator executes a reducer for every interval and buffers the result. 
+type stringReduceBooleanIterator struct { + input *bufStringIterator + create func() (StringPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Close closes the iterator and all child iterators. +func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringReduceBooleanIterator) Next() *BooleanPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// stringReduceBooleanPoint stores the reduced data for a name/tag combination. +type stringReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator StringPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *stringReduceBooleanIterator) reduce() []BooleanPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*stringReduceBooleanPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &stringReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateString(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// stringTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type stringTransformIterator struct { + input StringIterator + fn stringTransformFunc +} + +// Close closes the iterator and all child iterators. +func (itr *stringTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringTransformIterator) Next() *StringPoint { + p := itr.input.Next() + if p != nil { + p = itr.fn(p) + } + return p +} + +// stringTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type stringTransformFunc func(p *StringPoint) *StringPoint + +// stringReduceIterator executes a function to modify an existing point for every +// output of the input iterator. +type stringBoolTransformIterator struct { + input StringIterator + fn stringBoolTransformFunc +} + +// Close closes the iterator and all child iterators. 
+func (itr *stringBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *stringBoolTransformIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p != nil { + return itr.fn(p) + } + return nil +} + +// stringBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type stringBoolTransformFunc func(p *StringPoint) *BooleanPoint + +// stringDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type stringDedupeIterator struct { + input StringIterator + m map[string]struct{} // lookup of points already sent +} + +// newStringDedupeIterator returns a new instance of stringDedupeIterator. +func newStringDedupeIterator(input StringIterator) *stringDedupeIterator { + return &stringDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Close closes the iterator and all child iterators. +func (itr *stringDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *stringDedupeIterator) Next() *StringPoint { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + return nil + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeStringPoint(p)) + if err != nil { + log.Println("error marshaling dedupe point:", err) + continue + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p + } +} + +// stringReaderIterator represents an iterator that streams from a reader. +type stringReaderIterator struct { + r io.Reader + dec *StringPointDecoder + first *StringPoint +} + +// newStringReaderIterator returns a new instance of stringReaderIterator. +func newStringReaderIterator(r io.Reader, first *StringPoint) *stringReaderIterator { + return &stringReaderIterator{ + r: r, + dec: NewStringPointDecoder(r), + first: first, + } +} + +// Close closes the underlying reader, if applicable. +func (itr *stringReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *stringReaderIterator) Next() *StringPoint { + // Send first point if it hasn't been sent yet. + if itr.first != nil { + p := itr.first + itr.first = nil + return p + } + + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &StringPoint{} + if err := itr.dec.DecodeStringPoint(p); err == io.EOF { + return nil + } else if err != nil { + log.Printf("error reading iterator point: %s", err) + return nil + } + return p +} + +// BooleanIterator represents a stream of boolean points. +type BooleanIterator interface { + Iterator + Next() *BooleanPoint +} + +// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator. +// Drop and closes any iterator in itrs that is not a BooleanIterator and cannot +// be cast to a BooleanIterator. 
+func newBooleanIterators(itrs []Iterator) []BooleanIterator { + a := make([]BooleanIterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case BooleanIterator: + a = append(a, itr) + + default: + itr.Close() + } + } + return a +} + +// bufBooleanIterator represents a buffered BooleanIterator. +type bufBooleanIterator struct { + itr BooleanIterator + buf *BooleanPoint +} + +// newBufBooleanIterator returns a buffered BooleanIterator. +func newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator { + return &bufBooleanIterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *bufBooleanIterator) peek() *BooleanPoint { + p := itr.Next() + itr.unread(p) + return p +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *bufBooleanIterator) peekTime() int64 { + p := itr.peek() + if p == nil { + return ZeroTime + } + return p.Time +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *bufBooleanIterator) Next() *BooleanPoint { + if itr.buf != nil { + buf := itr.buf + itr.buf = nil + return buf + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) *BooleanPoint { + v := itr.Next() + if v == nil { + return nil + } else if v.Time < startTime || v.Time >= endTime { + itr.unread(v) + return nil + } + return v +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v } + +// booleanMergeIterator represents an iterator that combines multiple boolean iterators. +type booleanMergeIterator struct { + inputs []BooleanIterator + heap *booleanMergeHeap + + // Current iterator and window. + curr *booleanMergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// newBooleanMergeIterator returns a new instance of booleanMergeIterator. +func newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator { + itr := &booleanMergeIterator{ + inputs: inputs, + heap: &booleanMergeHeap{ + items: make([]*booleanMergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBufBooleanIterator(input) + if bufInput.peek() == nil { + continue + } + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput}) + } + heap.Init(itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *booleanMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanMergeIterator) Next() *BooleanPoint { + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil + } + itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem) + + // Read point and set current window. 
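+ // As in the integer and string merge iterators above, the popped item is
+ // guaranteed to hold a buffered point, so the Next call below cannot return nil.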
+ p := itr.curr.itr.Next() + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p + } + + // Read the next point from the current iterator. + p := itr.curr.itr.Next() + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if itr.window.name != p.Name { + inWindow = false + } else if itr.window.tags != p.Tags.ID() { + inWindow = false + } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + inWindow = false + } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p + } +} + +// booleanMergeHeap represents a heap of booleanMergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type booleanMergeHeap struct { + opt IteratorOptions + items []*booleanMergeHeapItem +} + +func (h booleanMergeHeap) Len() int { return len(h.items) } +func (h booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h booleanMergeHeap) Less(i, j int) bool { + x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + +func (h *booleanMergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*booleanMergeHeapItem)) +} + +func (h *booleanMergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type booleanMergeHeapItem struct { + itr *bufBooleanIterator +} + +// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type booleanSortedMergeIterator struct { + inputs []BooleanIterator + opt IteratorOptions + heap booleanSortedMergeHeap +} + +// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator. +func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator { + itr := &booleanSortedMergeIterator{ + inputs: inputs, + heap: make(booleanSortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap. + for _, input := range inputs { + // Read next point. + p := input.Next() + if p == nil { + continue + } + + // Append to the heap. + itr.heap = append(itr.heap, &booleanSortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + } + heap.Init(&itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *booleanSortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *booleanSortedMergeIterator) Next() *BooleanPoint { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. 
+func (itr *booleanSortedMergeIterator) pop() *BooleanPoint { + if len(itr.heap) == 0 { + return nil + } + + // Read the next item from the heap. + item := heap.Pop(&itr.heap).(*booleanSortedMergeHeapItem) + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p +} + +// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. +type booleanSortedMergeHeap []*booleanSortedMergeHeapItem + +func (h booleanSortedMergeHeap) Len() int { return len(h) } +func (h booleanSortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h booleanSortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *booleanSortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*booleanSortedMergeHeapItem)) +} + +func (h *booleanSortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type booleanSortedMergeHeapItem struct { + point *BooleanPoint + itr BooleanIterator + ascending bool +} + +// booleanLimitIterator represents an iterator that limits points per group. +type booleanLimitIterator struct { + input BooleanIterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// newBooleanLimitIterator returns a new instance of booleanLimitIterator. +func newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator { + return &booleanLimitIterator{ + input: input, + opt: opt, + } +} + +// Close closes the underlying iterators. +func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *booleanLimitIterator) Next() *BooleanPoint { + for { + p := itr.input.Next() + if p == nil { + return nil + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. 
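+ // With an interval, grouping, or multiple sources, later windows/series may
+ // still produce points under the limit, so keep scanning instead of returning.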
+ if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil + } + continue + } + + return p + } +} + +type booleanFillIterator struct { + input *bufBooleanIterator + prev *BooleanPoint + startTime int64 + endTime int64 + auxFields []interface{} + done bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func newBooleanFillIterator(input BooleanIterator, expr Expr, opt IteratorOptions) *booleanFillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = false + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + _, endTime = opt.Window(opt.EndTime) + } else { + _, startTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + itr := &booleanFillIterator{ + input: newBufBooleanIterator(input), + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } + + p := itr.input.peek() + if p != nil { + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + } else { + itr.window.time = itr.endTime + } + return itr +} + +func (itr *booleanFillIterator) Close() error { return itr.input.Close() } + +func (itr *booleanFillIterator) Next() *BooleanPoint { + p := itr.input.Next() + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time < itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = nil + break + } + + // Check if the point is our next expected point. + if p == nil || p.Time > itr.window.time { + if p != nil { + itr.input.unread(p) + } + + p = &BooleanPoint{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castToBoolean(itr.opt.FillValue) + case PreviousFill: + if itr.prev != nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p +} + +// booleanIntervalIterator represents a boolean implementation of IntervalIterator. 
+type booleanIntervalIterator struct { + input BooleanIterator + opt IteratorOptions +} + +func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator { + return &booleanIntervalIterator{input: input, opt: opt} +} + +func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() } + +func (itr *booleanIntervalIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p == nil { + return p + } + p.Time, _ = itr.opt.Window(p.Time) + return p +} + +// booleanAuxIterator represents a boolean implementation of AuxIterator. +type booleanAuxIterator struct { + input *bufBooleanIterator + output chan *BooleanPoint + fields auxIteratorFields +} + +func newBooleanAuxIterator(input BooleanIterator, seriesKeys SeriesList, opt IteratorOptions) *booleanAuxIterator { + return &booleanAuxIterator{ + input: newBufBooleanIterator(input), + output: make(chan *BooleanPoint, 1), + fields: newAuxIteratorFields(seriesKeys, opt), + } +} + +func (itr *booleanAuxIterator) Start() { go itr.stream() } +func (itr *booleanAuxIterator) Close() error { return itr.input.Close() } +func (itr *booleanAuxIterator) Next() *BooleanPoint { return <-itr.output } +func (itr *booleanAuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } + +func (itr *booleanAuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *booleanAuxIterator) FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *booleanAuxIterator) SeriesKeys(opt IteratorOptions) (SeriesList, error) { + return nil, errors.New("not implemented") +} + +func (itr *booleanAuxIterator) stream() { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- p + itr.fields.send(p) + } + + close(itr.output) + itr.fields.close() +} + +// booleanChanIterator represents a new instance of booleanChanIterator. +type booleanChanIterator struct { + c chan *BooleanPoint + once sync.Once +} + +func (itr *booleanChanIterator) Close() error { + itr.once.Do(func() { close(itr.c) }) + return nil +} + +func (itr *booleanChanIterator) Next() *BooleanPoint { return <-itr.c } + +// booleanReduceFloatIterator executes a reducer for every interval and buffers the result. +type booleanReduceFloatIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, FloatPointEmitter) + opt IteratorOptions + points []FloatPoint +} + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceFloatIterator) Next() *FloatPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// booleanReduceFloatPoint stores the reduced data for a name/tag combination. 
+type booleanReduceFloatPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter FloatPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceFloatIterator) reduce() []FloatPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*booleanReduceFloatPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceFloatPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]FloatPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result. +type booleanReduceIntegerIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, IntegerPointEmitter) + opt IteratorOptions + points []IntegerPoint +} + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceIntegerIterator) Next() *IntegerPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// booleanReduceIntegerPoint stores the reduced data for a name/tag combination. +type booleanReduceIntegerPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter IntegerPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceIntegerIterator) reduce() []IntegerPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*booleanReduceIntegerPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. 
+ rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceIntegerPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]IntegerPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// booleanReduceStringIterator executes a reducer for every interval and buffers the result. +type booleanReduceStringIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, StringPointEmitter) + opt IteratorOptions + points []StringPoint +} + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceStringIterator) Next() *StringPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// booleanReduceStringPoint stores the reduced data for a name/tag combination. +type booleanReduceStringPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter StringPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceStringIterator) reduce() []StringPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*booleanReduceStringPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceStringPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]StringPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result. 
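+// The create field supplies a fresh aggregator/emitter pair per name/tag group
+// in each window. A hedged sketch of wiring one up (hypotheticalBooleanReducer
+// is illustrative only and not defined in this package):
+//
+//    itr := &booleanReduceBooleanIterator{
+//        input: newBufBooleanIterator(input),
+//        create: func() (BooleanPointAggregator, BooleanPointEmitter) {
+//            r := &hypotheticalBooleanReducer{} // implements AggregateBoolean and Emit
+//            return r, r                        // one value may satisfy both interfaces
+//        },
+//        opt: opt,
+//    }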
+type booleanReduceBooleanIterator struct { + input *bufBooleanIterator + create func() (BooleanPointAggregator, BooleanPointEmitter) + opt IteratorOptions + points []BooleanPoint +} + +// Close closes the iterator and all child iterators. +func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanReduceBooleanIterator) Next() *BooleanPoint { + // Calculate next window if we have no more points. + if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// booleanReduceBooleanPoint stores the reduced data for a name/tag combination. +type booleanReduceBooleanPoint struct { + Name string + Tags Tags + Aggregator BooleanPointAggregator + Emitter BooleanPointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *booleanReduceBooleanIterator) reduce() []BooleanPoint { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*booleanReduceBooleanPoint) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &booleanReduceBooleanPoint{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.AggregateBoolean(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]BooleanPoint, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points) - 1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} + +// booleanTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type booleanTransformIterator struct { + input BooleanIterator + fn booleanTransformFunc +} + +// Close closes the iterator and all child iterators. +func (itr *booleanTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanTransformIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p != nil { + p = itr.fn(p) + } + return p +} + +// booleanTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type booleanTransformFunc func(p *BooleanPoint) *BooleanPoint + +// booleanReduceIterator executes a function to modify an existing point for every +// output of the input iterator. +type booleanBoolTransformIterator struct { + input BooleanIterator + fn booleanBoolTransformFunc +} + +// Close closes the iterator and all child iterators. 
+func (itr *booleanBoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *booleanBoolTransformIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p != nil { + return itr.fn(p) + } + return nil +} + +// booleanBoolTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type booleanBoolTransformFunc func(p *BooleanPoint) *BooleanPoint + +// booleanDedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type booleanDedupeIterator struct { + input BooleanIterator + m map[string]struct{} // lookup of points already sent +} + +// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator. +func newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator { + return &booleanDedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Close closes the iterator and all child iterators. +func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *booleanDedupeIterator) Next() *BooleanPoint { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + return nil + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encodeBooleanPoint(p)) + if err != nil { + log.Println("error marshaling dedupe point:", err) + continue + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p + } +} + +// booleanReaderIterator represents an iterator that streams from a reader. +type booleanReaderIterator struct { + r io.Reader + dec *BooleanPointDecoder + first *BooleanPoint +} + +// newBooleanReaderIterator returns a new instance of booleanReaderIterator. +func newBooleanReaderIterator(r io.Reader, first *BooleanPoint) *booleanReaderIterator { + return &booleanReaderIterator{ + r: r, + dec: NewBooleanPointDecoder(r), + first: first, + } +} + +// Close closes the underlying reader, if applicable. +func (itr *booleanReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *booleanReaderIterator) Next() *BooleanPoint { + // Send first point if it hasn't been sent yet. + if itr.first != nil { + p := itr.first + itr.first = nil + return p + } + + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &BooleanPoint{} + if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF { + return nil + } else if err != nil { + log.Printf("error reading iterator point: %s", err) + return nil + } + return p +} + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{w: w} +} + +// Encode encodes and writes all of itr's points to the underlying writer. 
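+// A hedged usage sketch (variable names are illustrative): the encoder is meant
+// to pair with NewReaderIterator, which decodes the stream back into a typed
+// iterator on the receiving side.
+//
+//    var buf bytes.Buffer
+//    if err := NewIteratorEncoder(&buf).EncodeIterator(itr); err != nil {
+//        // handle error
+//    }
+//    decoded, err := NewReaderIterator(&buf)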
+func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { + switch itr := itr.(type) { + case FloatIterator: + return enc.encodeFloatIterator(itr) + case IntegerIterator: + return enc.encodeIntegerIterator(itr) + case StringIterator: + return enc.encodeStringIterator(itr) + case BooleanIterator: + return enc.encodeBooleanIterator(itr) + default: + panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) + } +} + +// encodeFloatIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { + penc := NewFloatPointEncoder(enc.w) + for { + // Retrieve the next point from the iterator. + p := itr.Next() + if p == nil { + return nil + } + + // Write the point to the point encoder. + if err := penc.EncodeFloatPoint(p); err != nil { + return err + } + } +} + +// encodeIntegerIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error { + penc := NewIntegerPointEncoder(enc.w) + for { + // Retrieve the next point from the iterator. + p := itr.Next() + if p == nil { + return nil + } + + // Write the point to the point encoder. + if err := penc.EncodeIntegerPoint(p); err != nil { + return err + } + } +} + +// encodeStringIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error { + penc := NewStringPointEncoder(enc.w) + for { + // Retrieve the next point from the iterator. + p := itr.Next() + if p == nil { + return nil + } + + // Write the point to the point encoder. + if err := penc.EncodeStringPoint(p); err != nil { + return err + } + } +} + +// encodeBooleanIterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error { + penc := NewBooleanPointEncoder(enc.w) + for { + // Retrieve the next point from the iterator. + p := itr.Next() + if p == nil { + return nil + } + + // Write the point to the point encoder. + if err := penc.EncodeBooleanPoint(p); err != nil { + return err + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl new file mode 100644 index 0000000000..661e489300 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl @@ -0,0 +1,928 @@ +package influxql + +import ( + "container/heap" + "errors" + "fmt" + "io" + "sort" + "sync" + "log" + + "github.com/gogo/protobuf/proto" +) + +{{with $types := .}}{{range $k := $types}} + +// {{$k.Name}}Iterator represents a stream of {{$k.name}} points. +type {{$k.Name}}Iterator interface { + Iterator + Next() *{{$k.Name}}Point +} + +// new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator. +// Drop and closes any iterator in itrs that is not a {{$k.Name}}Iterator and cannot +// be cast to a {{$k.Name}}Iterator. +func new{{$k.Name}}Iterators(itrs []Iterator) []{{$k.Name}}Iterator { + a := make([]{{$k.Name}}Iterator, 0, len(itrs)) + for _, itr := range itrs { + switch itr := itr.(type) { + case {{$k.Name}}Iterator: + a = append(a, itr) +{{if eq .Name "Float"}} + case IntegerIterator: + a = append(a, &integerFloatCastIterator{input: itr}) +{{end}} + default: + itr.Close() + } + } + return a +} + + +// buf{{$k.Name}}Iterator represents a buffered {{$k.Name}}Iterator. 
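+// It provides one point of lookahead: peek returns the next point without
+// consuming it, unread pushes a point back into the buffer, and NextInWindow
+// only consumes points whose timestamps fall within [startTime, endTime).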
+type buf{{$k.Name}}Iterator struct { + itr {{$k.Name}}Iterator + buf *{{$k.Name}}Point +} + +// newBuf{{$k.Name}}Iterator returns a buffered {{$k.Name}}Iterator. +func newBuf{{$k.Name}}Iterator(itr {{$k.Name}}Iterator) *buf{{$k.Name}}Iterator { + return &buf{{$k.Name}}Iterator{itr: itr} +} + +// Close closes the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() } + +// peek returns the next point without removing it from the iterator. +func (itr *buf{{$k.Name}}Iterator) peek() *{{$k.Name}}Point { + p := itr.Next() + itr.unread(p) + return p +} + +// peekTime returns the time of the next point. +// Returns zero time if no more points available. +func (itr *buf{{$k.Name}}Iterator) peekTime() int64 { + p := itr.peek() + if p == nil { + return ZeroTime + } + return p.Time +} + +// Next returns the current buffer, if exists, or calls the underlying iterator. +func (itr *buf{{$k.Name}}Iterator) Next() *{{$k.Name}}Point { + if itr.buf != nil { + buf := itr.buf + itr.buf = nil + return buf + } + return itr.itr.Next() +} + +// NextInWindow returns the next value if it is between [startTime, endTime). +// If the next value is outside the range then it is moved to the buffer. +func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) *{{$k.Name}}Point { + v := itr.Next() + if v == nil { + return nil + } else if v.Time < startTime || v.Time >= endTime { + itr.unread(v) + return nil + } + return v +} + +// unread sets v to the buffer. It is read on the next call to Next(). +func (itr *buf{{$k.Name}}Iterator) unread(v *{{$k.Name}}Point) { itr.buf = v } + +// {{$k.name}}MergeIterator represents an iterator that combines multiple {{$k.name}} iterators. +type {{$k.name}}MergeIterator struct { + inputs []{{$k.Name}}Iterator + heap *{{$k.name}}MergeHeap + + // Current iterator and window. + curr *{{$k.name}}MergeHeapItem + window struct { + name string + tags string + startTime int64 + endTime int64 + } +} + +// new{{$k.Name}}MergeIterator returns a new instance of {{$k.name}}MergeIterator. +func new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}MergeIterator { + itr := &{{$k.name}}MergeIterator{ + inputs: inputs, + heap: &{{$k.name}}MergeHeap{ + items: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)), + opt: opt, + }, + } + + // Initialize heap items. + for _, input := range inputs { + // Wrap in buffer, ignore any inputs without anymore points. + bufInput := newBuf{{$k.Name}}Iterator(input) + if bufInput.peek() == nil { + continue + } + + // Append to the heap. + itr.heap.items = append(itr.heap.items, &{{$k.name}}MergeHeapItem{itr: bufInput}) + } + heap.Init(itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}MergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}MergeIterator) Next() *{{$k.Name}}Point { + for { + // Retrieve the next iterator if we don't have one. + if itr.curr == nil { + if len(itr.heap.items) == 0 { + return nil + } + itr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem) + + // Read point and set current window. + p := itr.curr.itr.Next() + itr.window.name, itr.window.tags = p.Name, p.Tags.ID() + itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) + return p + } + + // Read the next point from the current iterator. 
+ p := itr.curr.itr.Next() + + // If there are no more points then remove iterator from heap and find next. + if p == nil { + itr.curr = nil + continue + } + + // Check if the point is inside of our current window. + inWindow := true + if itr.window.name != p.Name { + inWindow = false + } else if itr.window.tags != p.Tags.ID() { + inWindow = false + } else if itr.heap.opt.Ascending && p.Time >= itr.window.endTime { + inWindow = false + } else if !itr.heap.opt.Ascending && p.Time < itr.window.startTime { + inWindow = false + } + + // If it's outside our window then push iterator back on the heap and find new iterator. + if !inWindow { + itr.curr.itr.unread(p) + heap.Push(itr.heap, itr.curr) + itr.curr = nil + continue + } + + return p + } +} + +// {{$k.name}}MergeHeap represents a heap of {{$k.name}}MergeHeapItems. +// Items are sorted by their next window and then by name/tags. +type {{$k.name}}MergeHeap struct { + opt IteratorOptions + items []*{{$k.name}}MergeHeapItem +} + +func (h {{$k.name}}MergeHeap) Len() int { return len(h.items) } +func (h {{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } +func (h {{$k.name}}MergeHeap) Less(i, j int) bool { + x, y := h.items[i].itr.peek(), h.items[j].itr.peek() + + if h.opt.Ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() < y.Tags.ID() + } + } else { + if x.Name != y.Name { + return x.Name > y.Name + } else if x.Tags.ID() != y.Tags.ID() { + return x.Tags.ID() > y.Tags.ID() + } + } + + xt, _ := h.opt.Window(x.Time) + yt, _ := h.opt.Window(y.Time) + + if h.opt.Ascending { + return xt < yt + } + return xt > yt +} + + +func (h *{{$k.name}}MergeHeap) Push(x interface{}) { + h.items = append(h.items, x.(*{{$k.name}}MergeHeapItem)) +} + +func (h *{{$k.name}}MergeHeap) Pop() interface{} { + old := h.items + n := len(old) + item := old[n-1] + h.items = old[0 : n-1] + return item +} + +type {{$k.name}}MergeHeapItem struct { + itr *buf{{$k.Name}}Iterator +} + + +// {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one. +type {{$k.name}}SortedMergeIterator struct { + inputs []{{$k.Name}}Iterator + opt IteratorOptions + heap {{$k.name}}SortedMergeHeap +} + +// new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator. +func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator { + itr := &{{$k.name}}SortedMergeIterator{ + inputs: inputs, + heap: make({{$k.name}}SortedMergeHeap, 0, len(inputs)), + opt: opt, + } + + // Initialize heap. + for _, input := range inputs { + // Read next point. + p := input.Next() + if p == nil { + continue + } + + // Append to the heap. + itr.heap = append(itr.heap, &{{$k.name}}SortedMergeHeapItem{point: p, itr: input, ascending: opt.Ascending}) + } + heap.Init(&itr.heap) + + return itr +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}SortedMergeIterator) Close() error { + for _, input := range itr.inputs { + input.Close() + } + return nil +} + +// Next returns the next points from the iterator. +func (itr *{{$k.name}}SortedMergeIterator) Next() *{{$k.Name}}Point { return itr.pop() } + +// pop returns the next point from the heap. +// Reads the next point from item's cursor and puts it back on the heap. +func (itr *{{$k.name}}SortedMergeIterator) pop() *{{$k.Name}}Point { + if len(itr.heap) == 0 { + return nil + } + + // Read the next item from the heap. 
+ item := heap.Pop(&itr.heap).(*{{$k.name}}SortedMergeHeapItem) + + // Copy the point for return. + p := item.point.Clone() + + // Read the next item from the cursor. Push back to heap if one exists. + if item.point = item.itr.Next(); item.point != nil { + heap.Push(&itr.heap, item) + } + + return p +} + +// {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. +type {{$k.name}}SortedMergeHeap []*{{$k.name}}SortedMergeHeapItem + +func (h {{$k.name}}SortedMergeHeap) Len() int { return len(h) } +func (h {{$k.name}}SortedMergeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h {{$k.name}}SortedMergeHeap) Less(i, j int) bool { + x, y := h[i].point, h[j].point + + if h[i].ascending { + if x.Name != y.Name { + return x.Name < y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() < y.Tags.ID() + } + return x.Time < y.Time + } + + if x.Name != y.Name { + return x.Name > y.Name + } else if !x.Tags.Equals(&y.Tags) { + return x.Tags.ID() > y.Tags.ID() + } + return x.Time > y.Time +} + +func (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) { + *h = append(*h, x.(*{{$k.name}}SortedMergeHeapItem)) +} + +func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { + old := *h + n := len(old) + item := old[n-1] + *h = old[0 : n-1] + return item +} + +type {{$k.name}}SortedMergeHeapItem struct { + point *{{$k.Name}}Point + itr {{$k.Name}}Iterator + ascending bool +} + +// {{$k.name}}LimitIterator represents an iterator that limits points per group. +type {{$k.name}}LimitIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions + n int + + prev struct { + name string + tags Tags + } +} + +// new{{$k.Name}}LimitIterator returns a new instance of {{$k.name}}LimitIterator. +func new{{$k.Name}}LimitIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}LimitIterator { + return &{{$k.name}}LimitIterator{ + input: input, + opt: opt, + } +} + +// Close closes the underlying iterators. +func (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() } + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}LimitIterator) Next() *{{$k.Name}}Point { + for { + p := itr.input.Next() + if p == nil { + return nil + } + + // Reset window and counter if a new window is encountered. + if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { + itr.prev.name = p.Name + itr.prev.tags = p.Tags + itr.n = 0 + } + + // Increment counter. + itr.n++ + + // Read next point if not beyond the offset. + if itr.n <= itr.opt.Offset { + continue + } + + // Read next point if we're beyond the limit. + if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { + // If there's no interval, no groups, and a single source then simply exit. 
+ if itr.opt.Interval.IsZero() && len(itr.opt.Dimensions) == 0 && len(itr.opt.Sources) == 1 { + return nil + } + continue + } + + return p + } +} + +type {{$k.name}}FillIterator struct { + input *buf{{$k.Name}}Iterator + prev *{{$k.Name}}Point + startTime int64 + endTime int64 + auxFields []interface{} + done bool + opt IteratorOptions + + window struct { + name string + tags Tags + time int64 + } +} + +func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt IteratorOptions) *{{$k.name}}FillIterator { + if opt.Fill == NullFill { + if expr, ok := expr.(*Call); ok && expr.Name == "count" { + opt.Fill = NumberFill + opt.FillValue = {{$k.Zero}} + } + } + + var startTime, endTime int64 + if opt.Ascending { + startTime, _ = opt.Window(opt.StartTime) + _, endTime = opt.Window(opt.EndTime) + } else { + _, startTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.StartTime) + } + + var auxFields []interface{} + if len(opt.Aux) > 0 { + auxFields = make([]interface{}, len(opt.Aux)) + } + + itr := &{{$k.name}}FillIterator{ + input: newBuf{{$k.Name}}Iterator(input), + startTime: startTime, + endTime: endTime, + auxFields: auxFields, + opt: opt, + } + + p := itr.input.peek() + if p != nil { + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + } else { + itr.window.time = itr.endTime + } + return itr +} + +func (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}FillIterator) Next() *{{$k.Name}}Point { + p := itr.input.Next() + + // Check if the next point is outside of our window or is nil. + for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + // If we are inside of an interval, unread the point and continue below to + // constructing a new point. + if itr.opt.Ascending { + if itr.window.time < itr.endTime { + itr.input.unread(p) + p = nil + break + } + } else { + if itr.window.time >= itr.endTime { + itr.input.unread(p) + p = nil + break + } + } + + // We are *not* in a current interval. If there is no next point, + // we are at the end of all intervals. + if p == nil { + return nil + } + + // Set the new interval. + itr.window.name, itr.window.tags = p.Name, p.Tags + itr.window.time = itr.startTime + itr.prev = nil + break + } + + // Check if the point is our next expected point. + if p == nil || p.Time > itr.window.time { + if p != nil { + itr.input.unread(p) + } + + p = &{{$k.Name}}Point{ + Name: itr.window.name, + Tags: itr.window.tags, + Time: itr.window.time, + Aux: itr.auxFields, + } + + switch itr.opt.Fill { + case NullFill: + p.Nil = true + case NumberFill: + p.Value = castTo{{$k.Name}}(itr.opt.FillValue) + case PreviousFill: + if itr.prev != nil { + p.Value = itr.prev.Value + p.Nil = itr.prev.Nil + } else { + p.Nil = true + } + } + } else { + itr.prev = p + } + + // Advance the expected time. Do not advance to a new window here + // as there may be lingering points with the same timestamp in the previous + // window. + if itr.opt.Ascending { + itr.window.time = p.Time + int64(itr.opt.Interval.Duration) + } else { + itr.window.time = p.Time - int64(itr.opt.Interval.Duration) + } + return p +} + +// {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator. 
+type {{$k.name}}IntervalIterator struct { + input {{$k.Name}}Iterator + opt IteratorOptions +} + +func new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}IntervalIterator { + return &{{$k.name}}IntervalIterator{input: input, opt: opt} +} + +func (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() } + +func (itr *{{$k.name}}IntervalIterator) Next() *{{$k.Name}}Point { + p := itr.input.Next() + if p == nil { + return p + } + p.Time, _ = itr.opt.Window(p.Time) + return p +} + +// {{$k.name}}AuxIterator represents a {{$k.name}} implementation of AuxIterator. +type {{$k.name}}AuxIterator struct { + input *buf{{$k.Name}}Iterator + output chan *{{$k.Name}}Point + fields auxIteratorFields +} + +func new{{$k.Name}}AuxIterator(input {{$k.Name}}Iterator, seriesKeys SeriesList, opt IteratorOptions) *{{$k.name}}AuxIterator { + return &{{$k.name}}AuxIterator{ + input: newBuf{{$k.Name}}Iterator(input), + output: make(chan *{{$k.Name}}Point, 1), + fields: newAuxIteratorFields(seriesKeys, opt), + } +} + +func (itr *{{$k.name}}AuxIterator) Start() { go itr.stream() } +func (itr *{{$k.name}}AuxIterator) Close() error { return itr.input.Close() } +func (itr *{{$k.name}}AuxIterator) Next() *{{$k.Name}}Point { return <-itr.output } +func (itr *{{$k.name}}AuxIterator) Iterator(name string) Iterator { return itr.fields.iterator(name) } + +func (itr *{{$k.name}}AuxIterator) CreateIterator(opt IteratorOptions) (Iterator, error) { + expr := opt.Expr + if expr == nil { + panic("unable to create an iterator with no expression from an aux iterator") + } + + switch expr := expr.(type) { + case *VarRef: + return itr.Iterator(expr.Val), nil + default: + panic(fmt.Sprintf("invalid expression type for an aux iterator: %T", expr)) + } +} + +func (itr *{{$k.name}}AuxIterator) FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) { + return nil, nil, errors.New("not implemented") +} + +func (itr *{{$k.name}}AuxIterator) SeriesKeys(opt IteratorOptions) (SeriesList, error) { + return nil, errors.New("not implemented") +} + +func (itr *{{$k.name}}AuxIterator) stream() { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + break + } + + // Send point to output and to each field iterator. + itr.output <- p + itr.fields.send(p) + } + + close(itr.output) + itr.fields.close() +} + +// {{$k.name}}ChanIterator represents a new instance of {{$k.name}}ChanIterator. +type {{$k.name}}ChanIterator struct { + c chan *{{$k.Name}}Point + once sync.Once +} + +func (itr *{{$k.name}}ChanIterator) Close() error { + itr.once.Do(func() { close(itr.c) }) + return nil +} + +func (itr *{{$k.name}}ChanIterator) Next() *{{$k.Name}}Point { return <-itr.c } + +{{range $v := $types}} + +// {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result. +type {{$k.name}}Reduce{{$v.Name}}Iterator struct { + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + opt IteratorOptions + points []{{$v.Name}}Point +} + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() *{{.Name}}Point { + // Calculate next window if we have no more points. 
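+ // reduce() buffers one whole window; it builds the slice in reverse so that
+ // popping from the tail below yields groups in ascending name/tag order with
+ // each group's points in the order the emitter produced them.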
+ if len(itr.points) == 0 { + itr.points = itr.reduce() + if len(itr.points) == 0 { + return nil + } + } + + // Pop next point off the stack. + p := &itr.points[len(itr.points)-1] + itr.points = itr.points[:len(itr.points)-1] + return p +} + +// {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination. +type {{$k.name}}Reduce{{$v.Name}}Point struct { + Name string + Tags Tags + Aggregator {{$k.Name}}PointAggregator + Emitter {{$v.Name}}PointEmitter +} + +// reduce executes fn once for every point in the next window. +// The previous value for the dimension is passed to fn. +func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() []{{$v.Name}}Point { + // Calculate next window. + startTime, endTime := itr.opt.Window(itr.input.peekTime()) + + // Create points by tags. + m := make(map[string]*{{$k.name}}Reduce{{.Name}}Point) + for { + // Read next point. + curr := itr.input.NextInWindow(startTime, endTime) + if curr == nil { + break + } else if curr.Nil { + continue + } + tags := curr.Tags.Subset(itr.opt.Dimensions) + id := curr.Name + "\x00" + tags.ID() + + // Retrieve the aggregator for this name/tag combination or create one. + rp := m[id] + if rp == nil { + aggregator, emitter := itr.create() + rp = &{{$k.name}}Reduce{{.Name}}Point{ + Name: curr.Name, + Tags: tags, + Aggregator: aggregator, + Emitter: emitter, + } + m[id] = rp + } + rp.Aggregator.Aggregate{{$k.Name}}(curr) + } + + // Reverse sort points by name & tag. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(sort.Reverse(sort.StringSlice(keys))) + + a := make([]{{$v.Name}}Point, 0, len(m)) + for _, k := range keys { + rp := m[k] + points := rp.Emitter.Emit() + for i := len(points)-1; i >= 0; i-- { + points[i].Name = rp.Name + points[i].Tags = rp.Tags + // Set the points time to the interval time if the reducer didn't provide one. + if points[i].Time == ZeroTime { + points[i].Time = startTime + } + a = append(a, points[i]) + } + } + + return a +} +{{end}} + +// {{$k.name}}TransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type {{$k.name}}TransformIterator struct { + input {{$k.Name}}Iterator + fn {{$k.name}}TransformFunc +} + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}TransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}TransformIterator) Next() *{{$k.Name}}Point { + p := itr.input.Next() + if p != nil { + p = itr.fn(p) + } + return p +} + +// {{$k.name}}TransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type {{$k.name}}TransformFunc func(p *{{$k.Name}}Point) *{{$k.Name}}Point + +// {{$k.name}}ReduceIterator executes a function to modify an existing point for every +// output of the input iterator. +type {{$k.name}}BoolTransformIterator struct { + input {{$k.Name}}Iterator + fn {{$k.name}}BoolTransformFunc +} + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}BoolTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *{{$k.name}}BoolTransformIterator) Next() *BooleanPoint { + p := itr.input.Next() + if p != nil { + return itr.fn(p) + } + return nil +} + +// {{$k.name}}BoolTransformFunc creates or modifies a point. 
+// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type {{$k.name}}BoolTransformFunc func(p *{{$k.Name}}Point) *BooleanPoint + +// {{$k.name}}DedupeIterator only outputs unique points. +// This differs from the DistinctIterator in that it compares all aux fields too. +// This iterator is relatively inefficient and should only be used on small +// datasets such as meta query results. +type {{$k.name}}DedupeIterator struct { + input {{$k.Name}}Iterator + m map[string]struct{} // lookup of points already sent +} + +// new{{$k.Name}}DedupeIterator returns a new instance of {{$k.name}}DedupeIterator. +func new{{$k.Name}}DedupeIterator(input {{$k.Name}}Iterator) *{{$k.name}}DedupeIterator { + return &{{$k.name}}DedupeIterator{ + input: input, + m: make(map[string]struct{}), + } +} + +// Close closes the iterator and all child iterators. +func (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() } + +// Next returns the next unique point from the input iterator. +func (itr *{{$k.name}}DedupeIterator) Next() *{{$k.Name}}Point { + for { + // Read next point. + p := itr.input.Next() + if p == nil { + return nil + } + + // Serialize to bytes to store in lookup. + buf, err := proto.Marshal(encode{{$k.Name}}Point(p)) + if err != nil { + log.Println("error marshaling dedupe point:", err) + continue + } + + // If the point has already been output then move to the next point. + if _, ok := itr.m[string(buf)]; ok { + continue + } + + // Otherwise mark it as emitted and return point. + itr.m[string(buf)] = struct{}{} + return p + } +} + +// {{$k.name}}ReaderIterator represents an iterator that streams from a reader. +type {{$k.name}}ReaderIterator struct { + r io.Reader + dec *{{$k.Name}}PointDecoder + first *{{$k.Name}}Point +} + +// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator. +func new{{$k.Name}}ReaderIterator(r io.Reader, first *{{$k.Name}}Point) *{{$k.name}}ReaderIterator { + return &{{$k.name}}ReaderIterator{ + r: r, + dec: New{{$k.Name}}PointDecoder(r), + first: first, + } +} + +// Close closes the underlying reader, if applicable. +func (itr *{{$k.name}}ReaderIterator) Close() error { + if r, ok := itr.r.(io.ReadCloser); ok { + return r.Close() + } + return nil +} + +// Next returns the next point from the iterator. +func (itr *{{$k.name}}ReaderIterator) Next() *{{$k.Name}}Point { + // Send first point if it hasn't been sent yet. + if itr.first != nil { + p := itr.first + itr.first = nil + return p + } + + // OPTIMIZE(benbjohnson): Reuse point on iterator. + + // Unmarshal next point. + p := &{{$k.Name}}Point{} + if err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF { + return nil + } else if err != nil { + log.Printf("error reading iterator point: %s", err) + return nil + } + return p +} +{{end}} + + +// IteratorEncoder is an encoder for encoding an iterator's points to w. +type IteratorEncoder struct { + w io.Writer +} + +// NewIteratorEncoder encodes an iterator's points to w. +func NewIteratorEncoder(w io.Writer) *IteratorEncoder { + return &IteratorEncoder{w: w} +} + +// Encode encodes and writes all of itr's points to the underlying writer. 
+func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { + switch itr := itr.(type) { + case FloatIterator: + return enc.encodeFloatIterator(itr) + case IntegerIterator: + return enc.encodeIntegerIterator(itr) + case StringIterator: + return enc.encodeStringIterator(itr) + case BooleanIterator: + return enc.encodeBooleanIterator(itr) + default: + panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) + } +} + +{{range .}} +// encode{{.Name}}Iterator encodes all points from itr to the underlying writer. +func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error { + penc := New{{.Name}}PointEncoder(enc.w) + for { + // Retrieve the next point from the iterator. + p := itr.Next() + if p == nil { + return nil + } + + // Write the point to the point encoder. + if err := penc.Encode{{.Name}}Point(p); err != nil { + return err + } + } +} + +{{end}}{{end}} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.go new file mode 100644 index 0000000000..cee4cc3ae6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.go @@ -0,0 +1,1007 @@ +package influxql + +import ( + "errors" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/influxdb/influxql/internal" +) + +// ErrUnknownCall is returned when operating on an unknown function call. +var ErrUnknownCall = errors.New("unknown call") + +const ( + // MinTime is used as the minimum time value when computing an unbounded range. + MinTime = int64(0) + + // MaxTime is used as the maximum time value when computing an unbounded range. + // This time is Jan 1, 2050 at midnight UTC. + MaxTime = int64(2524608000000000000) +) + +// Iterator represents a generic interface for all Iterators. +// Most iterator operations are done on the typed sub-interfaces. +type Iterator interface { + Close() error +} + +// Iterators represents a list of iterators. +type Iterators []Iterator + +// Close closes all iterators. +func (a Iterators) Close() error { + for _, itr := range a { + itr.Close() + } + return nil +} + +// filterNonNil returns a slice of iterators that removes all nil iterators. +func (a Iterators) filterNonNil() []Iterator { + other := make([]Iterator, 0, len(a)) + for _, itr := range a { + if itr == nil { + continue + } + other = append(other, itr) + } + return other +} + +// castType determines what type to cast the set of iterators to. +// An iterator type is chosen using this hierarchy: +// float > integer > string > boolean +func (a Iterators) castType() DataType { + if len(a) == 0 { + return Unknown + } + + typ := DataType(Boolean) + for _, input := range a { + switch input.(type) { + case FloatIterator: + // Once a float iterator is found, short circuit the end. + return Float + case IntegerIterator: + if typ > Integer { + typ = Integer + } + case StringIterator: + if typ > String { + typ = String + } + case BooleanIterator: + // Boolean is the lowest type. + } + } + return typ +} + +// cast casts an array of iterators to a single type. +// Iterators that are not compatible or cannot be cast to the +// chosen iterator type are closed and dropped. 
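+// For example, mixing float and integer inputs yields []FloatIterator (integer
+// inputs are wrapped by integerFloatCastIterator), while a boolean input mixed
+// with string inputs cannot be cast to a string iterator and is closed.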
+func (a Iterators) cast() interface{} { + typ := a.castType() + switch typ { + case Float: + return newFloatIterators(a) + case Integer: + return newIntegerIterators(a) + case String: + return newStringIterators(a) + case Boolean: + return newBooleanIterators(a) + } + return a +} + +// NewMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be merge iterators or only contain a single name/tag in +// sorted order. The iterator will output all points by window, name/tag, then +// time. This iterator is useful when you need all of the points for an +// interval. +func NewMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return &nilFloatIterator{} + } + + // Aggregate functions can use a more relaxed sorting so that points + // within a window are grouped. This is much more efficient. + switch inputs := Iterators(inputs).cast().(type) { + case []FloatIterator: + return newFloatMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerMergeIterator(inputs, opt) + case []StringIterator: + return newStringMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported merge iterator type: %T", inputs)) + } +} + +// NewSortedMergeIterator returns an iterator to merge itrs into one. +// Inputs must either be sorted merge iterators or only contain a single +// name/tag in sorted order. The iterator will output all points by name/tag, +// then time. This iterator is useful when you need all points for a name/tag +// to be in order. +func NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { + inputs = Iterators(inputs).filterNonNil() + if len(inputs) == 0 { + return &nilFloatIterator{} + } + + switch inputs := Iterators(inputs).cast().(type) { + case []FloatIterator: + return newFloatSortedMergeIterator(inputs, opt) + case []IntegerIterator: + return newIntegerSortedMergeIterator(inputs, opt) + case []StringIterator: + return newStringSortedMergeIterator(inputs, opt) + case []BooleanIterator: + return newBooleanSortedMergeIterator(inputs, opt) + default: + panic(fmt.Sprintf("unsupported sorted merge iterator type: %T", inputs)) + } +} + +// NewLimitIterator returns an iterator that limits the number of points per grouping. +func NewLimitIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatLimitIterator(input, opt) + case IntegerIterator: + return newIntegerLimitIterator(input, opt) + case StringIterator: + return newStringLimitIterator(input, opt) + case BooleanIterator: + return newBooleanLimitIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported limit iterator type: %T", input)) + } +} + +// NewDedupeIterator returns an iterator that only outputs unique points. +// This iterator maintains a serialized copy of each row so it is inefficient +// to use on large datasets. It is intended for small datasets such as meta queries. 
+func NewDedupeIterator(input Iterator) Iterator { + if input == nil { + return nil + } + + switch input := input.(type) { + case FloatIterator: + return newFloatDedupeIterator(input) + case IntegerIterator: + return newIntegerDedupeIterator(input) + case StringIterator: + return newStringDedupeIterator(input) + case BooleanIterator: + return newBooleanDedupeIterator(input) + default: + panic(fmt.Sprintf("unsupported dedupe iterator type: %T", input)) + } +} + +// NewFillIterator returns an iterator that fills in missing points in an aggregate. +func NewFillIterator(input Iterator, expr Expr, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatFillIterator(input, expr, opt) + case IntegerIterator: + return newIntegerFillIterator(input, expr, opt) + case StringIterator: + return newStringFillIterator(input, expr, opt) + case BooleanIterator: + return newBooleanFillIterator(input, expr, opt) + default: + panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) + } +} + +// NewIntervalIterator returns an iterator that sets the time on each point to the interval. +func NewIntervalIterator(input Iterator, opt IteratorOptions) Iterator { + switch input := input.(type) { + case FloatIterator: + return newFloatIntervalIterator(input, opt) + case IntegerIterator: + return newIntegerIntervalIterator(input, opt) + case StringIterator: + return newStringIntervalIterator(input, opt) + case BooleanIterator: + return newBooleanIntervalIterator(input, opt) + default: + panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) + } +} + +// AuxIterator represents an iterator that can split off separate auxilary iterators. +type AuxIterator interface { + Iterator + IteratorCreator + + // Auxilary iterator + Iterator(name string) Iterator + + // Start starts writing to the created iterators. + Start() +} + +// NewAuxIterator returns a new instance of AuxIterator. +func NewAuxIterator(input Iterator, seriesKeys SeriesList, opt IteratorOptions) AuxIterator { + switch input := input.(type) { + case FloatIterator: + return newFloatAuxIterator(input, seriesKeys, opt) + case IntegerIterator: + return newIntegerAuxIterator(input, seriesKeys, opt) + case StringIterator: + return newStringAuxIterator(input, seriesKeys, opt) + case BooleanIterator: + return newBooleanAuxIterator(input, seriesKeys, opt) + default: + panic(fmt.Sprintf("unsupported aux iterator type: %T", input)) + } +} + +// auxIteratorField represents an auxilary field within an AuxIterator. +type auxIteratorField struct { + name string // field name + typ DataType // detected data type + itrs []Iterator // auxillary iterators + mu sync.Mutex + opt IteratorOptions +} + +func (f *auxIteratorField) append(itr Iterator) { + f.mu.Lock() + defer f.mu.Unlock() + f.itrs = append(f.itrs, itr) +} + +func (f *auxIteratorField) close() { + f.mu.Lock() + defer f.mu.Unlock() + for _, itr := range f.itrs { + itr.Close() + } +} + +type auxIteratorFields []*auxIteratorField + +// newAuxIteratorFields returns a new instance of auxIteratorFields from a list of field names. 
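+// For each aux field the highest-priority data type observed across seriesKeys
+// is recorded (float over integer over string over boolean, mirroring
+// Iterators.castType); iterator(name) later hands out a chan iterator of that type.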
+func newAuxIteratorFields(seriesKeys SeriesList, opt IteratorOptions) auxIteratorFields { + fields := make(auxIteratorFields, len(opt.Aux)) + for i, name := range opt.Aux { + fields[i] = &auxIteratorField{name: name, opt: opt} + for _, s := range seriesKeys { + aux := s.Aux[i] + if aux == Unknown { + continue + } + + if fields[i].typ == Unknown || aux < fields[i].typ { + fields[i].typ = aux + } + } + } + return fields +} + +func (a auxIteratorFields) close() { + for _, f := range a { + f.close() + } +} + +// iterator creates a new iterator for a named auxilary field. +func (a auxIteratorFields) iterator(name string) Iterator { + for _, f := range a { + // Skip field if it's name doesn't match. + // Exit if no points were received by the iterator. + if f.name != name { + continue + } + + // Create channel iterator by data type. + switch f.typ { + case Float: + itr := &floatChanIterator{c: make(chan *FloatPoint, 1)} + f.append(itr) + return itr + case Integer: + itr := &integerChanIterator{c: make(chan *IntegerPoint, 1)} + f.append(itr) + return itr + case String: + itr := &stringChanIterator{c: make(chan *StringPoint, 1)} + f.append(itr) + return itr + case Boolean: + itr := &booleanChanIterator{c: make(chan *BooleanPoint, 1)} + f.append(itr) + return itr + default: + break + } + } + + return &nilFloatIterator{} +} + +// send sends a point to all field iterators. +func (a auxIteratorFields) send(p Point) { + values := p.aux() + for i, f := range a { + v := values[i] + + tags := p.tags() + tags = tags.Subset(f.opt.Dimensions) + + // Send new point for each aux iterator. + // Primitive pointers represent nil values. + for _, itr := range f.itrs { + switch itr := itr.(type) { + case *floatChanIterator: + switch v := v.(type) { + case float64: + itr.c <- &FloatPoint{Name: p.name(), Tags: tags, Time: p.time(), Value: v} + case int64: + itr.c <- &FloatPoint{Name: p.name(), Tags: tags, Time: p.time(), Value: float64(v)} + default: + itr.c <- &FloatPoint{Name: p.name(), Tags: tags, Time: p.time(), Nil: true} + } + case *integerChanIterator: + switch v := v.(type) { + case int64: + itr.c <- &IntegerPoint{Name: p.name(), Tags: tags, Time: p.time(), Value: v} + default: + itr.c <- &IntegerPoint{Name: p.name(), Tags: tags, Time: p.time(), Nil: true} + } + case *stringChanIterator: + switch v := v.(type) { + case string: + itr.c <- &StringPoint{Name: p.name(), Tags: tags, Time: p.time(), Value: v} + default: + itr.c <- &StringPoint{Name: p.name(), Tags: tags, Time: p.time(), Nil: true} + } + case *booleanChanIterator: + switch v := v.(type) { + case bool: + itr.c <- &BooleanPoint{Name: p.name(), Tags: tags, Time: p.time(), Value: v} + default: + itr.c <- &BooleanPoint{Name: p.name(), Tags: tags, Time: p.time(), Nil: true} + } + default: + panic(fmt.Sprintf("invalid aux itr type: %T", itr)) + } + } + } +} + +// drainIterator reads all points from an iterator. +func drainIterator(itr Iterator) { + for { + switch itr := itr.(type) { + case FloatIterator: + if p := itr.Next(); p == nil { + return + } + case IntegerIterator: + if p := itr.Next(); p == nil { + return + } + case StringIterator: + if p := itr.Next(); p == nil { + return + } + case BooleanIterator: + if p := itr.Next(); p == nil { + return + } + default: + panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) + } + } +} + +// NewReaderIterator returns an iterator that streams from a reader. 
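NewReaderIterator, defined next, decodes a single point to discover the stream's concrete type and then hands the rest of the reader to a typed iterator. A simplified sketch of that peek-then-dispatch pattern, using JSON in place of the point codec from this patch; the decoder and the reader labels are illustrative assumptions:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// readerFor decodes the first value from r and returns a label for the
// typed reader that would consume the remainder of the stream.
func readerFor(r io.Reader) (string, error) {
	dec := json.NewDecoder(r)
	var v interface{}
	if err := dec.Decode(&v); err == io.EOF {
		return "empty", nil // analogous to returning a nil iterator
	} else if err != nil {
		return "", err
	}

	switch v.(type) {
	case float64:
		return "float reader", nil
	case string:
		return "string reader", nil
	case bool:
		return "boolean reader", nil
	default:
		return "", fmt.Errorf("unsupported first value: %T", v)
	}
}

func main() {
	label, err := readerFor(strings.NewReader(`1.5 2.5 3.5`))
	fmt.Println(label, err) // float reader <nil>
}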
+func NewReaderIterator(r io.Reader) (Iterator, error) { + var p Point + if err := NewPointDecoder(r).DecodePoint(&p); err == io.EOF { + return &nilFloatIterator{}, nil + } else if err != nil { + return nil, err + } + + switch p := p.(type) { + case *FloatPoint: + return newFloatReaderIterator(r, p), nil + case *IntegerPoint: + return newIntegerReaderIterator(r, p), nil + case *StringPoint: + return newStringReaderIterator(r, p), nil + case *BooleanPoint: + return newBooleanReaderIterator(r, p), nil + default: + panic(fmt.Sprintf("unsupported point for reader iterator: %T", p)) + } +} + +// IteratorCreator represents an interface for objects that can create Iterators. +type IteratorCreator interface { + // Creates a simple iterator for use in an InfluxQL query. + CreateIterator(opt IteratorOptions) (Iterator, error) + + // Returns the unique fields and dimensions across a list of sources. + FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) + + // Returns the series keys that will be returned by this iterator. + SeriesKeys(opt IteratorOptions) (SeriesList, error) +} + +// IteratorCreators represents a list of iterator creators. +type IteratorCreators []IteratorCreator + +// Close closes all iterator creators that implement io.Closer. +func (a IteratorCreators) Close() error { + for _, ic := range a { + if ic, ok := ic.(io.Closer); ok { + ic.Close() + } + } + return nil +} + +// CreateIterator returns a single combined iterator from multiple iterator creators. +func (a IteratorCreators) CreateIterator(opt IteratorOptions) (Iterator, error) { + // Create iterators for each shard. + // Ensure that they are closed if an error occurs. + itrs := make([]Iterator, 0, len(a)) + if err := func() error { + for _, ic := range a { + itr, err := ic.CreateIterator(opt) + if err != nil { + return err + } + itrs = append(itrs, itr) + } + return nil + }(); err != nil { + Iterators(itrs).Close() + return nil, err + } + + // Merge into a single iterator. + if opt.MergeSorted() { + return NewSortedMergeIterator(itrs, opt), nil + } + + itr := NewMergeIterator(itrs, opt) + if opt.Expr != nil { + if expr, ok := opt.Expr.(*Call); ok && expr.Name == "count" { + opt.Expr = &Call{ + Name: "sum", + Args: expr.Args, + } + } + } + return NewCallIterator(itr, opt) +} + +// FieldDimensions returns unique fields and dimensions from multiple iterator creators. +func (a IteratorCreators) FieldDimensions(sources Sources) (fields, dimensions map[string]struct{}, err error) { + fields = make(map[string]struct{}) + dimensions = make(map[string]struct{}) + + for _, ic := range a { + f, d, err := ic.FieldDimensions(sources) + if err != nil { + return nil, nil, err + } + for k := range f { + fields[k] = struct{}{} + } + for k := range d { + dimensions[k] = struct{}{} + } + } + return +} + +// SeriesKeys returns a list of series in all iterator creators in a. +// If a series exists in multiple creators in a, all instances will be combined +// into a single Series by calling Combine on it. 
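One detail of the CreateIterator implementation above is worth a quick sketch before the SeriesKeys code below: when per-shard iterators are merged, a count() call is rewritten to sum(), because each shard has already produced a partial count and counting those counts again would be wrong. The shard values here are made up for illustration:

package main

import "fmt"

func main() {
	// Hypothetical partial results: each shard already counted its own points.
	perShardCounts := []int64{3, 4, 2}

	// Correct cross-shard combine: sum the partial counts.
	var total int64
	for _, c := range perShardCounts {
		total += c
	}
	fmt.Println(total) // 9 points in total

	// Re-applying count() to the partial results would report the number
	// of shards instead of the number of points.
	fmt.Println(len(perShardCounts)) // 3 -- not what COUNT(value) means
}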
+func (a IteratorCreators) SeriesKeys(opt IteratorOptions) (SeriesList, error) { + seriesMap := make(map[string]Series) + for _, ic := range a { + series, err := ic.SeriesKeys(opt) + if err != nil { + return nil, err + } + + for _, s := range series { + cur, ok := seriesMap[s.ID()] + if ok { + cur.Combine(&s) + } else { + seriesMap[s.ID()] = s + } + } + } + + seriesList := make([]Series, 0, len(seriesMap)) + for _, s := range seriesMap { + seriesList = append(seriesList, s) + } + sort.Sort(SeriesList(seriesList)) + return SeriesList(seriesList), nil +} + +// IteratorOptions is an object passed to CreateIterator to specify creation options. +type IteratorOptions struct { + // Expression to iterate for. + // This can be VarRef or a Call. + Expr Expr + + // Auxilary tags or values to also retrieve for the point. + Aux []string + + // Data sources from which to retrieve data. + Sources []Source + + // Group by interval and tags. + Interval Interval + Dimensions []string + + // Fill options. + Fill FillOption + FillValue interface{} + + // Condition to filter by. + Condition Expr + + // Time range for the iterator. + StartTime int64 + EndTime int64 + + // Sorted in time ascending order if true. + Ascending bool + + // Limits the number of points per series. + Limit, Offset int + + // Limits the number of series. + SLimit, SOffset int + + // Removes duplicate rows from raw queries. + Dedupe bool +} + +// newIteratorOptionsStmt creates the iterator options from stmt. +func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt IteratorOptions, err error) { + // Determine time range from the condition. + startTime, endTime := TimeRange(stmt.Condition) + if !startTime.IsZero() { + opt.StartTime = startTime.UnixNano() + } else { + if sopt != nil { + opt.StartTime = sopt.MinTime.UnixNano() + } else { + opt.StartTime = MinTime + } + } + if !endTime.IsZero() { + opt.EndTime = endTime.UnixNano() + } else { + if sopt != nil { + opt.EndTime = sopt.MaxTime.UnixNano() + } else { + opt.EndTime = MaxTime + } + } + + // Determine group by interval. + interval, err := stmt.GroupByInterval() + if err != nil { + return opt, err + } + // Set duration to zero if a negative interval has been used. + if interval < 0 { + interval = 0 + } + opt.Interval.Duration = interval + + // Determine dimensions. + for _, d := range stmt.Dimensions { + if d, ok := d.Expr.(*VarRef); ok { + opt.Dimensions = append(opt.Dimensions, d.Val) + } + } + + opt.Sources = stmt.Sources + opt.Condition = stmt.Condition + opt.Ascending = stmt.TimeAscending() + opt.Dedupe = stmt.Dedupe + + opt.Fill, opt.FillValue = stmt.Fill, stmt.FillValue + opt.Limit, opt.Offset = stmt.Limit, stmt.Offset + opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset + + return opt, nil +} + +// MergeSorted returns true if the options require a sorted merge. +// This is only needed when the expression is a variable reference or there is no expr. +func (opt IteratorOptions) MergeSorted() bool { + if opt.Expr == nil { + return true + } + _, ok := opt.Expr.(*VarRef) + return ok +} + +// SeekTime returns the time the iterator should start from. +// For ascending iterators this is the start time, for descending iterators it's the end time. +func (opt IteratorOptions) SeekTime() int64 { + if opt.Ascending { + return opt.StartTime + } + return opt.EndTime +} + +// Window returns the time window [start,end) that t falls within. 
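The implementation that follows subtracts the interval offset, truncates to the interval duration, and then re-applies the offset. A standalone sketch of the same arithmetic on plain nanosecond values, using the figures exercised by the window tests later in this patch:

package main

import "fmt"

// window mirrors IteratorOptions.Window for a non-zero interval:
// shift by the offset, truncate to the duration, shift back.
func window(t, duration, offset int64) (start, end int64) {
	t -= offset
	t -= t % duration
	start = t + offset
	end = start + duration
	return start, end
}

func main() {
	fmt.Println(window(4, 10, 0))  // 0 10  -- plain 10-unit interval
	fmt.Println(window(14, 10, 8)) // 8 18  -- interval with an offset of 8
}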
+func (opt IteratorOptions) Window(t int64) (start, end int64) { + if opt.Interval.IsZero() { + return opt.StartTime, opt.EndTime + } + + // Subtract the offset to the time so we calculate the correct base interval. + t -= int64(opt.Interval.Offset) + + // Truncate time by duration. + t -= t % int64(opt.Interval.Duration) + + // Apply the offset. + start = t + int64(opt.Interval.Offset) + end = start + int64(opt.Interval.Duration) + return +} + +// DerivativeInterval returns the time interval for the derivative function. +func (opt IteratorOptions) DerivativeInterval() Interval { + // Use the interval on the derivative() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + // Otherwise use the group by interval, if specified. + if opt.Interval.Duration > 0 { + return Interval{Duration: opt.Interval.Duration} + } + + return Interval{Duration: time.Second} +} + +// MarshalBinary encodes opt into a binary format. +func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { + return proto.Marshal(encodeIteratorOptions(opt)) +} + +// UnmarshalBinary decodes from a binary format in to opt. +func (opt *IteratorOptions) UnmarshalBinary(buf []byte) error { + var pb internal.IteratorOptions + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + other, err := decodeIteratorOptions(&pb) + if err != nil { + return err + } + *opt = *other + + return nil +} + +func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { + pb := &internal.IteratorOptions{ + Aux: opt.Aux, + Interval: encodeInterval(opt.Interval), + Dimensions: opt.Dimensions, + Fill: proto.Int32(int32(opt.Fill)), + StartTime: proto.Int64(opt.StartTime), + EndTime: proto.Int64(opt.EndTime), + Ascending: proto.Bool(opt.Ascending), + Limit: proto.Int64(int64(opt.Limit)), + Offset: proto.Int64(int64(opt.Offset)), + SLimit: proto.Int64(int64(opt.SLimit)), + SOffset: proto.Int64(int64(opt.SOffset)), + Dedupe: proto.Bool(opt.Dedupe), + } + + // Set expression, if set. + if opt.Expr != nil { + pb.Expr = proto.String(opt.Expr.String()) + } + + // Convert and encode sources to measurements. + sources := make([]*internal.Measurement, len(opt.Sources)) + for i, source := range opt.Sources { + mm := source.(*Measurement) + sources[i] = encodeMeasurement(mm) + } + pb.Sources = sources + + // Fill value can only be a number. Set it if available. + if v, ok := opt.FillValue.(float64); ok { + pb.FillValue = proto.Float64(v) + } + + // Set condition, if set. + if opt.Condition != nil { + pb.Condition = proto.String(opt.Condition.String()) + } + + return pb +} + +func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) { + opt := &IteratorOptions{ + Aux: pb.GetAux(), + Interval: decodeInterval(pb.GetInterval()), + Dimensions: pb.GetDimensions(), + Fill: FillOption(pb.GetFill()), + FillValue: pb.GetFillValue(), + StartTime: pb.GetStartTime(), + EndTime: pb.GetEndTime(), + Ascending: pb.GetAscending(), + Limit: int(pb.GetLimit()), + Offset: int(pb.GetOffset()), + SLimit: int(pb.GetSLimit()), + SOffset: int(pb.GetSOffset()), + Dedupe: pb.GetDedupe(), + } + + // Set expression, if set. + if pb.Expr != nil { + expr, err := ParseExpr(pb.GetExpr()) + if err != nil { + return nil, err + } + opt.Expr = expr + } + + // Convert and encode sources to measurements. 
+ sources := make([]Source, len(pb.GetSources())) + for i, source := range pb.GetSources() { + mm, err := decodeMeasurement(source) + if err != nil { + return nil, err + } + sources[i] = mm + } + opt.Sources = sources + + // Set condition, if set. + if pb.Condition != nil { + expr, err := ParseExpr(pb.GetCondition()) + if err != nil { + return nil, err + } + opt.Condition = expr + } + + return opt, nil +} + +// selectInfo represents an object that stores info about select fields. +type selectInfo struct { + calls map[*Call]struct{} + refs map[*VarRef]struct{} +} + +// newSelectInfo creates a object with call and var ref info from stmt. +func newSelectInfo(stmt *SelectStatement) *selectInfo { + info := &selectInfo{ + calls: make(map[*Call]struct{}), + refs: make(map[*VarRef]struct{}), + } + Walk(info, stmt.Fields) + return info +} + +func (v *selectInfo) Visit(n Node) Visitor { + switch n := n.(type) { + case *Call: + v.calls[n] = struct{}{} + return nil + case *VarRef: + v.refs[n] = struct{}{} + return nil + } + return v +} + +// Series represents a series that will be returned by the iterator. +type Series struct { + Name string + Tags Tags + Aux []DataType +} + +// ID is a single string that combines the name and tags id for the series. +func (s *Series) ID() string { + return s.Name + "\x00" + s.Tags.ID() +} + +// Combine combines two series with the same name and tags. +// It will promote auxiliary iterator types to the highest type. +func (s *Series) Combine(other *Series) { + for i, t := range s.Aux { + if other.Aux[i] == Unknown { + continue + } + + if t == Unknown || other.Aux[i] < t { + s.Aux[i] = other.Aux[i] + } + } +} + +func encodeSeries(s Series) *internal.Series { + aux := make([]uint32, len(s.Aux)) + for i := range s.Aux { + aux[i] = uint32(s.Aux[i]) + } + + return &internal.Series{ + Name: proto.String(s.Name), + Tags: encodeTags(s.Tags.KeyValues()), + Aux: aux, + } +} + +func decodeSeries(pb *internal.Series) Series { + var aux []DataType + if len(pb.GetAux()) > 0 { + aux = make([]DataType, len(pb.GetAux())) + for i := range pb.GetAux() { + aux[i] = DataType(pb.GetAux()[i]) + } + } + + return Series{ + Name: pb.GetName(), + Tags: newTagsID(string(pb.GetTags())), + Aux: aux, + } +} + +// SeriesList is a list of series that will be returned by an iterator. +type SeriesList []Series + +func (a SeriesList) Len() int { return len(a) } +func (a SeriesList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a SeriesList) Less(i, j int) bool { + if a[i].Name != a[j].Name { + return a[i].Name < a[j].Name + } + return a[i].Tags.ID() < a[j].Tags.ID() +} + +// MarshalBinary encodes list into a binary format. +func (a SeriesList) MarshalBinary() ([]byte, error) { + return proto.Marshal(encodeSeriesList(a)) +} + +// UnmarshalBinary decodes from a binary format. +func (a *SeriesList) UnmarshalBinary(buf []byte) error { + var pb internal.SeriesList + if err := proto.Unmarshal(buf, &pb); err != nil { + return err + } + + (*a) = decodeSeriesList(&pb) + + return nil +} + +func encodeSeriesList(a SeriesList) *internal.SeriesList { + pb := make([]*internal.Series, len(a)) + for i := range a { + pb[i] = encodeSeries(a[i]) + } + + return &internal.SeriesList{ + Items: pb, + } +} + +func decodeSeriesList(pb *internal.SeriesList) SeriesList { + a := make([]Series, len(pb.GetItems())) + for i := range pb.GetItems() { + a[i] = decodeSeries(pb.GetItems()[i]) + } + return SeriesList(a) +} + +// Interval represents a repeating interval for a query. 
+type Interval struct { + Duration time.Duration + Offset time.Duration +} + +// IsZero returns true if the interval has no duration. +func (i Interval) IsZero() bool { return i.Duration == 0 } + +func encodeInterval(i Interval) *internal.Interval { + return &internal.Interval{ + Duration: proto.Int64(i.Duration.Nanoseconds()), + Offset: proto.Int64(i.Offset.Nanoseconds()), + } +} + +func decodeInterval(pb *internal.Interval) Interval { + return Interval{ + Duration: time.Duration(pb.GetDuration()), + Offset: time.Duration(pb.GetOffset()), + } +} + +type nilFloatIterator struct{} + +func (*nilFloatIterator) Close() error { return nil } +func (*nilFloatIterator) Next() *FloatPoint { return nil } + +// integerFloatTransformIterator executes a function to modify an existing point for every +// output of the input iterator. +type integerFloatTransformIterator struct { + input IntegerIterator + fn integerFloatTransformFunc +} + +// Close closes the iterator and all child iterators. +func (itr *integerFloatTransformIterator) Close() error { return itr.input.Close() } + +// Next returns the minimum value for the next available interval. +func (itr *integerFloatTransformIterator) Next() *FloatPoint { + p := itr.input.Next() + if p != nil { + return itr.fn(p) + } + return nil +} + +// integerFloatTransformFunc creates or modifies a point. +// The point passed in may be modified and returned rather than allocating a +// new point if possible. +type integerFloatTransformFunc func(p *IntegerPoint) *FloatPoint + +type integerFloatCastIterator struct { + input IntegerIterator +} + +func (itr *integerFloatCastIterator) Close() error { return itr.input.Close() } +func (itr *integerFloatCastIterator) Next() *FloatPoint { + p := itr.input.Next() + if p == nil { + return nil + } + + return &FloatPoint{ + Name: p.Name, + Tags: p.Tags, + Time: p.Time, + Nil: p.Nil, + Value: float64(p.Value), + Aux: p.Aux, + } +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator_test.go b/vendor/github.com/influxdata/influxdb/influxql/iterator_test.go new file mode 100644 index 0000000000..668d7e47df --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator_test.go @@ -0,0 +1,1145 @@ +package influxql_test + +import ( + "fmt" + "math" + "math/rand" + "reflect" + "regexp" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/influxql" + "github.com/influxdata/influxdb/pkg/deep" +) + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.FloatPoint{}}, + {Points: []influxql.FloatPoint{}}, + } + + itr := influxql.NewMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.IntegerPoint{}}, + } + itr := influxql.NewMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}, + }}, + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []influxql.StringPoint{}}, + } + itr := influxql.NewMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
+func TestMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}, + }}, + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}, + }}, + {Points: []influxql.BooleanPoint{}}, + } + itr := influxql.NewMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestMergeIterator_Nil(t *testing.T) { + itr := influxql.NewMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{}).(influxql.FloatIterator) + if p := itr.Next(); p != nil { + t.Fatalf("unexpected point: %#v", p) + } + itr.Close() +} + +func TestMergeIterator_Cast_Float(t *testing.T) { + inputs := []influxql.Iterator{ + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + } + + itr := influxql.NewMergeIterator(inputs, influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + 
{&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. +func TestSortedMergeIterator_Float(t *testing.T) { + inputs := []*FloatIterator{ + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.FloatPoint{}}, + } + itr := influxql.NewSortedMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Integer(t *testing.T) { + inputs := []*IntegerIterator{ + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + {Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + {Points: []influxql.IntegerPoint{}}, + } + itr := influxql.NewSortedMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_String(t *testing.T) { + inputs := []*StringIterator{ + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}, + }}, + {Points: []influxql.StringPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, + }}, + {Points: []influxql.StringPoint{}}, + } + itr := influxql.NewSortedMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, + {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, + {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +// Ensure that a set of iterators can be merged together, sorted by name/tag. 
+func TestSortedMergeIterator_Boolean(t *testing.T) { + inputs := []*BooleanIterator{ + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}, + }}, + {Points: []influxql.BooleanPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}, + }}, + {Points: []influxql.BooleanPoint{}}, + } + itr := influxql.NewSortedMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, + {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}}, + {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } +} + +func TestSortedMergeIterator_Nil(t *testing.T) { + itr := influxql.NewSortedMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{}).(influxql.FloatIterator) + if p := itr.Next(); p != nil { + t.Fatalf("unexpected point: %#v", p) + } + itr.Close() +} + +func TestSortedMergeIterator_Cast_Float(t *testing.T) { + inputs := []influxql.Iterator{ + &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, + {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, + {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, + }}, + &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, + {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, + {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, + }}, + } + + itr := influxql.NewSortedMergeIterator(inputs, influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10 * time.Nanosecond, + }, + Ascending: true, + }) + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, 
Value: 3}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, + {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, + {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, + }) { + t.Errorf("unexpected points: %s", spew.Sdump(a)) + } + + for i, input := range inputs { + switch input := input.(type) { + case *FloatIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + case *IntegerIterator: + if !input.Closed { + t.Errorf("iterator %d not closed", i) + } + } + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Float(t *testing.T) { + input := &FloatIterator{Points: []influxql.FloatPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Name: "cpu", Time: 5, Value: 3}}, + {&influxql.FloatPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_Integer(t *testing.T) { + input := &IntegerIterator{Points: []influxql.IntegerPoint{ + {Name: "cpu", Time: 0, Value: 1}, + {Name: "cpu", Time: 5, Value: 3}, + {Name: "cpu", Time: 10, Value: 5}, + {Name: "mem", Time: 5, Value: 3}, + {Name: "mem", Time: 7, Value: 8}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 3}}, + {&influxql.IntegerPoint{Name: "mem", Time: 7, Value: 8}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. +func TestLimitIterator_String(t *testing.T) { + input := &StringIterator{Points: []influxql.StringPoint{ + {Name: "cpu", Time: 0, Value: "a"}, + {Name: "cpu", Time: 5, Value: "b"}, + {Name: "cpu", Time: 10, Value: "c"}, + {Name: "mem", Time: 5, Value: "d"}, + {Name: "mem", Time: 7, Value: "e"}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.StringPoint{Name: "cpu", Time: 5, Value: "b"}}, + {&influxql.StringPoint{Name: "mem", Time: 7, Value: "e"}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure limit iterators work with limit and offset. 
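These limit tests all use Limit: 1 with Offset: 1, so only the second point of each series survives. A minimal sketch of that per-series slicing, independent of the influxql types; the map layout is an illustrative assumption:

package main

import "fmt"

// limitOffset keeps at most limit points per series after skipping offset
// points, which is the behaviour the limit-iterator tests exercise.
func limitOffset(series map[string][]int64, limit, offset int) map[string][]int64 {
	out := make(map[string][]int64, len(series))
	for name, points := range series {
		lo := offset
		if lo > len(points) {
			lo = len(points)
		}
		hi := lo + limit
		if hi > len(points) {
			hi = len(points)
		}
		out[name] = points[lo:hi]
	}
	return out
}

func main() {
	series := map[string][]int64{
		"cpu": {0, 5, 10}, // point times per series
		"mem": {5, 7},
	}
	// Offset 1, limit 1: keep only the second point of each series.
	fmt.Println(limitOffset(series, 1, 1)) // map[cpu:[5] mem:[7]]
}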
+func TestLimitIterator_Boolean(t *testing.T) { + input := &BooleanIterator{Points: []influxql.BooleanPoint{ + {Name: "cpu", Time: 0, Value: true}, + {Name: "cpu", Time: 5, Value: false}, + {Name: "cpu", Time: 10, Value: true}, + {Name: "mem", Time: 5, Value: false}, + {Name: "mem", Time: 7, Value: true}, + }} + + itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ + Limit: 1, + Offset: 1, + }) + + if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.BooleanPoint{Name: "cpu", Time: 5, Value: false}}, + {&influxql.BooleanPoint{Name: "mem", Time: 7, Value: true}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } + + if !input.Closed { + t.Error("iterator not closed") + } +} + +// Ensure auxilary iterators can be created for auxilary fields. +func TestFloatAuxIterator(t *testing.T) { + itr := influxql.NewAuxIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}}, + {Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}}, + }}, + []influxql.Series{ + {Aux: []influxql.DataType{influxql.Float, influxql.Float}}, + }, + influxql.IteratorOptions{Aux: []string{"f0", "f1"}}, + ) + + itrs := []influxql.Iterator{ + itr, + itr.Iterator("f0"), + itr.Iterator("f1"), + itr.Iterator("f0"), + } + itr.Start() + + if a := Iterators(itrs).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + { + &influxql.FloatPoint{Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}}, + &influxql.FloatPoint{Time: 0, Value: float64(100)}, + &influxql.FloatPoint{Time: 0, Value: float64(200)}, + &influxql.FloatPoint{Time: 0, Value: float64(100)}, + }, + { + &influxql.FloatPoint{Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}}, + &influxql.FloatPoint{Time: 1, Value: float64(500)}, + &influxql.FloatPoint{Time: 1, Value: math.NaN()}, + &influxql.FloatPoint{Time: 1, Value: float64(500)}, + }, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Ensure limit iterator returns a subset of points. +func TestLimitIterator(t *testing.T) { + itr := influxql.NewLimitIterator( + &FloatIterator{Points: []influxql.FloatPoint{ + {Time: 0, Value: 0}, + {Time: 1, Value: 1}, + {Time: 2, Value: 2}, + {Time: 3, Value: 3}, + }}, + influxql.IteratorOptions{ + Limit: 2, + Offset: 1, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }, + ) + + if a := (Iterators{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ + {&influxql.FloatPoint{Time: 1, Value: 1}}, + {&influxql.FloatPoint{Time: 2, Value: 2}}, + }) { + t.Fatalf("unexpected points: %s", spew.Sdump(a)) + } +} + +// Iterators is a test wrapper for iterators. +type Iterators []influxql.Iterator + +// Next returns the next value from each iterator. +// Returns nil if any iterator returns a nil. +func (itrs Iterators) Next() []influxql.Point { + a := make([]influxql.Point, len(itrs)) + for i, itr := range itrs { + switch itr := itr.(type) { + case influxql.FloatIterator: + fp := itr.Next() + if fp == nil { + return nil + } + a[i] = fp + case influxql.IntegerIterator: + ip := itr.Next() + if ip == nil { + return nil + } + a[i] = ip + case influxql.StringIterator: + sp := itr.Next() + if sp == nil { + return nil + } + a[i] = sp + case influxql.BooleanIterator: + bp := itr.Next() + if bp == nil { + return nil + } + a[i] = bp + default: + panic(fmt.Sprintf("iterator type not supported: %T", itr)) + } + } + return a +} + +// ReadAll reads all points from all iterators. 
+func (itrs Iterators) ReadAll() [][]influxql.Point { + var a [][]influxql.Point + + // Read from every iterator until a nil is encountered. + for { + points := itrs.Next() + if points == nil { + break + } + a = append(a, points) + } + + // Close all iterators. + influxql.Iterators(itrs).Close() + + return a +} + +func TestIteratorOptions_Window_Interval(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + }, + } + + start, end := opt.Window(4) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 10 { + t.Errorf("expected end to be 10, got %d", end) + } +} + +func TestIteratorOptions_Window_Offset(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 8, + }, + } + + start, end := opt.Window(14) + if start != 8 { + t.Errorf("expected start to be 8, got %d", start) + } + if end != 18 { + t.Errorf("expected end to be 18, got %d", end) + } +} + +func TestIteratorOptions_Window_Default(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 0, + EndTime: 60, + } + + start, end := opt.Window(34) + if start != 0 { + t.Errorf("expected start to be 0, got %d", start) + } + if end != 60 { + t.Errorf("expected end to be 60, got %d", end) + } +} + +func TestIteratorOptions_SeekTime_Ascending(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: true, + } + + time := opt.SeekTime() + if time != 30 { + t.Errorf("expected time to be 30, got %d", time) + } +} + +func TestIteratorOptions_SeekTime_Descending(t *testing.T) { + opt := influxql.IteratorOptions{ + StartTime: 30, + EndTime: 60, + Ascending: false, + } + + time := opt.SeekTime() + if time != 60 { + t.Errorf("expected time to be 60, got %d", time) + } +} + +func TestIteratorOptions_MergeSorted(t *testing.T) { + opt := influxql.IteratorOptions{} + sorted := opt.MergeSorted() + if !sorted { + t.Error("expected no expression to be sorted, got unsorted") + } + + opt.Expr = &influxql.VarRef{} + sorted = opt.MergeSorted() + if !sorted { + t.Error("expected expression with varref to be sorted, got unsorted") + } + + opt.Expr = &influxql.Call{} + sorted = opt.MergeSorted() + if sorted { + t.Error("expected expression without varref to be unsorted, got sorted") + } +} + +func TestIteratorOptions_DerivativeInterval_Default(t *testing.T) { + opt := influxql.IteratorOptions{} + expected := influxql.Interval{Duration: time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_GroupBy(t *testing.T) { + opt := influxql.IteratorOptions{ + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 10} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +func TestIteratorOptions_DerivativeInterval_Call(t *testing.T) { + opt := influxql.IteratorOptions{ + Expr: &influxql.Call{ + Name: "mean", + Args: []influxql.Expr{ + &influxql.VarRef{Val: "value"}, + &influxql.DurationLiteral{Val: 2 * time.Second}, + }, + }, + Interval: influxql.Interval{ + Duration: 10, + Offset: 2, + }, + } + expected := influxql.Interval{Duration: 2 * time.Second} + actual := opt.DerivativeInterval() + if actual != expected { + t.Errorf("expected derivative interval to be %v, got %v", expected, actual) + } +} + +// Ensure 
iterator options can be marshaled to and from a binary format. +func TestIteratorOptions_MarshalBinary(t *testing.T) { + opt := &influxql.IteratorOptions{ + Expr: MustParseExpr("count(value)"), + Aux: []string{"a", "b", "c"}, + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db0", RetentionPolicy: "rp0", Name: "mm0"}, + }, + Interval: influxql.Interval{ + Duration: 1 * time.Hour, + Offset: 20 * time.Minute, + }, + Dimensions: []string{"region", "host"}, + Fill: influxql.NumberFill, + FillValue: float64(100), + Condition: MustParseExpr(`foo = 'bar'`), + StartTime: 1000, + EndTime: 2000, + Ascending: true, + Limit: 100, + Offset: 200, + SLimit: 300, + SOffset: 400, + Dedupe: true, + } + + // Marshal to binary. + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other influxql.IteratorOptions + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(&other, opt) { + t.Fatalf("unexpected options: %s", spew.Sdump(other)) + } +} + +// Ensure iterator options with a regex measurement can be marshaled. +func TestIteratorOptions_MarshalBinary_Measurement_Regex(t *testing.T) { + opt := &influxql.IteratorOptions{ + Sources: []influxql.Source{ + &influxql.Measurement{Database: "db1", RetentionPolicy: "rp2", Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`series.+`)}}, + }, + } + + // Marshal to binary. + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other influxql.IteratorOptions + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if v := other.Sources[0].(*influxql.Measurement).Regex.Val.String(); v != `/series.+/` { + t.Fatalf("unexpected measurement regex: %s", v) + } +} + +// Ensure series list can be marshaled into and out of a binary format. +func TestSeriesList_MarshalBinary(t *testing.T) { + a := []influxql.Series{ + {Name: "cpu", Tags: ParseTags("foo=bar"), Aux: []influxql.DataType{influxql.Float, influxql.String}}, + {Name: "mem", Aux: []influxql.DataType{influxql.Integer}}, + {Name: "disk"}, + {}, + } + + // Marshal to binary. + buf, err := influxql.SeriesList(a).MarshalBinary() + if err != nil { + t.Fatal(err) + } + + // Unmarshal back to an object. + var other influxql.SeriesList + if err := other.UnmarshalBinary(buf); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(other, influxql.SeriesList(a)) { + t.Fatalf("unexpected series list: %s", spew.Sdump(other)) + } +} + +// IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator. 
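A hedged sketch of how a test might wire the mock defined just below, assuming the FloatIterator test double declared later in this file; the point values are made up:

// newTestIteratorCreator sets only the CreateIteratorFn hook, so the mock's
// SeriesKeys falls back to scanning the returned iterator for series.
func newTestIteratorCreator() *IteratorCreator {
	return &IteratorCreator{
		CreateIteratorFn: func(opt influxql.IteratorOptions) (influxql.Iterator, error) {
			return &FloatIterator{Points: []influxql.FloatPoint{
				{Name: "cpu", Time: 0, Value: 1},
				{Name: "cpu", Time: 10, Value: 2},
			}}, nil
		},
	}
}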
+type IteratorCreator struct { + CreateIteratorFn func(opt influxql.IteratorOptions) (influxql.Iterator, error) + FieldDimensionsFn func(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) + SeriesKeysFn func(opt influxql.IteratorOptions) (influxql.SeriesList, error) +} + +func (ic *IteratorCreator) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) { + return ic.CreateIteratorFn(opt) +} + +func (ic *IteratorCreator) FieldDimensions(sources influxql.Sources) (fields, dimensions map[string]struct{}, err error) { + return ic.FieldDimensionsFn(sources) +} + +func (ic *IteratorCreator) SeriesKeys(opt influxql.IteratorOptions) (influxql.SeriesList, error) { + if ic.SeriesKeysFn != nil { + return ic.SeriesKeysFn(opt) + } + + itr, err := ic.CreateIterator(opt) + if err != nil { + return nil, err + } + + seriesMap := make(map[string]influxql.Series) + switch itr := itr.(type) { + case influxql.FloatIterator: + for p := itr.Next(); p != nil; p = itr.Next() { + s := influxql.Series{Name: p.Name, Tags: p.Tags, Aux: influxql.InspectDataTypes(p.Aux)} + seriesMap[s.ID()] = s + } + case influxql.IntegerIterator: + for p := itr.Next(); p != nil; p = itr.Next() { + s := influxql.Series{Name: p.Name, Tags: p.Tags, Aux: influxql.InspectDataTypes(p.Aux)} + seriesMap[s.ID()] = s + } + case influxql.StringIterator: + for p := itr.Next(); p != nil; p = itr.Next() { + s := influxql.Series{Name: p.Name, Tags: p.Tags, Aux: influxql.InspectDataTypes(p.Aux)} + seriesMap[s.ID()] = s + } + case influxql.BooleanIterator: + for p := itr.Next(); p != nil; p = itr.Next() { + s := influxql.Series{Name: p.Name, Tags: p.Tags, Aux: influxql.InspectDataTypes(p.Aux)} + seriesMap[s.ID()] = s + } + } + + seriesList := make([]influxql.Series, 0, len(seriesMap)) + for _, s := range seriesMap { + seriesList = append(seriesList, s) + } + return influxql.SeriesList(seriesList), nil +} + +// Test implementation of influxql.FloatIterator +type FloatIterator struct { + Points []influxql.FloatPoint + Closed bool +} + +// Close is a no-op. +func (itr *FloatIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *FloatIterator) Next() *influxql.FloatPoint { + if len(itr.Points) == 0 || itr.Closed { + return nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v +} + +func FloatIterators(inputs []*FloatIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// GenerateFloatIterator creates a FloatIterator with random data. +func GenerateFloatIterator(rand *rand.Rand, valueN int) *FloatIterator { + const interval = 10 * time.Second + + itr := &FloatIterator{ + Points: make([]influxql.FloatPoint, valueN), + } + + for i := 0; i < valueN; i++ { + // Generate incrementing timestamp with some jitter (1s). + jitter := (rand.Int63n(2) * int64(time.Second)) + timestamp := int64(i)*int64(10*time.Second) + jitter + + itr.Points[i] = influxql.FloatPoint{ + Time: timestamp, + Value: rand.Float64(), + } + } + + return itr +} + +// Test implementation of influxql.IntegerIterator +type IntegerIterator struct { + Points []influxql.IntegerPoint + Closed bool +} + +// Close is a no-op. +func (itr *IntegerIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. 
+func (itr *IntegerIterator) Next() *influxql.IntegerPoint { + if len(itr.Points) == 0 || itr.Closed { + return nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v +} + +func IntegerIterators(inputs []*IntegerIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.StringIterator +type StringIterator struct { + Points []influxql.StringPoint + Closed bool +} + +// Close is a no-op. +func (itr *StringIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *StringIterator) Next() *influxql.StringPoint { + if len(itr.Points) == 0 || itr.Closed { + return nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v +} + +func StringIterators(inputs []*StringIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} + +// Test implementation of influxql.BooleanIterator +type BooleanIterator struct { + Points []influxql.BooleanPoint + Closed bool +} + +// Close is a no-op. +func (itr *BooleanIterator) Close() error { itr.Closed = true; return nil } + +// Next returns the next value and shifts it off the beginning of the points slice. +func (itr *BooleanIterator) Next() *influxql.BooleanPoint { + if len(itr.Points) == 0 || itr.Closed { + return nil + } + + v := &itr.Points[0] + itr.Points = itr.Points[1:] + return v +} + +func BooleanIterators(inputs []*BooleanIterator) []influxql.Iterator { + itrs := make([]influxql.Iterator, len(inputs)) + for i := range itrs { + itrs[i] = influxql.Iterator(inputs[i]) + } + return itrs +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/parser.go b/vendor/github.com/influxdata/influxdb/influxql/parser.go new file mode 100644 index 0000000000..81e99f7cd2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxql/parser.go @@ -0,0 +1,2659 @@ +package influxql + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +const ( + // DateFormat represents the format for date literals. + DateFormat = "2006-01-02" + + // DateTimeFormat represents the format for date time literals. + DateTimeFormat = "2006-01-02 15:04:05.999999" +) + +// Parser represents an InfluxQL parser. +type Parser struct { + s *bufScanner +} + +// NewParser returns a new instance of Parser. +func NewParser(r io.Reader) *Parser { + return &Parser{s: newBufScanner(r)} +} + +// ParseQuery parses a query string and returns its AST representation. +func ParseQuery(s string) (*Query, error) { return NewParser(strings.NewReader(s)).ParseQuery() } + +// ParseStatement parses a statement string and returns its AST representation. +func ParseStatement(s string) (Statement, error) { + return NewParser(strings.NewReader(s)).ParseStatement() +} + +// MustParseStatement parses a statement string and returns its AST. Panic on error. +func MustParseStatement(s string) Statement { + stmt, err := ParseStatement(s) + if err != nil { + panic(err.Error()) + } + return stmt +} + +// ParseExpr parses an expression string and returns its AST representation. +func ParseExpr(s string) (Expr, error) { return NewParser(strings.NewReader(s)).ParseExpr() } + +// MustParseExpr parses an expression string and returns its AST. Panic on error. 
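A short usage sketch of the parser entry points declared above; the query text is arbitrary and the import path follows this vendor tree:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Parse a full query into its AST; a query may hold several statements.
	q, err := influxql.ParseQuery(`SELECT mean(value) FROM cpu WHERE host = 'A' GROUP BY time(10s)`)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(len(q.Statements)) // 1

	// Parse a bare expression; MustParseExpr would panic on error instead
	// of returning it, which is convenient in tests.
	expr, err := influxql.ParseExpr(`count(value) + 1`)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(expr.String()) // prints the expression back out
}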
+func MustParseExpr(s string) Expr { + expr, err := ParseExpr(s) + if err != nil { + panic(err.Error()) + } + return expr +} + +// ParseQuery parses an InfluxQL string and returns a Query AST object. +func (p *Parser) ParseQuery() (*Query, error) { + var statements Statements + var semi bool + + for { + if tok, _, _ := p.scanIgnoreWhitespace(); tok == EOF { + return &Query{Statements: statements}, nil + } else if !semi && tok == SEMICOLON { + semi = true + } else { + p.unscan() + s, err := p.ParseStatement() + if err != nil { + return nil, err + } + statements = append(statements, s) + semi = false + } + } +} + +// ParseStatement parses an InfluxQL string and returns a Statement AST object. +func (p *Parser) ParseStatement() (Statement, error) { + // Inspect the first token. + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case SELECT: + return p.parseSelectStatement(targetNotRequired) + case DELETE: + return p.parseDeleteStatement() + case SHOW: + return p.parseShowStatement() + case CREATE: + return p.parseCreateStatement() + case DROP: + return p.parseDropStatement() + case GRANT: + return p.parseGrantStatement() + case REVOKE: + return p.parseRevokeStatement() + case ALTER: + return p.parseAlterStatement() + case SET: + return p.parseSetPasswordUserStatement() + default: + return nil, newParseError(tokstr(tok, lit), []string{"SELECT", "DELETE", "SHOW", "CREATE", "DROP", "GRANT", "REVOKE", "ALTER", "SET"}, pos) + } +} + +// parseShowStatement parses a string and returns a list statement. +// This function assumes the SHOW token has already been consumed. +func (p *Parser) parseShowStatement() (Statement, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case CONTINUOUS: + return p.parseShowContinuousQueriesStatement() + case GRANTS: + return p.parseGrantsForUserStatement() + case DATABASES: + return p.parseShowDatabasesStatement() + case SERVERS: + return p.parseShowServersStatement() + case FIELD: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == KEYS { + return p.parseShowFieldKeysStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"KEYS"}, pos) + case MEASUREMENTS: + return p.parseShowMeasurementsStatement() + case RETENTION: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == POLICIES { + return p.parseShowRetentionPoliciesStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"POLICIES"}, pos) + case SERIES: + return p.parseShowSeriesStatement() + case SHARD: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == GROUPS { + return p.parseShowShardGroupsStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"GROUPS"}, pos) + case SHARDS: + return p.parseShowShardsStatement() + case STATS: + return p.parseShowStatsStatement() + case DIAGNOSTICS: + return p.parseShowDiagnosticsStatement() + case TAG: + tok, pos, lit := p.scanIgnoreWhitespace() + if tok == KEYS { + return p.parseShowTagKeysStatement() + } else if tok == VALUES { + return p.parseShowTagValuesStatement() + } + return nil, newParseError(tokstr(tok, lit), []string{"KEYS", "VALUES"}, pos) + case USERS: + return p.parseShowUsersStatement() + case SUBSCRIPTIONS: + return p.parseShowSubscriptionsStatement() + } + + showQueryKeywords := []string{ + "CONTINUOUS", + "DATABASES", + "FIELD", + "GRANTS", + "MEASUREMENTS", + "RETENTION", + "SERIES", + "SERVERS", + "TAG", + "USERS", + "STATS", + "DIAGNOSTICS", + "SHARD", + "SHARDS", + "SUBSCRIPTIONS", + } + sort.Strings(showQueryKeywords) + + return nil, newParseError(tokstr(tok, lit), 
+		showQueryKeywords, pos)
+}
+
+// parseCreateStatement parses a string and returns a create statement.
+// This function assumes the CREATE token has already been consumed.
+func (p *Parser) parseCreateStatement() (Statement, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok == CONTINUOUS {
+		return p.parseCreateContinuousQueryStatement()
+	} else if tok == DATABASE {
+		return p.parseCreateDatabaseStatement()
+	} else if tok == USER {
+		return p.parseCreateUserStatement()
+	} else if tok == RETENTION {
+		tok, pos, lit = p.scanIgnoreWhitespace()
+		if tok != POLICY {
+			return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos)
+		}
+		return p.parseCreateRetentionPolicyStatement()
+	} else if tok == SUBSCRIPTION {
+		return p.parseCreateSubscriptionStatement()
+	}
+
+	return nil, newParseError(tokstr(tok, lit), []string{"CONTINUOUS", "DATABASE", "USER", "RETENTION", "SUBSCRIPTION"}, pos)
+}
+
+// parseDropStatement parses a string and returns a drop statement.
+// This function assumes the DROP token has already been consumed.
+func (p *Parser) parseDropStatement() (Statement, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok == SERIES {
+		return p.parseDropSeriesStatement()
+	} else if tok == MEASUREMENT {
+		return p.parseDropMeasurementStatement()
+	} else if tok == CONTINUOUS {
+		return p.parseDropContinuousQueryStatement()
+	} else if tok == DATABASE {
+		return p.parseDropDatabaseStatement()
+	} else if tok == RETENTION {
+		if tok, pos, lit := p.scanIgnoreWhitespace(); tok != POLICY {
+			return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos)
+		}
+		return p.parseDropRetentionPolicyStatement()
+	} else if tok == USER {
+		return p.parseDropUserStatement()
+	} else if tok == META || tok == DATA {
+		return p.parseDropServerStatement(tok)
+	} else if tok == SUBSCRIPTION {
+		return p.parseDropSubscriptionStatement()
+	}
+
+	return nil, newParseError(tokstr(tok, lit), []string{"SERIES", "CONTINUOUS", "MEASUREMENT", "SERVER", "SUBSCRIPTION"}, pos)
+}
+
+// parseAlterStatement parses a string and returns an alter statement.
+// This function assumes the ALTER token has already been consumed.
+func (p *Parser) parseAlterStatement() (Statement, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok == RETENTION {
+		if tok, pos, lit = p.scanIgnoreWhitespace(); tok != POLICY {
+			return nil, newParseError(tokstr(tok, lit), []string{"POLICY"}, pos)
+		}
+		return p.parseAlterRetentionPolicyStatement()
+	}
+
+	return nil, newParseError(tokstr(tok, lit), []string{"RETENTION"}, pos)
+}
+
+// parseSetPasswordUserStatement parses a string and returns a set statement.
+// This function assumes the SET token has already been consumed.
+func (p *Parser) parseSetPasswordUserStatement() (*SetPasswordUserStatement, error) {
+	stmt := &SetPasswordUserStatement{}
+
+	// Consume the required PASSWORD FOR tokens.
+	if err := p.parseTokens([]Token{PASSWORD, FOR}); err != nil {
+		return nil, err
+	}
+
+	// Parse the username.
+	ident, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.Name = ident
+
+	// Consume the required = token.
+	if tok, pos, lit := p.scanIgnoreWhitespace(); tok != EQ {
+		return nil, newParseError(tokstr(tok, lit), []string{"="}, pos)
+	}
+
+	// Parse the new user's password.
+	if ident, err = p.parseString(); err != nil {
+		return nil, err
+	}
+	stmt.Password = ident
+
+	return stmt, nil
+}
+
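For orientation, here is a minimal standalone sketch of how the exported entry point reaches the code path above. It assumes the package is imported under its upstream path github.com/influxdata/influxdb/influxql rather than the vendored one, and the user name and password literals are placeholders only.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// ParseStatement dispatches on the leading SET token and hands the rest
	// of the input to parseSetPasswordUserStatement.
	stmt, err := influxql.ParseStatement(`SET PASSWORD FOR "todd" = 'mypassword'`)
	if err != nil {
		log.Fatal(err)
	}
	set, ok := stmt.(*influxql.SetPasswordUserStatement)
	if !ok {
		log.Fatalf("unexpected statement type %T", stmt)
	}
	fmt.Println(set.Name) // the parsed user name
}

Each statement parser returns a concrete AST type, so callers type-assert the Statement interface value to reach the parsed fields.

+// parseCreateSubscriptionStatement parses a string and returns a CreateSubscriptionStatement.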
+// This function assumes the "CREATE SUBSCRIPTION" tokens have already been consumed. +func (p *Parser) parseCreateSubscriptionStatement() (*CreateSubscriptionStatement, error) { + stmt := &CreateSubscriptionStatement{} + + // Read the id of the subscription to create. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.Name = ident + + // Expect an "ON" keyword. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + + // Read the name of the database. + if ident, err = p.parseIdent(); err != nil { + return nil, err + } + stmt.Database = ident + + if tok, pos, lit := p.scan(); tok != DOT { + return nil, newParseError(tokstr(tok, lit), []string{"."}, pos) + } + + // Read the name of the retention policy. + if ident, err = p.parseIdent(); err != nil { + return nil, err + } + stmt.RetentionPolicy = ident + + // Expect a "DESTINATIONS" keyword. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DESTINATIONS { + return nil, newParseError(tokstr(tok, lit), []string{"DESTINATIONS"}, pos) + } + + // Expect one of "ANY ALL" keywords. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok == ALL || tok == ANY { + stmt.Mode = tokens[tok] + } else { + return nil, newParseError(tokstr(tok, lit), []string{"ALL", "ANY"}, pos) + } + + // Read list of destinations. + var destinations []string + if destinations, err = p.parseStringList(); err != nil { + return nil, err + } + stmt.Destinations = destinations + + return stmt, nil +} + +// parseCreateRetentionPolicyStatement parses a string and returns a create retention policy statement. +// This function assumes the CREATE RETENTION POLICY tokens have already been consumed. +func (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicyStatement, error) { + stmt := &CreateRetentionPolicyStatement{} + + // Parse the retention policy name. + ident, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.Name = ident + + // Consume the required ON token. + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON { + return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) + } + + // Parse the database name. + ident, err = p.parseIdent() + if err != nil { + return nil, err + } + stmt.Database = ident + + // Parse required DURATION token. + tok, pos, lit := p.scanIgnoreWhitespace() + if tok != DURATION { + return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) + } + + // Parse duration value + d, err := p.parseDuration() + if err != nil { + return nil, err + } + stmt.Duration = d + + // Parse required REPLICATION token. + if tok, pos, lit = p.scanIgnoreWhitespace(); tok != REPLICATION { + return nil, newParseError(tokstr(tok, lit), []string{"REPLICATION"}, pos) + } + + // Parse replication value. + n, err := p.parseInt(1, math.MaxInt32) + if err != nil { + return nil, err + } + stmt.Replication = n + + // Parse optional DEFAULT token. + if tok, pos, lit = p.scanIgnoreWhitespace(); tok == DEFAULT { + stmt.Default = true + } else if tok != EOF && tok != SEMICOLON { + return nil, newParseError(tokstr(tok, lit), []string{"DEFAULT"}, pos) + } + + return stmt, nil +} + +// parseAlterRetentionPolicyStatement parses a string and returns an alter retention policy statement. +// This function assumes the ALTER RETENTION POLICY tokens have already been consumed. 
+func (p *Parser) parseAlterRetentionPolicyStatement() (*AlterRetentionPolicyStatement, error) {
+	stmt := &AlterRetentionPolicyStatement{}
+
+	// Parse the retention policy name.
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok == DEFAULT {
+		stmt.Name = "default"
+	} else if tok == IDENT {
+		stmt.Name = lit
+	} else {
+		return nil, newParseError(tokstr(tok, lit), []string{"identifier"}, pos)
+	}
+
+	// Consume the required ON token.
+	if tok, pos, lit = p.scanIgnoreWhitespace(); tok != ON {
+		return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+	}
+
+	// Parse the database name.
+	ident, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.Database = ident
+
+	// Loop through option tokens (DURATION, REPLICATION, DEFAULT, etc.).
+	maxNumOptions := 3
+Loop:
+	for i := 0; i < maxNumOptions; i++ {
+		tok, pos, lit := p.scanIgnoreWhitespace()
+		switch tok {
+		case DURATION:
+			d, err := p.parseDuration()
+			if err != nil {
+				return nil, err
+			}
+			stmt.Duration = &d
+		case REPLICATION:
+			n, err := p.parseInt(1, math.MaxInt32)
+			if err != nil {
+				return nil, err
+			}
+			stmt.Replication = &n
+		case DEFAULT:
+			stmt.Default = true
+		default:
+			if i < 1 {
+				return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "REPLICATION", "DEFAULT"}, pos)
+			}
+			p.unscan()
+			break Loop
+		}
+	}
+
+	return stmt, nil
+}
+
+// parseInt parses a string and returns an integer literal.
+func (p *Parser) parseInt(min, max int) (int, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok != NUMBER {
+		return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos)
+	}
+
+	// Return an error if the number has a fractional part.
+	if strings.Contains(lit, ".") {
+		return 0, &ParseError{Message: "number must be an integer", Pos: pos}
+	}
+
+	// Convert string to int.
+	n, err := strconv.Atoi(lit)
+	if err != nil {
+		return 0, &ParseError{Message: err.Error(), Pos: pos}
+	} else if min > n || n > max {
+		return 0, &ParseError{
+			Message: fmt.Sprintf("invalid value %d: must be %d <= n <= %d", n, min, max),
+			Pos:     pos,
+		}
+	}
+
+	return n, nil
+}
+
+// parseUInt32 parses a string and returns a 32-bit unsigned integer literal.
+func (p *Parser) parseUInt32() (uint32, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok != NUMBER {
+		return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos)
+	}
+
+	// Convert the string to an unsigned 32-bit integer.
+	n, err := strconv.ParseUint(lit, 10, 32)
+	if err != nil {
+		return 0, &ParseError{Message: err.Error(), Pos: pos}
+	}
+
+	return uint32(n), nil
+}
+
+// parseUInt64 parses a string and returns a 64-bit unsigned integer literal.
+func (p *Parser) parseUInt64() (uint64, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok != NUMBER {
+		return 0, newParseError(tokstr(tok, lit), []string{"number"}, pos)
+	}
+
+	// Convert the string to an unsigned 64-bit integer.
+	n, err := strconv.ParseUint(lit, 10, 64)
+	if err != nil {
+		return 0, &ParseError{Message: err.Error(), Pos: pos}
+	}
+
+	return uint64(n), nil
+}
+
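A rough standalone sketch of the ALTER RETENTION POLICY statement parsed at the top of this hunk, whose options are read by parseDuration and parseInt (same assumed import path; the names and values are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	stmt, err := influxql.ParseStatement(`ALTER RETENTION POLICY "one_day" ON "mydb" DURATION 2d`)
	if err != nil {
		log.Fatal(err)
	}
	alter := stmt.(*influxql.AlterRetentionPolicyStatement)
	// Options omitted from the statement stay nil, so "not specified" is
	// distinguishable from an explicit zero value.
	if alter.Duration != nil {
		fmt.Println("duration:", *alter.Duration)
	}
	fmt.Println("replication specified:", alter.Replication != nil)
}

Unlike the CREATE variant, ALTER uses pointer fields for DURATION and REPLICATION precisely because each option is optional here.

+// parseDuration parses a string and returns a duration literal.
+// This function assumes the DURATION token has already been consumed.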
+func (p *Parser) parseDuration() (time.Duration, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok != DURATIONVAL && tok != INF {
+		return 0, newParseError(tokstr(tok, lit), []string{"duration"}, pos)
+	}
+
+	if tok == INF {
+		return 0, nil
+	}
+
+	d, err := ParseDuration(lit)
+	if err != nil {
+		return 0, &ParseError{Message: err.Error(), Pos: pos}
+	}
+
+	return d, nil
+}
+
+// parseIdent parses an identifier.
+func (p *Parser) parseIdent() (string, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok != IDENT {
+		return "", newParseError(tokstr(tok, lit), []string{"identifier"}, pos)
+	}
+	return lit, nil
+}
+
+// parseIdentList parses a comma-delimited list of identifiers.
+func (p *Parser) parseIdentList() ([]string, error) {
+	// Parse first (required) identifier.
+	ident, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	idents := []string{ident}
+
+	// Parse remaining (optional) identifiers.
+	for {
+		if tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {
+			p.unscan()
+			return idents, nil
+		}
+
+		if ident, err = p.parseIdent(); err != nil {
+			return nil, err
+		}
+
+		idents = append(idents, ident)
+	}
+}
+
+// parseSegmentedIdents parses segmented identifiers.
+// e.g., "db"."rp".measurement or "db"..measurement
+func (p *Parser) parseSegmentedIdents() ([]string, error) {
+	ident, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	idents := []string{ident}
+
+	// Parse remaining (optional) identifiers.
+	for {
+		if tok, _, _ := p.scan(); tok != DOT {
+			// No more segments so we're done.
+			p.unscan()
+			break
+		}
+
+		if ch := p.peekRune(); ch == '/' {
+			// Next segment is a regex so we're done.
+			break
+		} else if ch == ':' {
+			// Next segment is context-specific so let caller handle it.
+			break
+		} else if ch == '.' {
+			// Add an empty identifier.
+			idents = append(idents, "")
+			continue
+		}
+
+		// Parse the next identifier.
+		if ident, err = p.parseIdent(); err != nil {
+			return nil, err
+		}
+
+		idents = append(idents, ident)
+	}
+
+	if len(idents) > 3 {
+		msg := fmt.Sprintf("too many segments in %s", QuoteIdent(idents...))
+		return nil, &ParseError{Message: msg}
+	}
+
+	return idents, nil
+}
+
+// parseString parses a string.
+func (p *Parser) parseString() (string, error) {
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok != STRING {
+		return "", newParseError(tokstr(tok, lit), []string{"string"}, pos)
+	}
+	return lit, nil
+}
+
+// parseStringList parses a comma-delimited list of strings.
+func (p *Parser) parseStringList() ([]string, error) {
+	// Parse first (required) string.
+	str, err := p.parseString()
+	if err != nil {
+		return nil, err
+	}
+	strs := []string{str}
+
+	// Parse remaining (optional) strings.
+	for {
+		if tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {
+			p.unscan()
+			return strs, nil
+		}
+
+		if str, err = p.parseString(); err != nil {
+			return nil, err
+		}
+
+		strs = append(strs, str)
+	}
+}
+
+// parseRevokeStatement parses a string and returns a revoke statement.
+// This function assumes the REVOKE token has already been consumed.
+func (p *Parser) parseRevokeStatement() (Statement, error) {
+	// Parse the privilege to be revoked.
+	priv, err := p.parsePrivilege()
+	if err != nil {
+		return nil, err
+	}
+
+	// Check for ON or FROM clauses.
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok == ON {
+		stmt, err := p.parseRevokeOnStatement()
+		if err != nil {
+			return nil, err
+		}
+		stmt.Privilege = priv
+		return stmt, nil
+	} else if tok == FROM {
+		// Admin privilege is only revoked on ALL PRIVILEGES.
+		if priv != AllPrivileges {
+			return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+		}
+		return p.parseRevokeAdminStatement()
+	}
+
+	// Only ON or FROM clauses are allowed after privilege.
+	if priv == AllPrivileges {
+		return nil, newParseError(tokstr(tok, lit), []string{"ON", "FROM"}, pos)
+	}
+	return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+}
+
+// parseRevokeOnStatement parses a string and returns a revoke statement.
+// This function assumes the [PRIVILEGE] ON tokens have already been consumed.
+func (p *Parser) parseRevokeOnStatement() (*RevokeStatement, error) {
+	stmt := &RevokeStatement{}
+
+	// Parse the name of the database.
+	lit, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.On = lit
+
+	// Parse FROM clause.
+	tok, pos, lit := p.scanIgnoreWhitespace()
+
+	// Check for required FROM token.
+	if tok != FROM {
+		return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos)
+	}
+
+	// Parse the name of the user.
+	lit, err = p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.User = lit
+
+	return stmt, nil
+}
+
+// parseRevokeAdminStatement parses a string and returns a revoke admin statement.
+// This function assumes the ALL [PRIVILEGES] FROM tokens have already been consumed.
+func (p *Parser) parseRevokeAdminStatement() (*RevokeAdminStatement, error) {
+	// Admin privilege is always false when the revoke admin clause is called.
+	stmt := &RevokeAdminStatement{}
+
+	// Parse the name of the user.
+	lit, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.User = lit
+
+	return stmt, nil
+}
+
+// parseGrantStatement parses a string and returns a grant statement.
+// This function assumes the GRANT token has already been consumed.
+func (p *Parser) parseGrantStatement() (Statement, error) {
+	// Parse the privilege to be granted.
+	priv, err := p.parsePrivilege()
+	if err != nil {
+		return nil, err
+	}
+
+	// Check for ON or TO clauses.
+	tok, pos, lit := p.scanIgnoreWhitespace()
+	if tok == ON {
+		stmt, err := p.parseGrantOnStatement()
+		if err != nil {
+			return nil, err
+		}
+		stmt.Privilege = priv
+		return stmt, nil
+	} else if tok == TO {
+		// Admin privilege is only granted on ALL PRIVILEGES.
+		if priv != AllPrivileges {
+			return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+		}
+		return p.parseGrantAdminStatement()
+	}
+
+	// Only ON or TO clauses are allowed after privilege.
+	if priv == AllPrivileges {
+		return nil, newParseError(tokstr(tok, lit), []string{"ON", "TO"}, pos)
+	}
+	return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
+}
+
+// parseGrantOnStatement parses a string and returns a grant statement.
+// This function assumes the [PRIVILEGE] ON tokens have already been consumed.
+func (p *Parser) parseGrantOnStatement() (*GrantStatement, error) {
+	stmt := &GrantStatement{}
+
+	// Parse the name of the database.
+	lit, err := p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.On = lit
+
+	// Parse TO clause.
+	tok, pos, lit := p.scanIgnoreWhitespace()
+
+	// Check for required TO token.
+	if tok != TO {
+		return nil, newParseError(tokstr(tok, lit), []string{"TO"}, pos)
+	}
+
+	// Parse the name of the user.
+	lit, err = p.parseIdent()
+	if err != nil {
+		return nil, err
+	}
+	stmt.User = lit
+
+	return stmt, nil
+}
+
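A brief sketch of the two GRANT shapes handled above (same assumed import path; the database and user names are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// GRANT <privilege> ON <db> TO <user> produces a *GrantStatement.
	stmt, err := influxql.ParseStatement(`GRANT READ ON "mydb" TO "jdoe"`)
	if err != nil {
		log.Fatal(err)
	}
	g := stmt.(*influxql.GrantStatement)
	fmt.Println(g.Privilege, g.On, g.User)

	// GRANT ALL [PRIVILEGES] TO <user> produces a *GrantAdminStatement instead.
	stmt, err = influxql.ParseStatement(`GRANT ALL PRIVILEGES TO "jdoe"`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", stmt)
}

REVOKE follows the same split: REVOKE ... ON ... FROM yields a *RevokeStatement, while REVOKE ALL [PRIVILEGES] FROM yields a *RevokeAdminStatement.

+// parseGrantAdminStatement parses a string and returns a grant admin statement.
+// This function assumes the ALL [PRIVILEGES] TO tokens have already been consumed.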
+func (p *Parser) parseGrantAdminStatement() (*GrantAdminStatement, error) { + // Admin privilege is always true when grant admin clause is called. + stmt := &GrantAdminStatement{} + + // Parse the name of the user. + lit, err := p.parseIdent() + if err != nil { + return nil, err + } + stmt.User = lit + + return stmt, nil +} + +// parsePrivilege parses a string and returns a Privilege +func (p *Parser) parsePrivilege() (Privilege, error) { + tok, pos, lit := p.scanIgnoreWhitespace() + switch tok { + case READ: + return ReadPrivilege, nil + case WRITE: + return WritePrivilege, nil + case ALL: + // Consume optional PRIVILEGES token + tok, pos, lit = p.scanIgnoreWhitespace() + if tok != PRIVILEGES { + p.unscan() + } + return AllPrivileges, nil + } + return 0, newParseError(tokstr(tok, lit), []string{"READ", "WRITE", "ALL [PRIVILEGES]"}, pos) +} + +// parseSelectStatement parses a select string and returns a Statement AST object. +// This function assumes the SELECT token has already been consumed. +func (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, error) { + stmt := &SelectStatement{} + var err error + + // Parse fields: "FIELD+". + if stmt.Fields, err = p.parseFields(); err != nil { + return nil, err + } + + // Parse target: "INTO" + if stmt.Target, err = p.parseTarget(tr); err != nil { + return nil, err + } + + // Parse source: "FROM". + if tok, pos, lit := p.scanIgnoreWhitespace(); tok != FROM { + return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) + } + if stmt.Sources, err = p.parseSources(); err != nil { + return nil, err + } + + // Parse condition: "WHERE EXPR". + if stmt.Condition, err = p.parseCondition(); err != nil { + return nil, err + } + + // Parse dimensions: "GROUP BY DIMENSION+". + if stmt.Dimensions, err = p.parseDimensions(); err != nil { + return nil, err + } + + // Parse fill options: "fill(