From ab59ea091a2374653ea8aeb5ed7675de61c08461 Mon Sep 17 00:00:00 2001 From: Stuart Clark Date: Thu, 3 Aug 2017 01:23:08 +0100 Subject: [PATCH] Add Consul & BoltDB datasource support (#178) * Add libkv support * Add vendoring --- data.go | 42 +- docs/content/functions/general.md | 45 + docs/content/index.md | 4 +- glide.yaml | 1 + libkv/libkv.go | 185 ++ test/integration/Dockerfile | 6 + test/integration/config.db | Bin 0 -> 32768 bytes test/integration/datasources_boltdb.bats | 27 + test/integration/datasources_consul.bats | 35 + test/integration/test.sh | 4 + vendor/github.com/boltdb/bolt/.gitignore | 4 + vendor/github.com/boltdb/bolt/LICENSE | 20 + vendor/github.com/boltdb/bolt/Makefile | 18 + vendor/github.com/boltdb/bolt/README.md | 916 ++++++++ vendor/github.com/boltdb/bolt/appveyor.yml | 18 + vendor/github.com/boltdb/bolt/bolt_386.go | 10 + vendor/github.com/boltdb/bolt/bolt_amd64.go | 10 + vendor/github.com/boltdb/bolt/bolt_arm.go | 28 + vendor/github.com/boltdb/bolt/bolt_arm64.go | 12 + vendor/github.com/boltdb/bolt/bolt_linux.go | 10 + vendor/github.com/boltdb/bolt/bolt_openbsd.go | 27 + vendor/github.com/boltdb/bolt/bolt_ppc.go | 9 + vendor/github.com/boltdb/bolt/bolt_ppc64.go | 12 + vendor/github.com/boltdb/bolt/bolt_ppc64le.go | 12 + vendor/github.com/boltdb/bolt/bolt_s390x.go | 12 + vendor/github.com/boltdb/bolt/bolt_unix.go | 89 + .../boltdb/bolt/bolt_unix_solaris.go | 90 + vendor/github.com/boltdb/bolt/bolt_windows.go | 144 ++ .../github.com/boltdb/bolt/boltsync_unix.go | 8 + vendor/github.com/boltdb/bolt/bucket.go | 777 +++++++ vendor/github.com/boltdb/bolt/bucket_test.go | 1909 +++++++++++++++++ .../github.com/boltdb/bolt/cmd/bolt/main.go | 1740 +++++++++++++++ .../boltdb/bolt/cmd/bolt/main_test.go | 356 +++ vendor/github.com/boltdb/bolt/cursor.go | 400 ++++ vendor/github.com/boltdb/bolt/cursor_test.go | 817 +++++++ vendor/github.com/boltdb/bolt/db.go | 1039 +++++++++ vendor/github.com/boltdb/bolt/db_test.go | 1545 +++++++++++++ vendor/github.com/boltdb/bolt/doc.go | 44 + vendor/github.com/boltdb/bolt/errors.go | 71 + vendor/github.com/boltdb/bolt/freelist.go | 252 +++ .../github.com/boltdb/bolt/freelist_test.go | 158 ++ vendor/github.com/boltdb/bolt/node.go | 604 ++++++ vendor/github.com/boltdb/bolt/node_test.go | 156 ++ vendor/github.com/boltdb/bolt/page.go | 197 ++ vendor/github.com/boltdb/bolt/page_test.go | 72 + vendor/github.com/boltdb/bolt/quick_test.go | 87 + .../github.com/boltdb/bolt/simulation_test.go | 329 +++ vendor/github.com/boltdb/bolt/tx.go | 684 ++++++ vendor/github.com/boltdb/bolt/tx_test.go | 716 +++++++ vendor/github.com/docker/libkv/.travis.yml | 31 + vendor/github.com/docker/libkv/LICENSE.code | 191 ++ vendor/github.com/docker/libkv/LICENSE.docs | 425 ++++ vendor/github.com/docker/libkv/MAINTAINERS | 40 + vendor/github.com/docker/libkv/README.md | 107 + .../docker/libkv/docs/compatibility.md | 82 + .../github.com/docker/libkv/docs/examples.md | 157 ++ vendor/github.com/docker/libkv/libkv.go | 40 + vendor/github.com/docker/libkv/libkv_test.go | 24 + .../github.com/docker/libkv/script/.validate | 33 + .../github.com/docker/libkv/script/coverage | 21 + .../docker/libkv/script/travis_consul.sh | 18 + .../docker/libkv/script/travis_etcd.sh | 11 + .../docker/libkv/script/travis_zk.sh | 12 + .../docker/libkv/script/validate-gofmt | 30 + .../docker/libkv/store/boltdb/boltdb.go | 474 ++++ 
.../docker/libkv/store/boltdb/boltdb_test.go | 144 ++ .../docker/libkv/store/consul/consul.go | 558 +++++ .../docker/libkv/store/consul/consul_test.go | 84 + .../github.com/docker/libkv/store/helpers.go | 47 + .../docker/libkv/store/mock/mock.go | 113 + vendor/github.com/docker/libkv/store/store.go | 132 ++ .../docker/libkv/testutils/utils.go | 622 ++++++ .../github.com/hashicorp/consul/api/README.md | 43 + vendor/github.com/hashicorp/consul/api/acl.go | 175 ++ .../hashicorp/consul/api/acl_test.go | 157 ++ .../github.com/hashicorp/consul/api/agent.go | 515 +++++ .../hashicorp/consul/api/agent_test.go | 788 +++++++ vendor/github.com/hashicorp/consul/api/api.go | 772 +++++++ .../hashicorp/consul/api/api_test.go | 537 +++++ .../hashicorp/consul/api/catalog.go | 198 ++ .../hashicorp/consul/api/catalog_test.go | 474 ++++ .../hashicorp/consul/api/coordinate.go | 67 + .../hashicorp/consul/api/coordinate_test.go | 44 + .../github.com/hashicorp/consul/api/event.go | 104 + .../hashicorp/consul/api/event_test.go | 50 + .../github.com/hashicorp/consul/api/health.go | 200 ++ .../hashicorp/consul/api/health_test.go | 354 +++ vendor/github.com/hashicorp/consul/api/kv.go | 420 ++++ .../hashicorp/consul/api/kv_test.go | 574 +++++ .../github.com/hashicorp/consul/api/lock.go | 385 ++++ .../hashicorp/consul/api/lock_test.go | 560 +++++ .../hashicorp/consul/api/operator.go | 11 + .../hashicorp/consul/api/operator_area.go | 168 ++ .../consul/api/operator_autopilot.go | 219 ++ .../consul/api/operator_autopilot_test.go | 104 + .../hashicorp/consul/api/operator_keyring.go | 83 + .../consul/api/operator_keyring_test.go | 73 + .../hashicorp/consul/api/operator_raft.go | 86 + .../consul/api/operator_raft_test.go | 38 + .../hashicorp/consul/api/prepared_query.go | 198 ++ .../consul/api/prepared_query_test.go | 133 ++ vendor/github.com/hashicorp/consul/api/raw.go | 24 + .../hashicorp/consul/api/semaphore.go | 513 +++++ .../hashicorp/consul/api/semaphore_test.go | 518 +++++ .../hashicorp/consul/api/session.go | 224 ++ .../hashicorp/consul/api/session_test.go | 392 ++++ .../hashicorp/consul/api/snapshot.go | 47 + .../hashicorp/consul/api/snapshot_test.go | 134 ++ .../github.com/hashicorp/consul/api/status.go | 43 + .../hashicorp/consul/api/status_test.go | 37 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 ++++ .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 56 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../hashicorp/go-rootcerts/.travis.yml | 12 + .../github.com/hashicorp/go-rootcerts/LICENSE | 363 ++++ .../hashicorp/go-rootcerts/Makefile | 8 + .../hashicorp/go-rootcerts/README.md | 43 + .../github.com/hashicorp/go-rootcerts/doc.go | 9 + .../hashicorp/go-rootcerts/rootcerts.go | 103 + .../hashicorp/go-rootcerts/rootcerts_base.go | 12 + .../go-rootcerts/rootcerts_darwin.go | 48 + .../go-rootcerts/rootcerts_darwin_test.go | 17 + .../hashicorp/go-rootcerts/rootcerts_test.go | 52 + .../test-fixtures/cafile/cacert.pem | 28 + .../capath-with-symlinks/securetrust.pem | 1 + .../capath-with-symlinks/thawte.pem | 1 + .../test-fixtures/capath/securetrust.pem | 22 + .../test-fixtures/capath/thawte.pem | 25 + .../hashicorp/serf/coordinate/client.go | 227 ++ .../hashicorp/serf/coordinate/client_test.go | 180 ++ .../hashicorp/serf/coordinate/config.go | 70 + .../hashicorp/serf/coordinate/coordinate.go | 203 ++ .../serf/coordinate/coordinate_test.go | 298 +++ .../serf/coordinate/performance_test.go | 182 ++ 
.../hashicorp/serf/coordinate/phantom.go | 187 ++ .../hashicorp/serf/coordinate/util_test.go | 27 + vendor/golang.org/x/net/context/context.go | 156 ++ .../golang.org/x/net/context/context_test.go | 583 +++++ .../x/net/context/ctxhttp/ctxhttp.go | 74 + .../x/net/context/ctxhttp/ctxhttp_17_test.go | 29 + .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 ++ .../net/context/ctxhttp/ctxhttp_pre17_test.go | 79 + .../x/net/context/ctxhttp/ctxhttp_test.go | 105 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/pre_go17.go | 300 +++ .../x/net/context/withtimeout_test.go | 26 + 147 files changed, 30764 insertions(+), 3 deletions(-) create mode 100644 libkv/libkv.go create mode 100644 test/integration/config.db create mode 100644 test/integration/datasources_boltdb.bats create mode 100644 test/integration/datasources_consul.bats create mode 100644 vendor/github.com/boltdb/bolt/.gitignore create mode 100644 vendor/github.com/boltdb/bolt/LICENSE create mode 100644 vendor/github.com/boltdb/bolt/Makefile create mode 100644 vendor/github.com/boltdb/bolt/README.md create mode 100644 vendor/github.com/boltdb/bolt/appveyor.yml create mode 100644 vendor/github.com/boltdb/bolt/bolt_386.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_amd64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_linux.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_openbsd.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64le.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_s390x.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go create mode 100644 vendor/github.com/boltdb/bolt/bolt_windows.go create mode 100644 vendor/github.com/boltdb/bolt/boltsync_unix.go create mode 100644 vendor/github.com/boltdb/bolt/bucket.go create mode 100644 vendor/github.com/boltdb/bolt/bucket_test.go create mode 100644 vendor/github.com/boltdb/bolt/cmd/bolt/main.go create mode 100644 vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go create mode 100644 vendor/github.com/boltdb/bolt/cursor.go create mode 100644 vendor/github.com/boltdb/bolt/cursor_test.go create mode 100644 vendor/github.com/boltdb/bolt/db.go create mode 100644 vendor/github.com/boltdb/bolt/db_test.go create mode 100644 vendor/github.com/boltdb/bolt/doc.go create mode 100644 vendor/github.com/boltdb/bolt/errors.go create mode 100644 vendor/github.com/boltdb/bolt/freelist.go create mode 100644 vendor/github.com/boltdb/bolt/freelist_test.go create mode 100644 vendor/github.com/boltdb/bolt/node.go create mode 100644 vendor/github.com/boltdb/bolt/node_test.go create mode 100644 vendor/github.com/boltdb/bolt/page.go create mode 100644 vendor/github.com/boltdb/bolt/page_test.go create mode 100644 vendor/github.com/boltdb/bolt/quick_test.go create mode 100644 vendor/github.com/boltdb/bolt/simulation_test.go create mode 100644 vendor/github.com/boltdb/bolt/tx.go create mode 100644 vendor/github.com/boltdb/bolt/tx_test.go create mode 100644 vendor/github.com/docker/libkv/.travis.yml create mode 100644 vendor/github.com/docker/libkv/LICENSE.code create mode 100644 
vendor/github.com/docker/libkv/LICENSE.docs create mode 100644 vendor/github.com/docker/libkv/MAINTAINERS create mode 100644 vendor/github.com/docker/libkv/README.md create mode 100644 vendor/github.com/docker/libkv/docs/compatibility.md create mode 100644 vendor/github.com/docker/libkv/docs/examples.md create mode 100644 vendor/github.com/docker/libkv/libkv.go create mode 100644 vendor/github.com/docker/libkv/libkv_test.go create mode 100644 vendor/github.com/docker/libkv/script/.validate create mode 100755 vendor/github.com/docker/libkv/script/coverage create mode 100755 vendor/github.com/docker/libkv/script/travis_consul.sh create mode 100755 vendor/github.com/docker/libkv/script/travis_etcd.sh create mode 100755 vendor/github.com/docker/libkv/script/travis_zk.sh create mode 100755 vendor/github.com/docker/libkv/script/validate-gofmt create mode 100644 vendor/github.com/docker/libkv/store/boltdb/boltdb.go create mode 100644 vendor/github.com/docker/libkv/store/boltdb/boltdb_test.go create mode 100644 vendor/github.com/docker/libkv/store/consul/consul.go create mode 100644 vendor/github.com/docker/libkv/store/consul/consul_test.go create mode 100644 vendor/github.com/docker/libkv/store/helpers.go create mode 100644 vendor/github.com/docker/libkv/store/mock/mock.go create mode 100644 vendor/github.com/docker/libkv/store/store.go create mode 100644 vendor/github.com/docker/libkv/testutils/utils.go create mode 100644 vendor/github.com/hashicorp/consul/api/README.md create mode 100644 vendor/github.com/hashicorp/consul/api/acl.go create mode 100644 vendor/github.com/hashicorp/consul/api/acl_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/agent.go create mode 100644 vendor/github.com/hashicorp/consul/api/agent_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/api.go create mode 100644 vendor/github.com/hashicorp/consul/api/api_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/catalog.go create mode 100644 vendor/github.com/hashicorp/consul/api/catalog_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate.go create mode 100644 vendor/github.com/hashicorp/consul/api/coordinate_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/event.go create mode 100644 vendor/github.com/hashicorp/consul/api/event_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/health.go create mode 100644 vendor/github.com/hashicorp/consul/api/health_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/kv.go create mode 100644 vendor/github.com/hashicorp/consul/api/kv_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/lock.go create mode 100644 vendor/github.com/hashicorp/consul/api/lock_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_area.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_autopilot_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_keyring_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft.go create mode 100644 vendor/github.com/hashicorp/consul/api/operator_raft_test.go create mode 100644 
vendor/github.com/hashicorp/consul/api/prepared_query.go create mode 100644 vendor/github.com/hashicorp/consul/api/prepared_query_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/raw.go create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore.go create mode 100644 vendor/github.com/hashicorp/consul/api/semaphore_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/session.go create mode 100644 vendor/github.com/hashicorp/consul/api/session_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/snapshot.go create mode 100644 vendor/github.com/hashicorp/consul/api/snapshot_test.go create mode 100644 vendor/github.com/hashicorp/consul/api/status.go create mode 100644 vendor/github.com/hashicorp/consul/api/status_test.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem create mode 120000 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem create mode 120000 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem create mode 100644 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem create mode 100644 vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/client_test.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/config.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/performance_test.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/phantom.go create mode 100644 vendor/github.com/hashicorp/serf/coordinate/util_test.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/context_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go create mode 100644 
vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/withtimeout_test.go diff --git a/data.go b/data.go index 3a1ded088..7f925a8a0 100644 --- a/data.go +++ b/data.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "io/ioutil" "log" @@ -14,6 +15,7 @@ import ( "time" "github.com/blang/vfs" + "github.com/hairyhenderson/gomplate/libkv" "github.com/hairyhenderson/gomplate/vault" ) @@ -42,6 +44,10 @@ func init() { addSourceReader("https", readHTTP) addSourceReader("file", readFile) addSourceReader("vault", readVault) + addSourceReader("consul", readLibKV) + addSourceReader("consul+http", readLibKV) + addSourceReader("consul+https", readLibKV) + addSourceReader("boltdb", readLibKV) } var sourceReaders map[string]func(*Source, ...string) ([]byte, error) @@ -85,6 +91,7 @@ type Source struct { FS vfs.Filesystem // used for file: URLs, nil otherwise HC *http.Client // used for http[s]: URLs, nil otherwise VC *vault.Client //used for vault: URLs, nil otherwise + KV *libkv.LibKV // used for consul:, etcd:, zookeeper: & boltdb: URLs, nil otherwise Header http.Header // used for http[s]: URLs, nil otherwise } @@ -98,7 +105,7 @@ func NewSource(alias string, URL *url.URL) (s *Source) { Ext: ext, } - if ext != "" { + if ext != "" && URL.Scheme != "boltdb" { mediatype := mime.TypeByExtension(ext) t, params, err := mime.ParseMediaType(mediatype) if err != nil { @@ -194,6 +201,9 @@ func (d *Data) Datasource(alias string, args ...string) interface{} { if source.Type == "application/toml" { return ty.TOML(s) } + if source.Type == "text/plain" { + return s + } log.Fatalf("Datasources of type %s not yet supported", source.Type) return nil } @@ -326,6 +336,36 @@ func readVault(source *Source, args ...string) ([]byte, error) { return data, nil } +func readLibKV(source *Source, args ...string) ([]byte, error) { + if source.KV == nil { + source.KV = libkv.New(source.URL) + err := source.KV.Login() + addCleanupHook(source.KV.Logout) + if err != nil { + return nil, err + } + } + + p := source.URL.Path + + if source.URL.Scheme == "boltdb" { + if len(args) != 1 { + return nil, errors.New("missing key") + } + p = args[0] + } else if len(args) == 1 { + p = p + "/" + args[0] + } + + data, err := source.KV.Read(p) + if err != nil { + return nil, err + } + source.Type = "text/plain" + + return data, nil +} + func parseHeaderArgs(headerArgs []string) map[string]http.Header { headers := make(map[string]http.Header) for _, v := range headerArgs { diff --git a/docs/content/functions/general.md b/docs/content/functions/general.md index ab3625739..16a549579 100644 --- a/docs/content/functions/general.md +++ b/docs/content/functions/general.md @@ -511,6 +511,51 @@ $ gomplate -d foo=https://httpbin.org/get -H 'foo=Foo: bar' -i '{{(datasource "f bar ``` +##### Usage with Consul data + +There are three URL schemes which can be used to retrieve data from [Hashicorp Consul](https://consul.io/). +The `consul://` (or `consul+http://`) scheme can optionally be used with a hostname and port to specify a server (e.g. `consul://localhost:8500`). +By default this will be contacted by HTTP, but the `$CONSUL_HTTP_SSL` can be used to switch to HTTPS mode. Alternatively +the `consul+https://` scheme can be used. + +If the server address isn't included the variable `$CONSUL_HTTP_ADDR` will be checked, otherwise `localhost:8500` will be used. 
+ +The following environment variables can be used: + +| name | usage | +| -- | -- | +| `CONSUL_HTTP_ADDR` | Hostname and optional port for connecting to Consul. Defaults to localhost and port 8500. | +| `CONSUL_TIMEOUT` | Timeout (in seconds) when communicating to Consul. Defaults to 10 seconds. | +| `CONSUL_HTTP_TOKEN` | The Consul token to use when connecting to the server. | +| `CONSUL_HTTP_AUTH` | Should be specified as `username:password`. Used to authenticate to the server. | +| `CONSUL_HTTP_SSL` | Switch to HTTPS mode if set to a true value. It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False. Alternatively use the `consul+https://` scheme. | +| `CONSUL_TLS_SERVER_NAME` | The server name to use as the SNI host when connecting to Consul via TLS. | +| `CONSUL_CACERT` | If specified, points to a CA file for verifying the Consul server using TLS. | +| `CONSUL_CAPATH` | If specified, points to a directory of CA files for verifying the Consul server using TLS. | +| `CONSUL_CLIENT_CERT` | Client certificate file for certificate authentication. Both a certificate and key are required. | +| `CONSUL_CLIENT_KEY` | Client key file for certificate authentication. Both a certificate and key are required. | +| `CONSUL_HTTP_SSL_VERIFY` | Disable Consul TLS certificate checking. It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False. | + +If a path is included, it is used as a prefix for all uses of the datasource. + +##### Usage with BoltDB data + +[BoltDB](https://github.com/boltdb/bolt) is a simple local key/value store used by many Go tools. + +It can be accessed using the `boltdb://` scheme followed by the full path to the database file, +with the bucket name specified using the `#fragment` identifier (e.g. `boltdb:////tmp/database.db#bucket`). + +As access is via [libkv](https://github.com/docker/libkv), the first 8 bytes of each value are used as an +incrementing last-modified index value. Therefore all values must be at least 9 bytes long, with the first +8 bytes being ignored. + +The following environment variables can be used: + +| name | usage | +| -- | -- | +| `BOLTDB_TIMEOUT` | Timeout (in seconds) to wait for a lock on the database file when opening. | +| `BOLTDB_PERSIST` | If set, keep the database open instead of closing after each read. It accepts 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False. | + +##### Usage with Vault data + +The special `vault://` URL scheme can be used to retrieve data from [Hashicorp diff --git a/docs/content/index.md b/docs/content/index.md index 244d475d9..611f6701b 100644 --- a/docs/content/index.md +++ b/docs/content/index.md @@ -9,8 +9,8 @@ menu: A [Go template](https://golang.org/pkg/text/template/)-based CLI tool. `gomplate` can be used as an alternative to [`envsubst`](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) but also supports -additional template datasources such as: JSON, YAML, AWS EC2 metadata, and -[Hashicorp Vault](https://www.vaultproject.io/) secrets. +additional template datasources such as: JSON, YAML, AWS EC2 metadata, [BoltDB](https://github.com/boltdb/bolt), +[Hashicorp Consul](https://www.consul.io/) and [Hashicorp Vault](https://www.vaultproject.io/) secrets. I really like `envsubst` for use as a super-minimalist template processor. But its simplicity is also its biggest flaw: it's all-or-nothing with shell-like variables.
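For illustration only, here is a hedged sketch of how the new datasources might be exercised from the command line. The key `foo/bar`, the bucket name `bucket`, the path `/tmp/database.db` and the stored values are hypothetical, and a local Consul dev agent on the default `localhost:8500` is assumed (as in the integration test setup):

```console
# Consul: store a value, then render it through a gomplate datasource
$ consul agent -dev >/dev/null 2>&1 &
$ consul kv put foo/bar baz
$ gomplate -d consul=consul:// -i '{{ (datasource "consul" "foo/bar") }}'
baz

# BoltDB: read a key from a bucket in an existing database file.
# Remember that the first 8 bytes of each stored value are treated as
# libkv metadata and are not returned.
$ gomplate -d config='boltdb:////tmp/database.db#bucket' -i '{{ (datasource "config" "mykey") }}'
```

Note the difference implied by `readLibKV` in data.go: for `boltdb://` the key is always passed as the datasource argument (the URL path is the database file itself), whereas for `consul://` any URL path is used as a prefix that the argument is appended to.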
diff --git a/glide.yaml b/glide.yaml index ffb52fe91..2dc8d62d6 100644 --- a/glide.yaml +++ b/glide.yaml @@ -18,6 +18,7 @@ import: - codec - package: github.com/hairyhenderson/toml version: support-map-interface-keys +- package: github.com/docker/libkv testImport: - package: github.com/stretchr/testify version: ^1.1.4 diff --git a/libkv/libkv.go b/libkv/libkv.go new file mode 100644 index 000000000..6b8cb24f4 --- /dev/null +++ b/libkv/libkv.go @@ -0,0 +1,185 @@ +package libkv + +import ( + "crypto/tls" + "errors" + "log" + "net/url" + "strconv" + "time" + + "github.com/blang/vfs" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/boltdb" + "github.com/docker/libkv/store/consul" + "github.com/hairyhenderson/gomplate/env" + consulapi "github.com/hashicorp/consul/api" +) + +// logFatal is defined so log.Fatal calls can be overridden for testing +var logFatal = log.Fatal + +// LibKV - +type LibKV struct { + store store.Store + fs vfs.Filesystem +} + +type SetupDetails struct { + sourceType store.Backend + client string + options *store.Config +} + +// New - instantiate a new +func New(url *url.URL) *LibKV { + var setup *SetupDetails + var err error + + if url.Scheme == "consul" || url.Scheme == "consul+http" { + setup, err = setupConsul(url, false) + if err != nil { + logFatal("consul setup error", err) + } + } + if url.Scheme == "consul+https" { + setup, err = setupConsul(url, true) + if err != nil { + logFatal("consul setup error", err) + } + } + if url.Scheme == "boltdb" { + setup, err = setupBoltDB(url, false) + if err != nil { + logFatal("boltdb setup error", err) + } + } + + if setup.client == "" { + logFatal("missing client location") + } + + kv, err := libkv.NewStore( + setup.sourceType, + []string{setup.client}, + setup.options, + ) + if err != nil { + logFatal("Cannot create store", err) + } + + return &LibKV{kv, nil} +} + +func setupConsul(url *url.URL, enableTLS bool) (*SetupDetails, error) { + setup := &SetupDetails{} + consul.Register() + setup.sourceType = store.CONSUL + setup.client = env.Getenv("CONSUL_HTTP_ADDR", "localhost:8500") + setup.options = &store.Config{} + if timeout := env.Getenv("CONSUL_TIMEOUT", ""); timeout != "" { + num, err := strconv.ParseInt(timeout, 10, 16) + if err != nil { + return nil, err + } + setup.options.ConnectionTimeout = time.Duration(num) * time.Second + } + if ssl := env.Getenv("CONSUL_HTTP_SSL", ""); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + return nil, err + } + enableTLS = enabled + } + if enableTLS { + config, err := setupTLS("CONSUL") + if err != nil { + return nil, err + } + setup.options.TLS = config + } + return setup, nil +} + +func setupBoltDB(url *url.URL, enableTLS bool) (*SetupDetails, error) { + setup := &SetupDetails{} + boltdb.Register() + setup.sourceType = store.BOLTDB + setup.client = url.Path + setup.options = &store.Config{} + setup.options.Bucket = url.Fragment + if setup.options.Bucket == "" { + return nil, errors.New("missing bucket") + } + if timeout := env.Getenv("BOLTDB_TIMEOUT", ""); timeout != "" { + num, err := strconv.ParseInt(timeout, 10, 16) + if err != nil { + return nil, err + } + setup.options.ConnectionTimeout = time.Duration(num) * time.Second + } + if persist := env.Getenv("BOLTDB_PERSIST", ""); persist != "" { + enabled, err := strconv.ParseBool(persist) + if err != nil { + return nil, err + } + setup.options.PersistConnection = enabled + } + return setup, nil +} + +func setupTLS(prefix string) 
(*tls.Config, error) { + tlsConfig := &consulapi.TLSConfig{} + + if v := env.Getenv(prefix+"_TLS_SERVER_NAME", ""); v != "" { + tlsConfig.Address = v + } + if v := env.Getenv(prefix+"_CACERT", ""); v != "" { + tlsConfig.CAFile = v + } + if v := env.Getenv(prefix+"_CAPATH", ""); v != "" { + tlsConfig.CAPath = v + } + if v := env.Getenv(prefix+"_CLIENT_CERT", ""); v != "" { + tlsConfig.CertFile = v + } + if v := env.Getenv(prefix+"_CLIENT_KEY", ""); v != "" { + tlsConfig.KeyFile = v + } + if v := env.Getenv(prefix+"_HTTP_SSL_VERIFY", ""); v != "" { + verify, err := strconv.ParseBool(v) + if err != nil { + return nil, err + } + if !verify { + tlsConfig.InsecureSkipVerify = true + } + } + + config, err := consulapi.SetupTLSConfig(tlsConfig) + if err != nil { + return nil, err + } + + return config, nil +} + +// Login - +func (kv *LibKV) Login() error { + return nil +} + +// Logout - +func (kv *LibKV) Logout() { +} + +// Read - +func (kv *LibKV) Read(path string) ([]byte, error) { + data, err := kv.store.Get(path) + if err != nil { + return nil, err + } + + return data.Value, nil +} diff --git a/test/integration/Dockerfile b/test/integration/Dockerfile index da792ad69..bdacd19e7 100644 --- a/test/integration/Dockerfile +++ b/test/integration/Dockerfile @@ -1,6 +1,7 @@ FROM alpine:edge ENV VAULT_VER 0.7.0 +ENV CONSUL_VER 0.9.0 RUN apk add --no-cache \ curl \ bash \ @@ -10,6 +11,10 @@ RUN apk add --no-cache \ && unzip /tmp/vault.zip \ && mv vault /bin/vault \ && rm /tmp/vault.zip \ + && curl -L -o /tmp/consul.zip https://releases.hashicorp.com/consul/${CONSUL_VER}/consul_${CONSUL_VER}_linux_amd64.zip \ + && unzip /tmp/consul.zip \ + && mv consul /bin/consul \ + && rm /tmp/consul.zip \ && apk del curl RUN mkdir /lib64 \ @@ -20,5 +25,6 @@ COPY mirror /bin/mirror COPY *.sh /tests/ COPY *.bash /tests/ COPY *.bats /tests/ +COPY *.db /test/integration/ CMD ["/tests/test.sh"] diff --git a/test/integration/config.db b/test/integration/config.db new file mode 100644 index 0000000000000000000000000000000000000000..e68a29c678883ab54578c35f7ea081ed39b855a0 GIT binary patch literal 32768 zcmeI(F-`(O6adf_6%)1K2zmpxvoXP5~Gcmu(h!F1YX7qSaATYoMGn& z6ih4#Lh>eAW_EXWc6reJ`Lk73m$gz4-nIvg+-`k8UQSo*rLj@!%~HR7zP^8)?zW?! 
[the remainder of the config.db binary literal, the new test/integration/datasources_boltdb.bats and test/integration/datasources_consul.bats files, and the header of the test/integration/test.sh diff are garbled here; only the surviving test.sh additions follow] +export CONSUL_HTTP_ADDR=http://127.0.0.1:8500 + +consul agent -dev -log-level=err >&/dev/null & + bats $(dirname $0) diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore new file mode 100644 index 000000000..c7bd2b7a5 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE new file mode 100644 index 000000000..004e77fe5 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile new file mode 100644 index 000000000..e035e63ad --- /dev/null +++ b/vendor/github.com/boltdb/bolt/Makefile @@ -0,0 +1,18 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + +test: + @go test -v -cover . + @go test -v ./cmd/bolt + +.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md new file mode 100644 index 000000000..7d43a15b2 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/README.md @@ -0,0 +1,916 @@ +Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key.
The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. 
To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Println("Allocated ID %d", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err := tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. 
+if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `DB.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different than the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. 
To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. 
Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. + +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. 
+ + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. 
+ +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. + + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. 
Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets cause poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bit systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. + +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where the key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair.
During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. 
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. +* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. 
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework. + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml new file mode 100644 index 000000000..6e26e941d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/appveyor.yml @@ -0,0 +1,18 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\boltdb\bolt + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - go version + - go env + - go get -v -t ./... + +build_script: + - go test -v ./... diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 000000000..820d533c1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,10 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 000000000..98fafdb47 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,10 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 000000000..7e5cb4b94 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 000000000..b26d84f91 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. 
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 000000000..2b6766614 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 000000000..7058c3d73 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 000000000..645ddc3ed --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 000000000..9331d9771 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 000000000..8c143bc5d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 000000000..d7c39af92 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 000000000..cad62dda1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 000000000..307bf2b3e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. 
+ if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 000000000..b00fb0720 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path + lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 000000000..f50442523 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 000000000..0c5bf2746 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,777 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. 
+ MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. 
+ unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. 
+ child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. 
+// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. + lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. 
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. 
+ var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. 
+ BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/boltdb/bolt/bucket_test.go b/vendor/github.com/boltdb/bolt/bucket_test.go new file mode 100644 index 000000000..cddbe2713 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket_test.go @@ -0,0 +1,1909 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "log" + "math/rand" + "os" + "strconv" + "strings" + "testing" + "testing/quick" + + "github.com/boltdb/bolt" +) + +// Ensure that a bucket that gets a non-existent key returns nil. +func TestBucket_Get_NonExistent(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatal("expected nil value") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can read a value that is not flushed yet. +func TestBucket_Get_FromNode(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket retrieved via Get() returns a nil. +func TestBucket_Get_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + + if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { + t.Fatal("expected nil value") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a slice returned from a bucket has a capacity equal to its length. +// This also allows slices to be appended to since it will require a realloc by Go. +// +// https://github.com/boltdb/bolt/issues/544 +func TestBucket_Get_Capacity(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Write key to a bucket. 
+ if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("bucket")) + if err != nil { + return err + } + return b.Put([]byte("key"), []byte("val")) + }); err != nil { + t.Fatal(err) + } + + // Retrieve value and attempt to append to it. + if err := db.Update(func(tx *bolt.Tx) error { + k, v := tx.Bucket([]byte("bucket")).Cursor().First() + + // Verify capacity. + if len(k) != cap(k) { + t.Fatalf("unexpected key slice capacity: %d", cap(k)) + } else if len(v) != cap(v) { + t.Fatalf("unexpected value slice capacity: %d", cap(v)) + } + + // Ensure slice can be appended to without a segfault. + k = append(k, []byte("123")...) + v = append(v, []byte("123")...) + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can write a key/value. +func TestBucket_Put(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if !bytes.Equal([]byte("bar"), v) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can rewrite a key in the same transaction. +func TestBucket_Put_Repeat(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("baz")); err != nil { + t.Fatal(err) + } + + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if !bytes.Equal([]byte("baz"), value) { + t.Fatalf("unexpected value: %v", value) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can write a bunch of large values. +func TestBucket_Put_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + count, factor := 100, 200 + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 1; i < count; i++ { + if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i < count; i++ { + value := b.Get([]byte(strings.Repeat("0", i*factor))) + if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) { + t.Fatalf("unexpected value: %v", value) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a database can perform multiple large appends safely. 
+func TestDB_Put_VeryLarge(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + n, batchN := 400000, 200000 + ksize, vsize := 8, 500 + + db := MustOpenDB() + defer db.MustClose() + + for i := 0; i < n; i += batchN { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for j := 0; j < batchN; j++ { + k, v := make([]byte, ksize), make([]byte, vsize) + binary.BigEndian.PutUint32(k, uint32(i+j)) + if err := b.Put(k, v); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + } +} + +// Ensure that a setting a value on a key with a bucket value returns an error. +func TestBucket_Put_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b0, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a setting a value while the transaction is closed returns an error. +func TestBucket_Put_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that setting a value on a read-only bucket returns an error. +func TestBucket_Put_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can delete an existing key. +func TestBucket_Delete(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a large set of keys will work correctly. 
+func TestBucket_Delete_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + if err := b.Delete([]byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + if v := b.Get([]byte(strconv.Itoa(i))); v != nil { + t.Fatalf("unexpected value: %v, i=%d", v, i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Deleting a very large list of keys will cause the freelist to use overflow. +func TestBucket_Delete_FreelistOverflow(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db := MustOpenDB() + defer db.MustClose() + + k := make([]byte, 16) + for i := uint64(0); i < 10000; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("0")) + if err != nil { + t.Fatalf("bucket error: %s", err) + } + + for j := uint64(0); j < 1000; j++ { + binary.BigEndian.PutUint64(k[:8], i) + binary.BigEndian.PutUint64(k[8:], j) + if err := b.Put(k, nil); err != nil { + t.Fatalf("put error: %s", err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + } + + // Delete all of them in one large transaction + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("0")) + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + if err := c.Delete(); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that accessing and updating nested buckets is ok across transactions. +func TestBucket_Nested(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + // Create a widgets bucket. + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + // Create a widgets/foo bucket. + _, err = b.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + // Create a widgets/bar key. + if err := b.Put([]byte("bar"), []byte("0000")); err != nil { + t.Fatal(err) + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Update widgets/bar. + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Cause a split. + if err := db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 10000; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Insert into widgets/foo/baz. + if err := db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // Verify. 
+ if err := db.View(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) { + t.Fatalf("unexpected value: %v", v) + } + if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) { + t.Fatalf("unexpected value: %v", v) + } + for i := 0; i < 10000; i++ { + if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) { + t.Fatalf("unexpected value: %v", v) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket using Delete() returns an error. +func TestBucket_Delete_Bucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a key on a read-only bucket returns an error. +func TestBucket_Delete_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a deleting value while the transaction is closed returns an error. +func TestBucket_Delete_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that deleting a bucket causes nested buckets to be deleted. +func TestBucket_DeleteBucket_Nested(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + bar, err := foo.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. 
+func TestBucket_DeleteBucket_Nested2(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + bar, err := foo.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + widgets := tx.Bucket([]byte("widgets")) + if widgets == nil { + t.Fatal("expected widgets bucket") + } + + foo := widgets.Bucket([]byte("foo")) + if foo == nil { + t.Fatal("expected foo bucket") + } + + bar := foo.Bucket([]byte("bar")) + if bar == nil { + t.Fatal("expected bar bucket") + } + + if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) != nil { + t.Fatal("expected bucket to be deleted") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a child bucket with multiple pages causes all pages to get collected. +// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly. +func TestBucket_DeleteBucket_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a simple value retrieved via Bucket() returns a nil. +func TestBucket_Bucket_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil { + t.Fatal("expected nil bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that creating a bucket on an existing non-bucket key returns an error. +func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket on an existing non-bucket key returns an error. 
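The nested-bucket tests above (TestBucket_Nested, TestBucket_DeleteBucket_Nested, TestBucket_DeleteBucket_Nested2) exercise parent/child bucket creation and deletion. As a rough caller-side sketch of the same pattern (the "app.db" path, bucket names, and values below are invented for illustration, not taken from this patch), child buckets are created and addressed through their parent inside a single transaction:

    package main

    import (
        "log"

        "github.com/boltdb/bolt"
    )

    func main() {
        // Open (or create) an example database file; the path is arbitrary.
        db, err := bolt.Open("app.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.Update(func(tx *bolt.Tx) error {
            // Create a parent bucket, then a child bucket inside it.
            users, err := tx.CreateBucketIfNotExists([]byte("users"))
            if err != nil {
                return err
            }
            alice, err := users.CreateBucketIfNotExists([]byte("alice"))
            if err != nil {
                return err
            }
            // Keys in the child bucket are independent of the parent's keys.
            return alice.Put([]byte("email"), []byte("alice@example.com"))
        }); err != nil {
            log.Fatal(err)
        }
    }

Deleting the parent with tx.DeleteBucket([]byte("users")) removes the child bucket and its keys as well, which is the behaviour the nested-deletion tests above verify.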
+func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure bucket can set and update its sequence number. +func TestBucket_Sequence(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + bkt, err := tx.CreateBucket([]byte("0")) + if err != nil { + t.Fatal(err) + } + + // Retrieve sequence. + if v := bkt.Sequence(); v != 0 { + t.Fatalf("unexpected sequence: %d", v) + } + + // Update sequence. + if err := bkt.SetSequence(1000); err != nil { + t.Fatal(err) + } + + // Read sequence again. + if v := bkt.Sequence(); v != 1000 { + t.Fatalf("unexpected sequence: %d", v) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify sequence in separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 { + t.Fatalf("unexpected sequence: %d", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can return an autoincrementing sequence. +func TestBucket_NextSequence(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + woojits, err := tx.CreateBucket([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + + // Make sure sequence increments. + if seq, err := widgets.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 1 { + t.Fatalf("unexpecte sequence: %d", seq) + } + + if seq, err := widgets.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 2 { + t.Fatalf("unexpected sequence: %d", seq) + } + + // Buckets should be separate. + if seq, err := woojits.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 1 { + t.Fatalf("unexpected sequence: %d", 1) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket will persist an autoincrementing sequence even if its +// the only thing updated on the bucket. +// https://github.com/boltdb/bolt/issues/296 +func TestBucket_NextSequence_Persist(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + seq, err := tx.Bucket([]byte("widgets")).NextSequence() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } else if seq != 2 { + t.Fatalf("unexpected sequence: %d", seq) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that retrieving the next sequence on a read-only bucket returns an error. 
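The sequence tests above cover Bucket.Sequence, SetSequence, and NextSequence, including persistence of the counter across transactions. As an illustrative sketch of the usual caller-side idiom (the bucket name, key layout, and payload are made up for the example), NextSequence is called inside the same writable transaction as the Put so the generated ID is committed atomically with the record:

    package main

    import (
        "encoding/binary"
        "log"

        "github.com/boltdb/bolt"
    )

    func main() {
        db, err := bolt.Open("app.db", 0600, nil) // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.Update(func(tx *bolt.Tx) error {
            b, err := tx.CreateBucketIfNotExists([]byte("events"))
            if err != nil {
                return err
            }
            // Mint a new autoincrementing ID; it is only durable if the
            // enclosing transaction commits.
            id, err := b.NextSequence()
            if err != nil {
                return err
            }
            key := make([]byte, 8)
            binary.BigEndian.PutUint64(key, id)
            return b.Put(key, []byte("payload"))
        }); err != nil {
            log.Fatal(err)
        }
    }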
+func TestBucket_NextSequence_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + _, err := tx.Bucket([]byte("widgets")).NextSequence() + if err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that retrieving the next sequence for a bucket on a closed database return an error. +func TestBucket_NextSequence_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if _, err := b.NextSequence(); err != bolt.ErrTxClosed { + t.Fatal(err) + } +} + +// Ensure a user can loop over all key/value pairs in a bucket. +func TestBucket_ForEach(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0001")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0002")); err != nil { + t.Fatal(err) + } + + var index int + if err := b.ForEach(func(k, v []byte) error { + switch index { + case 0: + if !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } + case 1: + if !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0001")) { + t.Fatalf("unexpected value: %v", v) + } + case 2: + if !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0000")) { + t.Fatalf("unexpected value: %v", v) + } + } + index++ + return nil + }); err != nil { + t.Fatal(err) + } + + if index != 3 { + t.Fatalf("unexpected index: %d", index) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a database can stop iteration early. +func TestBucket_ForEach_ShortCircuit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0000")); err != nil { + t.Fatal(err) + } + + var index int + if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + index++ + if bytes.Equal(k, []byte("baz")) { + return errors.New("marker") + } + return nil + }); err == nil || err.Error() != "marker" { + t.Fatalf("unexpected error: %s", err) + } + if index != 2 { + t.Fatalf("unexpected index: %d", index) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that looping over a bucket on a closed database returns an error. 
+func TestBucket_ForEach_Closed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that an error is returned when inserting with an empty key. +func TestBucket_Put_EmptyKey(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired { + t.Fatalf("unexpected error: %s", err) + } + if err := b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that an error is returned when inserting with a key that's too large. +func TestBucket_Put_KeyTooLarge(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that an error is returned when inserting a value that's too large. +func TestBucket_Put_ValueTooLarge(t *testing.T) { + // Skip this test on DroneCI because the machine is resource constrained. + if os.Getenv("DRONE") == "true" { + t.Skip("not enough RAM for test") + } + + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket can calculate stats. +func TestBucket_Stats(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Add bucket with fewer keys but one big value. 
+ bigKey := []byte("really-big-value") + for i := 0; i < 500; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + } + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("woojits")).Stats() + if stats.BranchPageN != 1 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 7 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 2 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 501 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 2 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } + + branchInuse := 16 // branch page header + branchInuse += 7 * 16 // branch elements + branchInuse += 7 * 3 // branch keys (6 3-byte keys) + if stats.BranchInuse != branchInuse { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } + + leafInuse := 7 * 16 // leaf page header + leafInuse += 501 * 16 // leaf elements + leafInuse += 500*3 + len(bigKey) // leaf keys + leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values + if stats.LeafInuse != leafInuse { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + // Only check allocations for 4KB pages. + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 4096 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 36864 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 0 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 0 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket with random insertion utilizes fill percentage correctly. +func TestBucket_Stats_RandomFill(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } else if os.Getpagesize() != 4096 { + t.Skip("invalid page size for test") + } + + db := MustOpenDB() + defer db.MustClose() + + // Add a set of values in random order. It will be the same random + // order so we can maintain consistency between test runs. 
+ var count int + rand := rand.New(rand.NewSource(42)) + for _, i := range rand.Perm(1000) { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + b.FillPercent = 0.9 + for _, j := range rand.Perm(100) { + index := (j * 10000) + i + if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil { + t.Fatal(err) + } + count++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("woojits")).Stats() + if stats.KeyN != 100000 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } + + if stats.BranchPageN != 98 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.BranchInuse != 130984 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.BranchAlloc != 401408 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } + + if stats.LeafPageN != 3412 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.LeafInuse != 4742482 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } else if stats.LeafAlloc != 13975552 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket can calculate stats. +func TestBucket_Stats_Small(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. 
+ b, err := tx.CreateBucket([]byte("whozawhats")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 0 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 1 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 1 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 0 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 0 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 16+16+6 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestBucket_Stats_EmptyBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. + if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 0 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 0 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 1 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 0 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 0 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 16 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a bucket can calculate stats. 
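The stats tests above assert exact page and byte counts for specific bucket layouts. For a sense of how the same information is read in normal use (the database path and bucket name below are placeholders), a caller takes a BucketStats snapshot inside a read-only transaction and inspects fields such as KeyN, Depth, and LeafInuse:

    package main

    import (
        "fmt"
        "log"

        "github.com/boltdb/bolt"
    )

    func main() {
        db, err := bolt.Open("app.db", 0600, nil) // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.View(func(tx *bolt.Tx) error {
            b := tx.Bucket([]byte("events"))
            if b == nil {
                return nil // bucket not created yet; nothing to report
            }
            s := b.Stats()
            fmt.Printf("keys=%d depth=%d leaf bytes in use=%d\n", s.KeyN, s.Depth, s.LeafInuse)
            return nil
        }); err != nil {
            log.Fatal(err)
        }
    }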
+func TestBucket_Stats_Nested(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil { + t.Fatal(err) + } + } + + bar, err := b.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 10; i++ { + if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + + baz, err := bar.CreateBucket([]byte("baz")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 10; i++ { + if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("foo")) + stats := b.Stats() + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 2 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 122 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 3 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } + + foo := 16 // foo (pghdr) + foo += 101 * 16 // foo leaf elements + foo += 100*2 + 100*2 // foo leaf key/values + foo += 3 + 16 // foo -> bar key/value + + bar := 16 // bar (pghdr) + bar += 11 * 16 // bar leaf elements + bar += 10 + 10 // bar leaf key/values + bar += 3 + 16 // bar -> baz key/value + + baz := 16 // baz (inline) (pghdr) + baz += 10 * 16 // baz leaf elements + baz += 10 + 10 // baz leaf key/values + + if stats.LeafInuse != foo+bar+baz { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 8192 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 3 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != baz { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a large bucket can calculate stats. +func TestBucket_Stats_Large(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + db := MustOpenDB() + defer db.MustClose() + + var index int + for i := 0; i < 100; i++ { + // Add bucket with lots of keys. 
+ if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 1000; i++ { + if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil { + t.Fatal(err) + } + index++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + db.MustCheck() + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("widgets")).Stats() + if stats.BranchPageN != 13 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 1196 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 100000 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 3 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 25257 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 2596916 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 53248 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 4898816 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 0 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 0 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can write random keys and values across multiple transactions. +func TestBucket_Put_Single(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + index := 0 + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + m := make(map[string][]byte) + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + for _, item := range items { + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { + panic("put error: " + err.Error()) + } + m[string(item.Key)] = item.Value + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify all key/values so far. + if err := db.View(func(tx *bolt.Tx) error { + i := 0 + for k, v := range m { + value := tx.Bucket([]byte("widgets")).Get([]byte(k)) + if !bytes.Equal(value, v) { + t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) + db.CopyTempFile() + t.FailNow() + } + i++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + index++ + return true + }, nil); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can insert multiple key/value pairs at once. +func TestBucket_Put_Multiple(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. 
+ if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify all items exist. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + value := b.Get(item.Key) + if !bytes.Equal(item.Value, value) { + db.CopyTempFile() + t.Fatalf("exp=%x; got=%x", item.Value, value) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + return true + }, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. +func TestBucket_Delete_Quick(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Remove items one at a time and check consistency. + for _, item := range items { + if err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete(item.Key) + }); err != nil { + t.Fatal(err) + } + } + + // Anything before our deletion index should be nil. + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) + return nil + }); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + return true + }, qconfig()); err != nil { + t.Error(err) + } +} + +func ExampleBucket_Put() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a bucket. + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + + // Set the value "bar" for the key "foo". + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Read value back in a different read-only transaction. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value of 'foo' is: bar +} + +func ExampleBucket_Delete() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a bucket. 
+ b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + + // Set the value "bar" for the key "foo". + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + + // Retrieve the key back from the database and verify it. + value := b.Get([]byte("foo")) + fmt.Printf("The value of 'foo' was: %s\n", value) + + return nil + }); err != nil { + log.Fatal(err) + } + + // Delete the key in a different write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) + }); err != nil { + log.Fatal(err) + } + + // Retrieve the key again. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if value == nil { + fmt.Printf("The value of 'foo' is now: nil\n") + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value of 'foo' was: bar + // The value of 'foo' is now: nil +} + +func ExampleBucket_ForEach() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Insert data into a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + return err + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + return err + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + return err + } + + // Iterate over items in sorted key order. + if err := b.ForEach(func(k, v []byte) error { + fmt.Printf("A %s is %s.\n", k, v) + return nil + }); err != nil { + return err + } + + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // A cat is lame. + // A dog is fun. + // A liger is awesome. +} diff --git a/vendor/github.com/boltdb/bolt/cmd/bolt/main.go b/vendor/github.com/boltdb/bolt/cmd/bolt/main.go new file mode 100644 index 000000000..057eca50a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cmd/bolt/main.go @@ -0,0 +1,1740 @@ +package main + +import ( + "bytes" + "encoding/binary" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" + "unsafe" + + "github.com/boltdb/bolt" +) + +var ( + // ErrUsage is returned when a usage message was printed and the process + // should simply exit with an error. + ErrUsage = errors.New("usage") + + // ErrUnknownCommand is returned when a CLI command is not specified. + ErrUnknownCommand = errors.New("unknown command") + + // ErrPathRequired is returned when the path to a Bolt database is not specified. + ErrPathRequired = errors.New("path required") + + // ErrFileNotFound is returned when a Bolt database does not exist. + ErrFileNotFound = errors.New("file not found") + + // ErrInvalidValue is returned when a benchmark reads an unexpected value. + ErrInvalidValue = errors.New("invalid value") + + // ErrCorrupt is returned when a checking a data file finds errors. + ErrCorrupt = errors.New("invalid value") + + // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly + // divided by the iteration count. 
+ ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") + + // ErrPageIDRequired is returned when a required page id is not specified. + ErrPageIDRequired = errors.New("page id required") + + // ErrPageNotFound is returned when specifying a page above the high water mark. + ErrPageNotFound = errors.New("page not found") + + // ErrPageFreed is returned when reading a page that has already been freed. + ErrPageFreed = errors.New("page freed") +) + +// PageHeaderSize represents the size of the bolt.page header. +const PageHeaderSize = 16 + +func main() { + m := NewMain() + if err := m.Run(os.Args[1:]...); err == ErrUsage { + os.Exit(2) + } else if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } +} + +// Main represents the main program execution. +type Main struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewMain returns a new instance of Main connect to the standard input/output. +func NewMain() *Main { + return &Main{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +// Run executes the program. +func (m *Main) Run(args ...string) error { + // Require a command at the beginning. + if len(args) == 0 || strings.HasPrefix(args[0], "-") { + fmt.Fprintln(m.Stderr, m.Usage()) + return ErrUsage + } + + // Execute command. + switch args[0] { + case "help": + fmt.Fprintln(m.Stderr, m.Usage()) + return ErrUsage + case "bench": + return newBenchCommand(m).Run(args[1:]...) + case "check": + return newCheckCommand(m).Run(args[1:]...) + case "compact": + return newCompactCommand(m).Run(args[1:]...) + case "dump": + return newDumpCommand(m).Run(args[1:]...) + case "info": + return newInfoCommand(m).Run(args[1:]...) + case "page": + return newPageCommand(m).Run(args[1:]...) + case "pages": + return newPagesCommand(m).Run(args[1:]...) + case "stats": + return newStatsCommand(m).Run(args[1:]...) + default: + return ErrUnknownCommand + } +} + +// Usage returns the help message. +func (m *Main) Usage() string { + return strings.TrimLeft(` +Bolt is a tool for inspecting bolt databases. + +Usage: + + bolt command [arguments] + +The commands are: + + bench run synthetic benchmark against bolt + check verifies integrity of bolt database + compact copies a bolt database, compacting it in the process + info print basic info + help print this screen + pages print list of pages with their types + stats iterate over all pages and generate usage stats + +Use "bolt [command] -h" for more information about a command. +`, "\n") +} + +// CheckCommand represents the "check" command execution. +type CheckCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewCheckCommand returns a CheckCommand. +func newCheckCommand(m *Main) *CheckCommand { + return &CheckCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *CheckCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Perform consistency check. 
+ return db.View(func(tx *bolt.Tx) error { + var count int + ch := tx.Check() + loop: + for { + select { + case err, ok := <-ch: + if !ok { + break loop + } + fmt.Fprintln(cmd.Stdout, err) + count++ + } + } + + // Print summary of errors. + if count > 0 { + fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) + return ErrCorrupt + } + + // Notify user that database is valid. + fmt.Fprintln(cmd.Stdout, "OK") + return nil + }) +} + +// Usage returns the help message. +func (cmd *CheckCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt check PATH + +Check opens a database at PATH and runs an exhaustive check to verify that +all pages are accessible or are marked as freed. It also verifies that no +pages are double referenced. + +Verification errors will stream out as they are found and the process will +return after all pages have been checked. +`, "\n") +} + +// InfoCommand represents the "info" command execution. +type InfoCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewInfoCommand returns a InfoCommand. +func newInfoCommand(m *Main) *InfoCommand { + return &InfoCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *InfoCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open the database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // Print basic database info. + info := db.Info() + fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) + + return nil +} + +// Usage returns the help message. +func (cmd *InfoCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt info PATH + +Info prints basic information about the Bolt database at PATH. +`, "\n") +} + +// DumpCommand represents the "dump" command execution. +type DumpCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newDumpCommand returns a DumpCommand. +func newDumpCommand(m *Main) *DumpCommand { + return &DumpCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *DumpCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page ids. + pageIDs, err := atois(fs.Args()[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + + // Open database to retrieve page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return err + } + + // Open database file handler. + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // Print each page listed. + for i, pageID := range pageIDs { + // Print a separator. 
+ if i > 0 { + fmt.Fprintln(cmd.Stdout, "===============================================") + } + + // Print page to stdout. + if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { + return err + } + } + + return nil +} + +// PrintPage prints a given page as hexadecimal. +func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { + const bytesPerLineN = 16 + + // Read page into buffer. + buf := make([]byte, pageSize) + addr := pageID * pageSize + if n, err := r.ReadAt(buf, int64(addr)); err != nil { + return err + } else if n != pageSize { + return io.ErrUnexpectedEOF + } + + // Write out to writer in 16-byte lines. + var prev []byte + var skipped bool + for offset := 0; offset < pageSize; offset += bytesPerLineN { + // Retrieve current 16-byte line. + line := buf[offset : offset+bytesPerLineN] + isLastLine := (offset == (pageSize - bytesPerLineN)) + + // If it's the same as the previous line then print a skip. + if bytes.Equal(line, prev) && !isLastLine { + if !skipped { + fmt.Fprintf(w, "%07x *\n", addr+offset) + skipped = true + } + } else { + // Print line as hexadecimal in 2-byte groups. + fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // Save the previous line. + prev = line + } + fmt.Fprint(w, "\n") + + return nil +} + +// Usage returns the help message. +func (cmd *DumpCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt dump -page PAGEID PATH + +Dump prints a hexadecimal dump of a single page. +`, "\n") +} + +// PageCommand represents the "page" command execution. +type PageCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// newPageCommand returns a PageCommand. +func newPageCommand(m *Main) *PageCommand { + return &PageCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PageCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path and page id. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Read page ids. + pageIDs, err := atois(fs.Args()[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + + // Open database file handler. + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // Print each page listed. + for i, pageID := range pageIDs { + // Print a separator. + if i > 0 { + fmt.Fprintln(cmd.Stdout, "===============================================") + } + + // Retrieve page info and page size. + p, buf, err := ReadPage(path, pageID) + if err != nil { + return err + } + + // Print basic page info. + fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) + fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) + fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) + + // Print type-specific data. 
+		switch p.Type() {
+		case "meta":
+			err = cmd.PrintMeta(cmd.Stdout, buf)
+		case "leaf":
+			err = cmd.PrintLeaf(cmd.Stdout, buf)
+		case "branch":
+			err = cmd.PrintBranch(cmd.Stdout, buf)
+		case "freelist":
+			err = cmd.PrintFreelist(cmd.Stdout, buf)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// PrintMeta prints the data from the meta page.
+func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
+	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintLeaf prints the data for a leaf page.
+func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.leafPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		// Format value as string.
+		var v string
+		if (e.flags & uint32(bucketLeafFlag)) != 0 {
+			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
+			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+		} else if isPrintable(string(e.value())) {
+			v = fmt.Sprintf("%q", string(e.value()))
+		} else {
+			v = fmt.Sprintf("%x", string(e.value()))
+		}
+
+		fmt.Fprintf(w, "%s: %s\n", k, v)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintBranch prints the data for a branch page.
+func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.count; i++ {
+		e := p.branchPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.key())) {
+			k = fmt.Sprintf("%q", string(e.key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.key()))
+		}
+
+		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintFreelist prints the data for a freelist page.
+func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
+	p := (*page)(unsafe.Pointer(&buf[0]))
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.count)
+	fmt.Fprintf(w, "\n")
+
+	// Print each page in the freelist.
+	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
+	for i := uint16(0); i < p.count; i++ {
+		fmt.Fprintf(w, "%d\n", ids[i])
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintPage prints a given page as hexadecimal.
+func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+	const bytesPerLineN = 16
+
+	// Read page into buffer.
+	buf := make([]byte, pageSize)
+	addr := pageID * pageSize
+	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
+		return err
+	} else if n != pageSize {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Write out to writer in 16-byte lines.
+	var prev []byte
+	var skipped bool
+	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+		// Retrieve current 16-byte line.
+ line := buf[offset : offset+bytesPerLineN] + isLastLine := (offset == (pageSize - bytesPerLineN)) + + // If it's the same as the previous line then print a skip. + if bytes.Equal(line, prev) && !isLastLine { + if !skipped { + fmt.Fprintf(w, "%07x *\n", addr+offset) + skipped = true + } + } else { + // Print line as hexadecimal in 2-byte groups. + fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // Save the previous line. + prev = line + } + fmt.Fprint(w, "\n") + + return nil +} + +// Usage returns the help message. +func (cmd *PageCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt page -page PATH pageid [pageid...] + +Page prints one or more pages in human readable format. +`, "\n") +} + +// PagesCommand represents the "pages" command execution. +type PagesCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewPagesCommand returns a PagesCommand. +func newPagesCommand(m *Main) *PagesCommand { + return &PagesCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *PagesCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path := fs.Arg(0) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + // Write header. + fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") + fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") + + return db.Update(func(tx *bolt.Tx) error { + var id int + for { + p, err := tx.Page(id) + if err != nil { + return &PageError{ID: id, Err: err} + } else if p == nil { + break + } + + // Only display count and overflow if this is a non-free page. + var count, overflow string + if p.Type != "free" { + count = strconv.Itoa(p.Count) + if p.OverflowCount > 0 { + overflow = strconv.Itoa(p.OverflowCount) + } + } + + // Print table row. + fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) + + // Move to the next non-overflow page. + id += 1 + if p.Type != "free" { + id += p.OverflowCount + } + } + return nil + }) +} + +// Usage returns the help message. +func (cmd *PagesCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt pages PATH + +Pages prints a table of pages with their type (meta, leaf, branch, freelist). +Leaf and branch pages will show a key count in the "items" column while the +freelist will show the number of free pages in the "items" column. + +The "overflow" column shows the number of blocks that the page spills over +into. Normally there is no overflow but large keys and values can cause +a single page to take up multiple blocks. +`, "\n") +} + +// StatsCommand represents the "stats" command execution. +type StatsCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewStatsCommand returns a StatsCommand. +func newStatsCommand(m *Main) *StatsCommand { + return &StatsCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. 
+func (cmd *StatsCommand) Run(args ...string) error { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + help := fs.Bool("h", false, "") + if err := fs.Parse(args); err != nil { + return err + } else if *help { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } + + // Require database path. + path, prefix := fs.Arg(0), fs.Arg(1) + if path == "" { + return ErrPathRequired + } else if _, err := os.Stat(path); os.IsNotExist(err) { + return ErrFileNotFound + } + + // Open database. + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + return db.View(func(tx *bolt.Tx) error { + var s bolt.BucketStats + var count int + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + if bytes.HasPrefix(name, []byte(prefix)) { + s.Add(b.Stats()) + count += 1 + } + return nil + }); err != nil { + return err + } + + fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) + + fmt.Fprintln(cmd.Stdout, "Page count statistics") + fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) + fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) + fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) + fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) + + fmt.Fprintln(cmd.Stdout, "Tree statistics") + fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN) + fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) + + fmt.Fprintln(cmd.Stdout, "Page size utilization") + fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) + var percentage int + if s.BranchAlloc != 0 { + percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) + } + fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) + fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) + percentage = 0 + if s.LeafAlloc != 0 { + percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) + } + fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) + + fmt.Fprintln(cmd.Stdout, "Bucket statistics") + fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) + percentage = 0 + if s.BucketN != 0 { + percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + } + fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) + percentage = 0 + if s.LeafInuse != 0 { + percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) + } + fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) + + return nil + }) +} + +// Usage returns the help message. +func (cmd *StatsCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt stats PATH + +Stats performs an extensive search of the database to track every page +reference. It starts at the current meta page and recursively iterates +through every accessible bucket. + +The following errors can be reported: + + already freed + The page is referenced more than once in the freelist. + + unreachable unfreed + The page is not referenced by a bucket or in the freelist. + + reachable freed + The page is referenced by a bucket but is also in the freelist. + + out of bounds + A page is referenced that is above the high water mark. 
+ + multiple references + A page is referenced by more than one other page. + + invalid type + The page type is not "meta", "leaf", "branch", or "freelist". + +No errors should occur in your database. However, if for some reason you +experience corruption, please submit a ticket to the Bolt project page: + + https://github.com/boltdb/bolt/issues +`, "\n") +} + +var benchBucketName = []byte("bench") + +// BenchCommand represents the "bench" command execution. +type BenchCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +// NewBenchCommand returns a BenchCommand using the +func newBenchCommand(m *Main) *BenchCommand { + return &BenchCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the "bench" command. +func (cmd *BenchCommand) Run(args ...string) error { + // Parse CLI arguments. + options, err := cmd.ParseFlags(args) + if err != nil { + return err + } + + // Remove path if "-work" is not set. Otherwise keep path. + if options.Work { + fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) + } else { + defer os.Remove(options.Path) + } + + // Create database. + db, err := bolt.Open(options.Path, 0666, nil) + if err != nil { + return err + } + db.NoSync = options.NoSync + defer db.Close() + + // Write to the database. + var results BenchResults + if err := cmd.runWrites(db, options, &results); err != nil { + return fmt.Errorf("write: %v", err) + } + + // Read from the database. + if err := cmd.runReads(db, options, &results); err != nil { + return fmt.Errorf("bench: read: %s", err) + } + + // Print results. + fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) + fmt.Fprintln(os.Stderr, "") + return nil +} + +// ParseFlags parses the command line flags. +func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { + var options BenchOptions + + // Parse flagset. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") + fs.StringVar(&options.WriteMode, "write-mode", "seq", "") + fs.StringVar(&options.ReadMode, "read-mode", "seq", "") + fs.IntVar(&options.Iterations, "count", 1000, "") + fs.IntVar(&options.BatchSize, "batch-size", 0, "") + fs.IntVar(&options.KeySize, "key-size", 8, "") + fs.IntVar(&options.ValueSize, "value-size", 32, "") + fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") + fs.StringVar(&options.MemProfile, "memprofile", "", "") + fs.StringVar(&options.BlockProfile, "blockprofile", "", "") + fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") + fs.BoolVar(&options.NoSync, "no-sync", false, "") + fs.BoolVar(&options.Work, "work", false, "") + fs.StringVar(&options.Path, "path", "", "") + fs.SetOutput(cmd.Stderr) + if err := fs.Parse(args); err != nil { + return nil, err + } + + // Set batch size to iteration size if not set. + // Require that batch size can be evenly divided by the iteration count. + if options.BatchSize == 0 { + options.BatchSize = options.Iterations + } else if options.Iterations%options.BatchSize != 0 { + return nil, ErrNonDivisibleBatchSize + } + + // Generate temp path if one is not passed in. 
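+ // The temp file only reserves a unique name; it is closed and removed
+ // straight away so bolt.Open can create the database file at that path.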
+ if options.Path == "" { + f, err := ioutil.TempFile("", "bolt-bench-") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + f.Close() + os.Remove(f.Name()) + options.Path = f.Name() + } + + return &options, nil +} + +// Writes to the database. +func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for writes. + if options.ProfileMode == "rw" || options.ProfileMode == "w" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.WriteMode { + case "seq": + err = cmd.runWritesSequential(db, options, results) + case "rnd": + err = cmd.runWritesRandom(db, options, results) + case "seq-nest": + err = cmd.runWritesSequentialNested(db, options, results) + case "rnd-nest": + err = cmd.runWritesRandomNested(db, options, results) + default: + return fmt.Errorf("invalid write mode: %s", options.WriteMode) + } + + // Save time to write. + results.WriteDuration = time.Since(t) + + // Stop profiling for writes only. + if options.ProfileMode == "w" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var i = uint32(0) + return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) +} + +func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) +} + +func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var i = uint32(0) + return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) +} + +func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) +} + +func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { + results.WriteOps = options.Iterations + + for i := 0; i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(benchBucketName) + b.FillPercent = options.FillPercent + + for j := 0; j < options.BatchSize; j++ { + key := make([]byte, options.KeySize) + value := make([]byte, options.ValueSize) + + // Write key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert key/value. + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { + results.WriteOps = options.Iterations + + for i := 0; i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + top, err := tx.CreateBucketIfNotExists(benchBucketName) + if err != nil { + return err + } + top.FillPercent = options.FillPercent + + // Create bucket key. + name := make([]byte, options.KeySize) + binary.BigEndian.PutUint32(name, keySource()) + + // Create bucket. 
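+ // Each batch writes its keys into a nested bucket named by a uint32 drawn from keySource.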
+ b, err := top.CreateBucketIfNotExists(name) + if err != nil { + return err + } + b.FillPercent = options.FillPercent + + for j := 0; j < options.BatchSize; j++ { + var key = make([]byte, options.KeySize) + var value = make([]byte, options.ValueSize) + + // Generate key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert value into subbucket. + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + return nil +} + +// Reads from the database. +func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + // Start profiling for reads. + if options.ProfileMode == "r" { + cmd.startProfiling(options) + } + + t := time.Now() + + var err error + switch options.ReadMode { + case "seq": + switch options.WriteMode { + case "seq-nest", "rnd-nest": + err = cmd.runReadsSequentialNested(db, options, results) + default: + err = cmd.runReadsSequential(db, options, results) + } + default: + return fmt.Errorf("invalid read mode: %s", options.ReadMode) + } + + // Save read time. + results.ReadDuration = time.Since(t) + + // Stop profiling for reads. + if options.ProfileMode == "rw" || options.ProfileMode == "r" { + cmd.stopProfiling() + } + + return err +} + +func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + + c := tx.Bucket(benchBucketName).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return errors.New("invalid value") + } + count++ + } + + if options.WriteMode == "seq" && count != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + var count int + var top = tx.Bucket(benchBucketName) + if err := top.ForEach(func(name, _ []byte) error { + c := top.Bucket(name).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return ErrInvalidValue + } + count++ + } + return nil + }); err != nil { + return err + } + + if options.WriteMode == "seq-nest" && count != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +// File handlers for the various profiles. +var cpuprofile, memprofile, blockprofile *os.File + +// Starts all profiles set on the options. +func (cmd *BenchCommand) startProfiling(options *BenchOptions) { + var err error + + // Start CPU profiling. + if options.CPUProfile != "" { + cpuprofile, err = os.Create(options.CPUProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) + os.Exit(1) + } + pprof.StartCPUProfile(cpuprofile) + } + + // Start memory profiling. 
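+ // Only the output file is opened and the sampling rate raised here; the
+ // heap profile itself is written out when profiling stops.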
+ if options.MemProfile != "" { + memprofile, err = os.Create(options.MemProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) + os.Exit(1) + } + runtime.MemProfileRate = 4096 + } + + // Start fatal profiling. + if options.BlockProfile != "" { + blockprofile, err = os.Create(options.BlockProfile) + if err != nil { + fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) + os.Exit(1) + } + runtime.SetBlockProfileRate(1) + } +} + +// Stops all profiles. +func (cmd *BenchCommand) stopProfiling() { + if cpuprofile != nil { + pprof.StopCPUProfile() + cpuprofile.Close() + cpuprofile = nil + } + + if memprofile != nil { + pprof.Lookup("heap").WriteTo(memprofile, 0) + memprofile.Close() + memprofile = nil + } + + if blockprofile != nil { + pprof.Lookup("block").WriteTo(blockprofile, 0) + blockprofile.Close() + blockprofile = nil + runtime.SetBlockProfileRate(0) + } +} + +// BenchOptions represents the set of options that can be passed to "bolt bench". +type BenchOptions struct { + ProfileMode string + WriteMode string + ReadMode string + Iterations int + BatchSize int + KeySize int + ValueSize int + CPUProfile string + MemProfile string + BlockProfile string + StatsInterval time.Duration + FillPercent float64 + NoSync bool + Work bool + Path string +} + +// BenchResults represents the performance results of the benchmark. +type BenchResults struct { + WriteOps int + WriteDuration time.Duration + ReadOps int + ReadDuration time.Duration +} + +// Returns the duration for a single write operation. +func (r *BenchResults) WriteOpDuration() time.Duration { + if r.WriteOps == 0 { + return 0 + } + return r.WriteDuration / time.Duration(r.WriteOps) +} + +// Returns average number of write operations that can be performed per second. +func (r *BenchResults) WriteOpsPerSecond() int { + var op = r.WriteOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +// Returns the duration for a single read operation. +func (r *BenchResults) ReadOpDuration() time.Duration { + if r.ReadOps == 0 { + return 0 + } + return r.ReadDuration / time.Duration(r.ReadOps) +} + +// Returns average number of read operations that can be performed per second. +func (r *BenchResults) ReadOpsPerSecond() int { + var op = r.ReadOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +type PageError struct { + ID int + Err error +} + +func (e *PageError) Error() string { + return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) +} + +// isPrintable returns true if the string is valid unicode and contains only printable runes. +func isPrintable(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, ch := range s { + if !unicode.IsPrint(ch) { + return false + } + } + return true +} + +// ReadPage reads page info & full page data from a path. +// This is not transactionally safe. +func ReadPage(path string, pageID int) (*page, []byte, error) { + // Find page size. + pageSize, err := ReadPageSize(path) + if err != nil { + return nil, nil, fmt.Errorf("read page size: %s", err) + } + + // Open database file. + f, err := os.Open(path) + if err != nil { + return nil, nil, err + } + defer f.Close() + + // Read one block into buffer. 
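+ // A single page is read first so its overflow count can be inspected; the
+ // page is then re-read in full, including any overflow blocks.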
+ buf := make([]byte, pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + + // Determine total number of blocks. + p := (*page)(unsafe.Pointer(&buf[0])) + overflowN := p.overflow + + // Re-read entire page (with overflow) into buffer. + buf = make([]byte, (int(overflowN)+1)*pageSize) + if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { + return nil, nil, err + } else if n != len(buf) { + return nil, nil, io.ErrUnexpectedEOF + } + p = (*page)(unsafe.Pointer(&buf[0])) + + return p, buf, nil +} + +// ReadPageSize reads page size a path. +// This is not transactionally safe. +func ReadPageSize(path string) (int, error) { + // Open database file. + f, err := os.Open(path) + if err != nil { + return 0, err + } + defer f.Close() + + // Read 4KB chunk. + buf := make([]byte, 4096) + if _, err := io.ReadFull(f, buf); err != nil { + return 0, err + } + + // Read page size from metadata. + m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) + return int(m.pageSize), nil +} + +// atois parses a slice of strings into integers. +func atois(strs []string) ([]int, error) { + var a []int + for _, str := range strs { + i, err := strconv.Atoi(str) + if err != nil { + return nil, err + } + a = append(a, i) + } + return a, nil +} + +// DO NOT EDIT. Copied from the "bolt" package. +const maxAllocSize = 0xFFFFFFF + +// DO NOT EDIT. Copied from the "bolt" package. +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +// DO NOT EDIT. Copied from the "bolt" package. +const bucketLeafFlag = 0x01 + +// DO NOT EDIT. Copied from the "bolt" package. +type pgid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type txid uint64 + +// DO NOT EDIT. Copied from the "bolt" package. +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type bucket struct { + root pgid + sequence uint64 +} + +// DO NOT EDIT. Copied from the "bolt" package. +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) Type() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// DO NOT EDIT. 
Copied from the "bolt" package. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos : n.pos+n.ksize] +} + +// DO NOT EDIT. Copied from the "bolt" package. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] +} + +// CompactCommand represents the "compact" command execution. +type CompactCommand struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + + SrcPath string + DstPath string + TxMaxSize int64 +} + +// newCompactCommand returns a CompactCommand. +func newCompactCommand(m *Main) *CompactCommand { + return &CompactCommand{ + Stdin: m.Stdin, + Stdout: m.Stdout, + Stderr: m.Stderr, + } +} + +// Run executes the command. +func (cmd *CompactCommand) Run(args ...string) (err error) { + // Parse flags. + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.SetOutput(ioutil.Discard) + fs.StringVar(&cmd.DstPath, "o", "", "") + fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "") + if err := fs.Parse(args); err == flag.ErrHelp { + fmt.Fprintln(cmd.Stderr, cmd.Usage()) + return ErrUsage + } else if err != nil { + return err + } else if cmd.DstPath == "" { + return fmt.Errorf("output file required") + } + + // Require database paths. + cmd.SrcPath = fs.Arg(0) + if cmd.SrcPath == "" { + return ErrPathRequired + } + + // Ensure source file exists. + fi, err := os.Stat(cmd.SrcPath) + if os.IsNotExist(err) { + return ErrFileNotFound + } else if err != nil { + return err + } + initialSize := fi.Size() + + // Open source database. + src, err := bolt.Open(cmd.SrcPath, 0444, nil) + if err != nil { + return err + } + defer src.Close() + + // Open destination database. + dst, err := bolt.Open(cmd.DstPath, fi.Mode(), nil) + if err != nil { + return err + } + defer dst.Close() + + // Run compaction. + if err := cmd.compact(dst, src); err != nil { + return err + } + + // Report stats on new size. + fi, err = os.Stat(cmd.DstPath) + if err != nil { + return err + } else if fi.Size() == 0 { + return fmt.Errorf("zero db size") + } + fmt.Fprintf(cmd.Stdout, "%d -> %d bytes (gain=%.2fx)\n", initialSize, fi.Size(), float64(initialSize)/float64(fi.Size())) + + return nil +} + +func (cmd *CompactCommand) compact(dst, src *bolt.DB) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > cmd.TxMaxSize && cmd.TxMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. + nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // If there is no value then this is a bucket call. 
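+ // A nil value marks a nested bucket: recreate it in the destination and
+ // restore its sequence number instead of copying a key/value pair.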
+ if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + + return tx.Commit() +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func (cmd *CompactCommand) walk(db *bolt.DB, walkFn walkFunc) error { + return db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return cmd.walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func (cmd *CompactCommand) walkBucket(b *bolt.Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return cmd.walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return cmd.walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} + +// Usage returns the help message. +func (cmd *CompactCommand) Usage() string { + return strings.TrimLeft(` +usage: bolt compact [options] -o DST SRC + +Compact opens a database at SRC path and walks it recursively, copying keys +as they are found from all buckets, to a newly created database at DST path. + +The original database is left untouched. + +Additional options include: + + -tx-max-size NUM + Specifies the maximum size of individual transactions. + Defaults to 64KB. +`, "\n") +} diff --git a/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go b/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go new file mode 100644 index 000000000..0a11ff339 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go @@ -0,0 +1,356 @@ +package main_test + +import ( + "bytes" + crypto "crypto/rand" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "strconv" + "testing" + + "github.com/boltdb/bolt" + "github.com/boltdb/bolt/cmd/bolt" +) + +// Ensure the "info" command can print information about a database. +func TestInfoCommand_Run(t *testing.T) { + db := MustOpen(0666, nil) + db.DB.Close() + defer db.Close() + + // Run the info command. + m := NewMain() + if err := m.Run("info", db.Path); err != nil { + t.Fatal(err) + } +} + +// Ensure the "stats" command executes correctly with an empty database. +func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + db.DB.Close() + + // Generate expected result. 
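+ // Expected stats output for a freshly created, empty database.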
+ exp := "Aggregate statistics for 0 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 0\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 0\n" + + "\tNumber of levels in B+tree: 0\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 0\n" + + "\tBytes actually used for leaf data: 0 (0%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 0\n" + + "\tTotal number on inlined buckets: 0 (0%)\n" + + "\tBytes used for inlined buckets: 0 (0%)\n" + + // Run the command. + m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Ensure the "stats" command can execute correctly. +func TestStatsCommand_Run(t *testing.T) { + // Ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + db := MustOpen(0666, nil) + defer db.Close() + + if err := db.Update(func(tx *bolt.Tx) error { + // Create "foo" bucket. + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + return err + } + for i := 0; i < 10; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "bar" bucket. + b, err = tx.CreateBucket([]byte("bar")) + if err != nil { + return err + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // Create "baz" bucket. + b, err = tx.CreateBucket([]byte("baz")) + if err != nil { + return err + } + if err := b.Put([]byte("key"), []byte("value")); err != nil { + return err + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.DB.Close() + + // Generate expected result. + exp := "Aggregate statistics for 3 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 1\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 111\n" + + "\tNumber of levels in B+tree: 1\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 4096\n" + + "\tBytes actually used for leaf data: 1996 (48%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 3\n" + + "\tTotal number on inlined buckets: 2 (66%)\n" + + "\tBytes used for inlined buckets: 236 (11%)\n" + + // Run the command. + m := NewMain() + if err := m.Run("stats", db.Path); err != nil { + t.Fatal(err) + } else if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +// Main represents a test wrapper for main.Main that records output. +type Main struct { + *main.Main + Stdin bytes.Buffer + Stdout bytes.Buffer + Stderr bytes.Buffer +} + +// NewMain returns a new instance of Main. +func NewMain() *Main { + m := &Main{Main: main.NewMain()} + m.Main.Stdin = &m.Stdin + m.Main.Stdout = &m.Stdout + m.Main.Stderr = &m.Stderr + return m +} + +// MustOpen creates a Bolt database in a temporary location. 
+func MustOpen(mode os.FileMode, options *bolt.Options) *DB { + // Create temporary path. + f, _ := ioutil.TempFile("", "bolt-") + f.Close() + os.Remove(f.Name()) + + db, err := bolt.Open(f.Name(), mode, options) + if err != nil { + panic(err.Error()) + } + return &DB{DB: db, Path: f.Name()} +} + +// DB is a test wrapper for bolt.DB. +type DB struct { + *bolt.DB + Path string +} + +// Close closes and removes the database. +func (db *DB) Close() error { + defer os.Remove(db.Path) + return db.DB.Close() +} + +func TestCompactCommand_Run(t *testing.T) { + var s int64 + if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil { + t.Fatal(err) + } + rand.Seed(s) + + dstdb := MustOpen(0666, nil) + dstdb.Close() + + // fill the db + db := MustOpen(0666, nil) + if err := db.Update(func(tx *bolt.Tx) error { + n := 2 + rand.Intn(5) + for i := 0; i < n; i++ { + k := []byte(fmt.Sprintf("b%d", i)) + b, err := tx.CreateBucketIfNotExists(k) + if err != nil { + return err + } + if err := b.SetSequence(uint64(i)); err != nil { + return err + } + if err := fillBucket(b, append(k, '.')); err != nil { + return err + } + } + return nil + }); err != nil { + db.Close() + t.Fatal(err) + } + + // make the db grow by adding large values, and delete them. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("large_vals")) + if err != nil { + return err + } + n := 5 + rand.Intn(5) + for i := 0; i < n; i++ { + v := make([]byte, 1000*1000*(1+rand.Intn(5))) + _, err := crypto.Read(v) + if err != nil { + return err + } + if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil { + return err + } + } + return nil + }); err != nil { + db.Close() + t.Fatal(err) + } + if err := db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("large_vals")).Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + if err := c.Delete(); err != nil { + return err + } + } + return tx.DeleteBucket([]byte("large_vals")) + }); err != nil { + db.Close() + t.Fatal(err) + } + db.DB.Close() + defer db.Close() + defer dstdb.Close() + + dbChk, err := chkdb(db.Path) + if err != nil { + t.Fatal(err) + } + + m := NewMain() + if err := m.Run("compact", "-o", dstdb.Path, db.Path); err != nil { + t.Fatal(err) + } + + dbChkAfterCompact, err := chkdb(db.Path) + if err != nil { + t.Fatal(err) + } + + dstdbChk, err := chkdb(dstdb.Path) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(dbChk, dbChkAfterCompact) { + t.Error("the original db has been touched") + } + if !bytes.Equal(dbChk, dstdbChk) { + t.Error("the compacted db data isn't the same than the original db") + } +} + +func fillBucket(b *bolt.Bucket, prefix []byte) error { + n := 10 + rand.Intn(50) + for i := 0; i < n; i++ { + v := make([]byte, 10*(1+rand.Intn(4))) + _, err := crypto.Read(v) + if err != nil { + return err + } + k := append(prefix, []byte(fmt.Sprintf("k%d", i))...) + if err := b.Put(k, v); err != nil { + return err + } + } + // limit depth of subbuckets + s := 2 + rand.Intn(4) + if len(prefix) > (2*s + 1) { + return nil + } + n = 1 + rand.Intn(3) + for i := 0; i < n; i++ { + k := append(prefix, []byte(fmt.Sprintf("b%d", i))...) 
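+ // Create a sub-bucket for the extended prefix and fill it recursively.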
+ sb, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := fillBucket(sb, append(k, '.')); err != nil { + return err + } + } + return nil +} + +func chkdb(path string) ([]byte, error) { + db, err := bolt.Open(path, 0666, nil) + if err != nil { + return nil, err + } + defer db.Close() + var buf bytes.Buffer + err = db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return walkBucket(b, name, nil, &buf) + }) + }) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil { + return err + } + + // not a bucket, exit. + if v != nil { + return nil + } + return parent.ForEach(func(k, v []byte) error { + if v == nil { + return walkBucket(parent.Bucket(k), k, nil, w) + } + return walkBucket(parent, k, v, w) + }) +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go new file mode 100644 index 000000000..1be9f35e3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. 
+// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. 
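+ // Descend through the in-memory node when one is materialized, otherwise
+ // through the on-disk branch page element.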
+ var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. 
+ ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/cursor_test.go b/vendor/github.com/boltdb/bolt/cursor_test.go new file mode 100644 index 000000000..562d60f9a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor_test.go @@ -0,0 +1,817 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "fmt" + "log" + "os" + "reflect" + "sort" + "testing" + "testing/quick" + + "github.com/boltdb/bolt" +) + +// Ensure that a cursor can return a reference to the bucket that created it. +func TestCursor_Bucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) { + t.Fatal("cursor bucket mismatch") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can seek to the appropriate keys. 
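+// Seek should land on the exact key when it exists, otherwise on the next key
+// in byte order, and nested buckets should come back with a nil value.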
+func TestCursor_Seek(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0001")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0002")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0003")); err != nil { + t.Fatal(err) + } + + if _, err := b.CreateBucket([]byte("bkt")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + + // Exact match should go to the key. + if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } + + // Inexact match should go to the next key. + if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0003")) { + t.Fatalf("unexpected value: %v", v) + } + + // Low key should go to the first key. + if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } + + // High key should return no key. + if k, v := c.Seek([]byte("zzz")); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + // Buckets should return their key but no value. + if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestCursor_Delete(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + const count = 1000 + + // Insert every other key between 0 and $count. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < count; i += 1 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(i)) + if err := b.Put(k, make([]byte, 100)); err != nil { + t.Fatal(err) + } + } + if _, err := b.CreateBucket([]byte("sub")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + bound := make([]byte, 8) + binary.BigEndian.PutUint64(bound, uint64(count/2)) + for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { + if err := c.Delete(); err != nil { + t.Fatal(err) + } + } + + c.Seek([]byte("sub")) + if err := c.Delete(); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("widgets")).Stats() + if stats.KeyN != count/2+1 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can seek to the appropriate keys when there are a +// large number of keys. This test also checks that seek will always move +// forward to the next key. 
+// +// Related: https://github.com/boltdb/bolt/pull/187 +func TestCursor_Seek_Large(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var count = 10000 + + // Insert every other key between 0 and $count. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < count; i += 100 { + for j := i; j < i+100; j += 2 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(j)) + if err := b.Put(k, make([]byte, 100)); err != nil { + t.Fatal(err) + } + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + for i := 0; i < count; i++ { + seek := make([]byte, 8) + binary.BigEndian.PutUint64(seek, uint64(i)) + + k, _ := c.Seek(seek) + + // The last seek is beyond the end of the the range so + // it should return nil. + if i == count-1 { + if k != nil { + t.Fatal("expected nil key") + } + continue + } + + // Otherwise we should seek to the exact key or the next key. + num := binary.BigEndian.Uint64(k) + if i%2 == 0 { + if num != uint64(i) { + t.Fatalf("unexpected num: %d", num) + } + } else { + if num != uint64(i+1) { + t.Fatalf("unexpected num: %d", num) + } + } + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a cursor can iterate over an empty bucket without error. +func TestCursor_EmptyBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.First() + if k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. +func TestCursor_EmptyBucketReverse(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.Last() + if k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can iterate over a single root with a couple elements. 
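+// Keys should be returned in sorted byte order regardless of insertion order.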
+func TestCursor_Iterate_Leaf(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{0}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{1}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + defer func() { _ = tx.Rollback() }() + + c := tx.Bucket([]byte("widgets")).Cursor() + + k, v := c.First() + if !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{1}) { + t.Fatalf("unexpected value: %v", v) + } + + k, v = c.Next() + if !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{}) { + t.Fatalf("unexpected value: %v", v) + } + + k, v = c.Next() + if !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{0}) { + t.Fatalf("unexpected value: %v", v) + } + + k, v = c.Next() + if k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + k, v = c.Next() + if k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. +func TestCursor_LeafRootReverse(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{0}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{1}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + + if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{0}) { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{}) { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{1}) { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Prev(); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + if k, v := c.Prev(); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can restart from the beginning. 
+func TestCursor_Restart(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + + if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } + if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } + + if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } + if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a cursor can skip over empty pages that have been deleted. +func TestCursor_First_EmptyPages(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create 1000 keys in the "widgets" bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 1000; i++ { + if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Delete half the keys and then try to iterate. + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 600; i++ { + if err := b.Delete(u64tob(uint64(i))); err != nil { + t.Fatal(err) + } + } + + c := b.Cursor() + var n int + for k, _ := c.First(); k != nil; k, _ = c.Next() { + n++ + } + if n != 400 { + t.Fatalf("unexpected key count: %d", n) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx can iterate over all elements in a bucket. +func TestCursor_QuickCheck(t *testing.T) { + f := func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Sort test data. + sort.Sort(items) + + // Iterate over all items and check consistency. + var index = 0 + tx, err = db.Begin(false) + if err != nil { + t.Fatal(err) + } + + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { + if !bytes.Equal(k, items[index].Key) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, items[index].Value) { + t.Fatalf("unexpected value: %v", v) + } + index++ + } + if len(items) != index { + t.Fatalf("unexpected item count: %v, expected %v", len(items), index) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a transaction can iterate over all elements in a bucket in reverse. +func TestCursor_QuickCheck_Reverse(t *testing.T) { + f := func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + + // Bulk insert all values. 
+ tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Sort test data. + sort.Sort(revtestdata(items)) + + // Iterate over all items and check consistency. + var index = 0 + tx, err = db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { + if !bytes.Equal(k, items[index].Key) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, items[index].Value) { + t.Fatalf("unexpected value: %v", v) + } + index++ + } + if len(items) != index { + t.Fatalf("unexpected item count: %v, expected %v", len(items), index) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + return true + } + if err := quick.Check(f, qconfig()); err != nil { + t.Error(err) + } +} + +// Ensure that a Tx cursor can iterate over subbuckets. +func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("baz")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + names = append(names, string(k)) + if v != nil { + t.Fatalf("unexpected value: %v", v) + } + } + if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) { + t.Fatalf("unexpected names: %+v", names) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx cursor can reverse iterate over subbuckets. +func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("baz")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil; k, v = c.Prev() { + names = append(names, string(k)) + if v != nil { + t.Fatalf("unexpected value: %v", v) + } + } + if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) { + t.Fatalf("unexpected names: %+v", names) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func ExampleCursor() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a new bucket. + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + // Insert data into a bucket. 
+ if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + log.Fatal(err) + } + + // Create a cursor for iteration. + c := b.Cursor() + + // Iterate over items in sorted key order. This starts from the + // first key/value pair and updates the k/v variables to the + // next key/value on each iteration. + // + // The loop finishes at the end of the cursor when a nil key is returned. + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("A %s is %s.\n", k, v) + } + + return nil + }); err != nil { + log.Fatal(err) + } + + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // A cat is lame. + // A dog is fun. + // A liger is awesome. +} + +func ExampleCursor_reverse() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Start a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + // Create a new bucket. + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + // Insert data into a bucket. + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + log.Fatal(err) + } + + // Create a cursor for iteration. + c := b.Cursor() + + // Iterate over items in reverse sorted key order. This starts + // from the last key/value pair and updates the k/v variables to + // the previous key/value on each iteration. + // + // The loop finishes at the beginning of the cursor when a nil key + // is returned. + for k, v := c.Last(); k != nil; k, v = c.Prev() { + fmt.Printf("A %s is %s.\n", k, v) + } + + return nil + }); err != nil { + log.Fatal(err) + } + + // Close the database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // A liger is awesome. + // A dog is fun. + // A cat is lame. +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 000000000..f352ff14f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,1039 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// DB represents a collection of buckets persisted to a file on disk. 
+// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. 
+// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) + } + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. 
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Create two meta pages on a buffer.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf[:], pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf[:], pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
+	p = db.pageInBuffer(buf[:], pgid(3))
+	p.id = pgid(3)
+	p.flags = leafPageFlag
+	p.count = 0
+
+	// Write the buffer to our data file.
+	if _, err := db.ops.writeAt(buf, 0); err != nil {
+		return err
+	}
+	if err := fdatasync(db); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+	db.rwlock.Lock()
+	defer db.rwlock.Unlock()
+
+	db.metalock.Lock()
+	defer db.metalock.Unlock()
+
+	db.mmaplock.RLock()
+	defer db.mmaplock.RUnlock()
+
+	return db.close()
+}
+
+func (db *DB) close() error {
+	if !db.opened {
+		return nil
+	}
+
+	db.opened = false
+
+	db.freelist = nil
+
+	// Clear ops.
+	db.ops.writeAt = nil
+
+	// Close the mmap.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Close file handles.
+ if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + db.path = "" + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. 
+ db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. 
+ db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. 
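// (Editor's aside, not part of the vendored file: Batch, documented above,
// only pays off when several goroutines write concurrently, and the function
// passed to it may run more than once, so it must be idempotent. A hedged
// caller-side sketch; the helper name is illustrative and the "events" bucket
// is assumed to exist already:
//
//	func putBatched(db *bolt.DB, key, value []byte) error {
//		return db.Batch(func(tx *bolt.Tx) error {
//			return tx.Bucket([]byte("events")).Put(key, value)
//		})
//	}
//
// Many goroutines calling putBatched let Bolt coalesce the writes into fewer
// fsync'd transactions, bounded by MaxBatchSize and MaxBatchDelay.)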
+func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. 
+ InitialMmapSize int +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) 
} + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/boltdb/bolt/db_test.go b/vendor/github.com/boltdb/bolt/db_test.go new file mode 100644 index 000000000..3034d4f47 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db_test.go @@ -0,0 +1,1545 @@ +package bolt_test + +import ( + "bytes" + "encoding/binary" + "errors" + "flag" + "fmt" + "hash/fnv" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "testing" + "time" + "unsafe" + + "github.com/boltdb/bolt" +) + +var statsFlag = flag.Bool("stats", false, "show performance stats") + +// version is the data file format version. +const version = 2 + +// magic is the marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// pageSize is the size of one page in the data file. +const pageSize = 4096 + +// pageHeaderSize is the size of a page header. +const pageHeaderSize = 16 + +// meta represents a simplified version of a database meta page for testing. +type meta struct { + magic uint32 + version uint32 + _ uint32 + _ uint32 + _ [16]byte + _ uint64 + pgid uint64 + _ uint64 + checksum uint64 +} + +// Ensure that a database can be opened without error. +func TestOpen(t *testing.T) { + path := tempfile() + db, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } else if db == nil { + t.Fatal("expected db") + } + + if s := db.Path(); s != path { + t.Fatalf("unexpected path: %s", s) + } + + if err := db.Close(); err != nil { + t.Fatal(err) + } +} + +// Ensure that opening a database with a blank path returns an error. +func TestOpen_ErrPathRequired(t *testing.T) { + _, err := bolt.Open("", 0666, nil) + if err == nil { + t.Fatalf("expected error") + } +} + +// Ensure that opening a database with a bad path returns an error. +func TestOpen_ErrNotExists(t *testing.T) { + _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0666, nil) + if err == nil { + t.Fatal("expected error") + } +} + +// Ensure that opening a file that is not a Bolt database returns ErrInvalid. +func TestOpen_ErrInvalid(t *testing.T) { + path := tempfile() + + f, err := os.Create(path) + if err != nil { + t.Fatal(err) + } + if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + defer os.Remove(path) + + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that opening a file with two invalid versions returns ErrVersionMismatch. +func TestOpen_ErrVersionMismatch(t *testing.T) { + if pageSize != os.Getpagesize() { + t.Skip("page size mismatch") + } + + // Create empty database. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + // Close database. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + // Read data file. + buf, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + // Rewrite meta pages. + meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize])) + meta0.version++ + meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize])) + meta1.version++ + if err := ioutil.WriteFile(path, buf, 0666); err != nil { + t.Fatal(err) + } + + // Reopen data file. + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrVersionMismatch { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that opening a file with two invalid checksums returns ErrChecksum. 
+func TestOpen_ErrChecksum(t *testing.T) { + if pageSize != os.Getpagesize() { + t.Skip("page size mismatch") + } + + // Create empty database. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + // Close database. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + + // Read data file. + buf, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + // Rewrite meta pages. + meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize])) + meta0.pgid++ + meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize])) + meta1.pgid++ + if err := ioutil.WriteFile(path, buf, 0666); err != nil { + t.Fatal(err) + } + + // Reopen data file. + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that opening a database does not increase its size. +// https://github.com/boltdb/bolt/issues/291 +func TestOpen_Size(t *testing.T) { + // Open a data file. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + pagesize := db.Info().PageSize + + // Insert until we get above the minimum 4MB size. + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for i := 0; i < 10000; i++ { + if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Close database and grab the size. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + sz := fileSize(path) + if sz == 0 { + t.Fatalf("unexpected new file size: %d", sz) + } + + // Reopen database, update, and check size again. + db0, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db0.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db0.Close(); err != nil { + t.Fatal(err) + } + newSz := fileSize(path) + if newSz == 0 { + t.Fatalf("unexpected new file size: %d", newSz) + } + + // Compare the original size with the new size. + // db size might increase by a few page sizes due to the new small update. + if sz < newSz-5*int64(pagesize) { + t.Fatalf("unexpected file growth: %d => %d", sz, newSz) + } +} + +// Ensure that opening a database beyond the max step size does not increase its size. +// https://github.com/boltdb/bolt/issues/303 +func TestOpen_Size_Large(t *testing.T) { + if testing.Short() { + t.Skip("short mode") + } + + // Open a data file. + db := MustOpenDB() + path := db.Path() + defer db.MustClose() + + pagesize := db.Info().PageSize + + // Insert until we get above the minimum 4MB size. + var index uint64 + for i := 0; i < 10000; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for j := 0; j < 1000; j++ { + if err := b.Put(u64tob(index), make([]byte, 50)); err != nil { + t.Fatal(err) + } + index++ + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + // Close database and grab the size. + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } + sz := fileSize(path) + if sz == 0 { + t.Fatalf("unexpected new file size: %d", sz) + } else if sz < (1 << 30) { + t.Fatalf("expected larger initial size: %d", sz) + } + + // Reopen database, update, and check size again. 
+ db0, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db0.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) + }); err != nil { + t.Fatal(err) + } + if err := db0.Close(); err != nil { + t.Fatal(err) + } + + newSz := fileSize(path) + if newSz == 0 { + t.Fatalf("unexpected new file size: %d", newSz) + } + + // Compare the original size with the new size. + // db size might increase by a few page sizes due to the new small update. + if sz < newSz-5*int64(pagesize) { + t.Fatalf("unexpected file growth: %d => %d", sz, newSz) + } +} + +// Ensure that a re-opened database is consistent. +func TestOpen_Check(t *testing.T) { + path := tempfile() + + db, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } + + db, err = bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } +} + +// Ensure that write errors to the meta file handler during initialization are returned. +func TestOpen_MetaInitWriteError(t *testing.T) { + t.Skip("pending") +} + +// Ensure that a database that is too small returns an error. +func TestOpen_FileTooSmall(t *testing.T) { + path := tempfile() + + db, err := bolt.Open(path, 0666, nil) + if err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } + + // corrupt the database + if err := os.Truncate(path, int64(os.Getpagesize())); err != nil { + t.Fatal(err) + } + + db, err = bolt.Open(path, 0666, nil) + if err == nil || err.Error() != "file size too small" { + t.Fatalf("unexpected error: %s", err) + } +} + +// TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough +// to hold data from concurrent write transaction resolves the issue that +// read transaction blocks the write transaction and causes deadlock. +// This is a very hacky test since the mmap size is not exposed. +func TestDB_Open_InitialMmapSize(t *testing.T) { + path := tempfile() + defer os.Remove(path) + + initMmapSize := 1 << 31 // 2GB + testWriteSize := 1 << 27 // 134MB + + db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) + if err != nil { + t.Fatal(err) + } + + // create a long-running read transaction + // that never gets closed while writing + rtx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + + // create a write transaction + wtx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := wtx.CreateBucket([]byte("test")) + if err != nil { + t.Fatal(err) + } + + // and commit a large write + err = b.Put([]byte("foo"), make([]byte, testWriteSize)) + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + + go func() { + if err := wtx.Commit(); err != nil { + t.Fatal(err) + } + done <- struct{}{} + }() + + select { + case <-time.After(5 * time.Second): + t.Errorf("unexpected that the reader blocks writer") + case <-done: + } + + if err := rtx.Rollback(); err != nil { + t.Fatal(err) + } +} + +// Ensure that a database cannot open a transaction when it's not open. 
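// (Editor's aside, not part of the vendored file: TestDB_Open_InitialMmapSize
// above exercises the Options struct defined in db.go. A hedged sketch of
// opening a database with non-default options; the path and values are
// illustrative only:
//
//	db, err := bolt.Open("/tmp/example.db", 0600, &bolt.Options{
//		Timeout:         time.Second, // fail fast instead of blocking on the file lock
//		InitialMmapSize: 1 << 30,     // pre-size the mmap for long read transactions
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()
// )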
+func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { + var db bolt.DB + if _, err := db.Begin(false); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that a read-write transaction can be retrieved. +func TestDB_BeginRW(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } else if tx == nil { + t.Fatal("expected tx") + } + + if tx.DB() != db.DB { + t.Fatal("unexpected tx database") + } else if !tx.Writable() { + t.Fatal("expected writable tx") + } + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } +} + +// Ensure that opening a transaction while the DB is closed returns an error. +func TestDB_BeginRW_Closed(t *testing.T) { + var db bolt.DB + if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } +func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } + +// Ensure that a database cannot close while transactions are open. +func testDB_Close_PendingTx(t *testing.T, writable bool) { + db := MustOpenDB() + defer db.MustClose() + + // Start transaction. + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + // Open update in separate goroutine. + done := make(chan struct{}) + go func() { + if err := db.Close(); err != nil { + t.Fatal(err) + } + close(done) + }() + + // Ensure database hasn't closed. + time.Sleep(100 * time.Millisecond) + select { + case <-done: + t.Fatal("database closed too early") + default: + } + + // Commit transaction. + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + // Ensure database closed now. + time.Sleep(100 * time.Millisecond) + select { + case <-done: + default: + t.Fatal("database did not close") + } +} + +// Ensure a database can provide a transactional block. +func TestDB_Update(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("expected nil value, got: %v", v) + } + if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a closed database returns an error while running a transaction block +func TestDB_Update_Closed(t *testing.T) { + var db bolt.DB + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure a panic occurs while trying to commit a managed transaction. 
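// (Editor's aside, not part of the vendored file: the Manual* tests below pin
// down the managed-transaction rule documented on DB.Update and DB.View; the
// library commits or rolls back for you, so calling tx.Commit or tx.Rollback
// inside the closure panics. A hedged sketch of the intended usage, with
// bucket and key names illustrative only:
//
//	err := db.Update(func(tx *bolt.Tx) error {
//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
//		if err != nil {
//			return err // a non-nil return rolls the transaction back
//		}
//		return b.Put([]byte("foo"), []byte("bar")) // a nil return commits
//	})
// )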
+func TestDB_Update_ManualCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_Update_ManualRollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a panic occurs while trying to commit a managed transaction. +func TestDB_View_ManualCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.View(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_View_ManualRollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var panicked bool + if err := db.View(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + }() + return nil + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } +} + +// Ensure a write transaction that panics does not hold open locks. +func TestDB_Update_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Panic during update but recover. + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: update", r) + } + }() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + panic("omg") + }); err != nil { + t.Fatal(err) + } + }() + + // Verify we can update again. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Verify that our change persisted. + if err := db.Update(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure a database can return an error through a read-only transactional block. +func TestDB_View_Error(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.View(func(tx *bolt.Tx) error { + return errors.New("xxx") + }); err == nil || err.Error() != "xxx" { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure a read transaction that panics does not hold open locks. 
+func TestDB_View_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Panic during view transaction but recover. + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: view", r) + } + }() + + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + panic("omg") + }); err != nil { + t.Fatal(err) + } + }() + + // Verify that we can still use read transactions. + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that DB stats can be returned. +func TestDB_Stats(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + stats := db.Stats() + if stats.TxStats.PageCount != 2 { + t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount) + } else if stats.FreePageN != 0 { + t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN) + } else if stats.PendingPageN != 2 { + t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN) + } +} + +// Ensure that database pages are in expected order and type. +func TestDB_Consistency(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + } + + if err := db.Update(func(tx *bolt.Tx) error { + if p, _ := tx.Page(0); p == nil { + t.Fatal("expected page") + } else if p.Type != "meta" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(1); p == nil { + t.Fatal("expected page") + } else if p.Type != "meta" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(2); p == nil { + t.Fatal("expected page") + } else if p.Type != "free" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(3); p == nil { + t.Fatal("expected page") + } else if p.Type != "free" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(4); p == nil { + t.Fatal("expected page") + } else if p.Type != "leaf" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(5); p == nil { + t.Fatal("expected page") + } else if p.Type != "freelist" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(6); p != nil { + t.Fatal("unexpected page") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that DB stats can be subtracted from one another. 
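// (Editor's aside, not part of the vendored file: Stats.Sub, tested below, is
// meant for sampling counters over an interval. A hedged caller-side sketch;
// the sleep duration and log call are illustrative only:
//
//	prev := db.Stats()
//	time.Sleep(10 * time.Second)
//	diff := db.Stats().Sub(&prev)
//	log.Printf("read transactions started in window: %d", diff.TxN)
// )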
+func TestDBStats_Sub(t *testing.T) { + var a, b bolt.Stats + a.TxStats.PageCount = 3 + a.FreePageN = 4 + b.TxStats.PageCount = 10 + b.FreePageN = 14 + diff := b.Sub(&a) + if diff.TxStats.PageCount != 7 { + t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount) + } + + // free page stats are copied from the receiver and not subtracted + if diff.FreePageN != 14 { + t.Fatalf("unexpected FreePageN: %d", diff.FreePageN) + } +} + +// Ensure two functions can perform updates in a single batch. +func TestDB_Batch(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Iterate over multiple updates in separate goroutines. + n := 2 + ch := make(chan error) + for i := 0; i < n; i++ { + go func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + }(i) + } + + // Check all responses to make sure there's no error. + for i := 0; i < n; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < n; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestDB_Batch_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var sentinel int + var bork = &sentinel + var problem interface{} + var err error + + // Execute a function inside a batch that panics. + func() { + defer func() { + if p := recover(); p != nil { + problem = p + } + }() + err = db.Batch(func(tx *bolt.Tx) error { + panic(bork) + }) + }() + + // Verify there is no error. + if g, e := err, error(nil); g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } + // Verify the panic was captured. + if g, e := problem, bork; g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } +} + +func TestDB_BatchFull(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + const size = 3 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = size + // high enough to never trigger here + db.MaxBatchDelay = 1 * time.Hour + + go put(1) + go put(2) + + // Give the batch a chance to exhibit bugs. + time.Sleep(10 * time.Millisecond) + + // not triggered yet + select { + case <-ch: + t.Fatalf("batch triggered too early") + default: + } + + go put(3) + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. 
+ if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestDB_BatchTime(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + const size = 1 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = 1000 + db.MaxBatchDelay = 0 + + go put(1) + + // Batch must trigger by time alone. + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func ExampleDB_Update() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Execute several commands within a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Read the value back from a separate read-only transaction. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value of 'foo' is: bar +} + +func ExampleDB_View() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Insert data into a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("people")) + if err != nil { + return err + } + if err := b.Put([]byte("john"), []byte("doe")); err != nil { + return err + } + if err := b.Put([]byte("susy"), []byte("que")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Access data from within a read-only transactional block. + if err := db.View(func(tx *bolt.Tx) error { + v := tx.Bucket([]byte("people")).Get([]byte("john")) + fmt.Printf("John's last name is %s.\n", v) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // John's last name is doe. +} + +func ExampleDB_Begin_ReadOnly() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Create a bucket using a read-write transaction. 
+ if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + log.Fatal(err) + } + + // Create several keys in a transaction. + tx, err := db.Begin(true) + if err != nil { + log.Fatal(err) + } + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("john"), []byte("blue")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("abby"), []byte("red")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil { + log.Fatal(err) + } + if err := tx.Commit(); err != nil { + log.Fatal(err) + } + + // Iterate over the values in sorted key order. + tx, err = db.Begin(false) + if err != nil { + log.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("%s likes %s\n", k, v) + } + + if err := tx.Rollback(); err != nil { + log.Fatal(err) + } + + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // abby likes red + // john likes blue + // zephyr likes purple +} + +func BenchmarkDBBatchAutomatic(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Batch(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchSingle(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Update(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchManual10x100(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for major := 0; major < 10; major++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + insert100 := func(tx *bolt.Tx) error { + h := fnv.New32a() + buf := make([]byte, 4) + for minor := uint32(0); minor < 100; minor++ { + binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) + h.Reset() + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + b := tx.Bucket([]byte("bench")) + if err := 
b.Put(k, []byte("filler")); err != nil { + return err + } + } + return nil + } + if err := db.Update(insert100); err != nil { + b.Fatal(err) + } + }(uint32(major)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func validateBatchBench(b *testing.B, db *DB) { + var rollback = errors.New("sentinel error to cause rollback") + validate := func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte("bench")) + h := fnv.New32a() + buf := make([]byte, 4) + for id := uint32(0); id < 1000; id++ { + binary.LittleEndian.PutUint32(buf, id) + h.Reset() + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + v := bucket.Get(k) + if v == nil { + b.Errorf("not found id=%d key=%x", id, k) + continue + } + if g, e := v, []byte("filler"); !bytes.Equal(g, e) { + b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) + } + if err := bucket.Delete(k); err != nil { + return err + } + } + // should be empty now + c := bucket.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + b.Errorf("unexpected key: %x = %q", k, v) + } + return rollback + } + if err := db.Update(validate); err != nil && err != rollback { + b.Error(err) + } +} + +// DB is a test wrapper for bolt.DB. +type DB struct { + *bolt.DB +} + +// MustOpenDB returns a new, open DB at a temporary location. +func MustOpenDB() *DB { + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + panic(err) + } + return &DB{db} +} + +// Close closes the database and deletes the underlying file. +func (db *DB) Close() error { + // Log statistics. + if *statsFlag { + db.PrintStats() + } + + // Check database consistency after every test. + db.MustCheck() + + // Close database and remove file. + defer os.Remove(db.Path()) + return db.DB.Close() +} + +// MustClose closes the database and deletes the underlying file. Panic on error. +func (db *DB) MustClose() { + if err := db.Close(); err != nil { + panic(err) + } +} + +// PrintStats prints the database stats +func (db *DB) PrintStats() { + var stats = db.Stats() + fmt.Printf("[db] %-20s %-20s %-20s\n", + fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), + fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), + fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), + ) + fmt.Printf(" %-20s %-20s %-20s\n", + fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), + fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), + fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), + ) +} + +// MustCheck runs a consistency check on the database and panics if any errors are found. +func (db *DB) MustCheck() { + if err := db.Update(func(tx *bolt.Tx) error { + // Collect all the errors. + var errors []error + for err := range tx.Check() { + errors = append(errors, err) + if len(errors) > 10 { + break + } + } + + // If errors occurred, copy the DB and print the errors. + if len(errors) > 0 { + var path = tempfile() + if err := tx.CopyFile(path, 0600); err != nil { + panic(err) + } + + // Print errors. + fmt.Print("\n\n") + fmt.Printf("consistency check failed (%d errors)\n", len(errors)) + for _, err := range errors { + fmt.Println(err) + } + fmt.Println("") + fmt.Println("db saved to:") + fmt.Println(path) + fmt.Print("\n\n") + os.Exit(-1) + } + + return nil + }); err != nil && err != bolt.ErrDatabaseNotOpen { + panic(err) + } +} + +// CopyTempFile copies a database to a temporary file. 
+func (db *DB) CopyTempFile() { + path := tempfile() + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(path, 0600) + }); err != nil { + panic(err) + } + fmt.Println("db copied to: ", path) +} + +// tempfile returns a temporary file path. +func tempfile() string { + f, err := ioutil.TempFile("", "bolt-") + if err != nil { + panic(err) + } + if err := f.Close(); err != nil { + panic(err) + } + if err := os.Remove(f.Name()); err != nil { + panic(err) + } + return f.Name() +} + +// mustContainKeys checks that a bucket contains a given set of keys. +func mustContainKeys(b *bolt.Bucket, m map[string]string) { + found := make(map[string]string) + if err := b.ForEach(func(k, _ []byte) error { + found[string(k)] = "" + return nil + }); err != nil { + panic(err) + } + + // Check for keys found in bucket that shouldn't be there. + var keys []string + for k, _ := range found { + if _, ok := m[string(k)]; !ok { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) + } + + // Check for keys not found in bucket that should be there. + for k, _ := range m { + if _, ok := found[string(k)]; !ok { + keys = append(keys, k) + } + } + if len(keys) > 0 { + sort.Strings(keys) + panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) + } +} + +func trunc(b []byte, length int) []byte { + if length < len(b) { + return b[:length] + } + return b +} + +func truncDuration(d time.Duration) string { + return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") +} + +func fileSize(path string) int64 { + fi, err := os.Stat(path) + if err != nil { + return 0 + } + return fi.Size() +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +// u64tob converts a uint64 into an 8-byte slice. +func u64tob(v uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b +} + +// btou64 converts an 8-byte slice into an uint64. +func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go new file mode 100644 index 000000000..cc937845d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. 
Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
+
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database. However, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory which will cause a panic.
+
+
+*/
+package bolt
diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go
new file mode 100644
index 000000000..a3620a3eb
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/errors.go
@@ -0,0 +1,71 @@
+package bolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+	// is opened or after it is closed.
+	ErrDatabaseNotOpen = errors.New("database not open")
+
+	// ErrDatabaseOpen is returned when opening a database that is
+	// already open.
+	ErrDatabaseOpen = errors.New("database already open")
+
+	// ErrInvalid is returned when both meta pages on a database are invalid.
+	// This typically occurs when a file is not a bolt database.
+	ErrInvalid = errors.New("invalid database")
+
+	// ErrVersionMismatch is returned when the data file was created with a
+	// different version of Bolt.
+	ErrVersionMismatch = errors.New("version mismatch")
+
+	// ErrChecksum is returned when either meta page checksum does not match.
+	ErrChecksum = errors.New("checksum error")
+
+	// ErrTimeout is returned when a database cannot obtain an exclusive lock
+	// on the data file after the timeout passed to Open().
+	ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+	// ErrTxNotWritable is returned when performing a write operation on a
+	// read-only transaction.
+	ErrTxNotWritable = errors.New("tx not writable")
+
+	// ErrTxClosed is returned when committing or rolling back a transaction
+	// that has already been committed or rolled back.
+	ErrTxClosed = errors.New("tx closed")
+
+	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+	// read-only database.
+	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+	// ErrBucketNotFound is returned when trying to access a bucket that has
+	// not been created yet.
+	ErrBucketNotFound = errors.New("bucket not found")
+
+	// ErrBucketExists is returned when creating a bucket that already exists.
+	ErrBucketExists = errors.New("bucket already exists")
+
+	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
+	ErrBucketNameRequired = errors.New("bucket name required")
+
+	// ErrKeyRequired is returned when inserting a zero-length key.
+	ErrKeyRequired = errors.New("key required")
+
+	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+	ErrKeyTooLarge = errors.New("key too large")
+
+	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+	ErrValueTooLarge = errors.New("value too large")
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100644
index 000000000..aba48f58c
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,252 @@
+package bolt
+
+import (
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+	ids     []pgid          // all free and available free page ids.
+	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+	return &freelist{
+		pending: make(map[txid][]pgid),
+		cache:   make(map[pgid]bool),
+	}
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+	n := f.count()
+	if n >= 0xFFFF {
+		// The first element will be used to store the count. See freelist.write.
+		n++
+	}
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+	return f.free_count() + f.pending_count()
+}
+
+// free_count returns count of free pages
+func (f *freelist) free_count() int {
+	return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+	var count int
+	for _, list := range f.pending {
+		count += len(list)
+	}
+	return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+	m := make(pgids, 0, f.pending_count())
+	for _, list := range f.pending {
+		m = append(m, list...)
+	}
+	sort.Sort(m)
+	mergepgids(dst, f.ids, m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as is necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := pgid(0); i < pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. 
+ var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/freelist_test.go b/vendor/github.com/boltdb/bolt/freelist_test.go new file mode 100644 index 000000000..4e9b3a8db --- /dev/null +++ b/vendor/github.com/boltdb/bolt/freelist_test.go @@ -0,0 +1,158 @@ +package bolt + +import ( + "math/rand" + "reflect" + "sort" + "testing" + "unsafe" +) + +// Ensure that a page is added to a transaction's freelist. +func TestFreelist_free(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12}) + if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) + } +} + +// Ensure that a page and its overflow is added to a transaction's freelist. +func TestFreelist_free_overflow(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 3}) + if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) + } +} + +// Ensure that a transaction's free pages can be released. +func TestFreelist_release(t *testing.T) { + f := newFreelist() + f.free(100, &page{id: 12, overflow: 1}) + f.free(100, &page{id: 9}) + f.free(102, &page{id: 39}) + f.release(100) + f.release(101) + if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + f.release(102) + if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can find contiguous blocks of pages. +func TestFreelist_allocate(t *testing.T) { + f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} + if id := int(f.allocate(3)); id != 3 { + t.Fatalf("exp=3; got=%v", id) + } + if id := int(f.allocate(1)); id != 6 { + t.Fatalf("exp=6; got=%v", id) + } + if id := int(f.allocate(3)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(2)); id != 12 { + t.Fatalf("exp=12; got=%v", id) + } + if id := int(f.allocate(1)); id != 7 { + t.Fatalf("exp=7; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + if id := int(f.allocate(1)); id != 9 { + t.Fatalf("exp=9; got=%v", id) + } + if id := int(f.allocate(1)); id != 18 { + t.Fatalf("exp=18; got=%v", id) + } + if id := int(f.allocate(1)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can deserialize from a freelist page. +func TestFreelist_read(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = freelistPageFlag + page.count = 2 + + // Insert 2 page ids. 
+ ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) + ids[0] = 23 + ids[1] = 50 + + // Deserialize page into a freelist. + f := newFreelist() + f.read(page) + + // Ensure that there are two page ids in the freelist. + if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } +} + +// Ensure that a freelist can serialize into a freelist page. +func TestFreelist_write(t *testing.T) { + // Create a freelist and write it to a page. + var buf [4096]byte + f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} + f.pending[100] = []pgid{28, 11} + f.pending[101] = []pgid{3} + p := (*page)(unsafe.Pointer(&buf[0])) + if err := f.write(p); err != nil { + t.Fatal(err) + } + + // Read the page back out. + f2 := newFreelist() + f2.read(p) + + // Ensure that the freelist is correct. + // All pages should be present and in reverse order. + if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { + t.Fatalf("exp=%v; got=%v", exp, f2.ids) + } +} + +func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } +func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } +func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } +func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } + +func benchmark_FreelistRelease(b *testing.B, size int) { + ids := randomPgids(size) + pending := randomPgids(len(ids) / 400) + b.ResetTimer() + for i := 0; i < b.N; i++ { + f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} + f.release(1) + } +} + +func randomPgids(n int) []pgid { + rand.Seed(42) + pgids := make(pgids, n) + for i := range pgids { + pgids[i] = pgid(rand.Int63()) + } + sort.Sort(pgids) + return pgids +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 000000000..159318b22 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. 
+func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. 
+func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. 
+// It returns the index as well as the size of the first page. +// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. 
+ for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. 
+ var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/node_test.go b/vendor/github.com/boltdb/bolt/node_test.go new file mode 100644 index 000000000..fa5d10f99 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node_test.go @@ -0,0 +1,156 @@ +package bolt + +import ( + "testing" + "unsafe" +) + +// Ensure that a node can insert a key/value. +func TestNode_put(t *testing.T) { + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} + n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) + n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) + n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) + n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) + + if len(n.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if n.inodes[2].flags != uint32(leafPageFlag) { + t.Fatalf("not a leaf: %d", n.inodes[2].flags) + } +} + +// Ensure that a node can deserialize from a leaf page. +func TestNode_read_LeafPage(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = leafPageFlag + page.count = 2 + + // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 + nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) + nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 + nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 + + // Write data for the nodes at the end. + data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) + copy(data[:], []byte("barfooz")) + copy(data[7:], []byte("helloworldbye")) + + // Deserialize page into a leaf. + n := &node{} + n.read(page) + + // Check that there are two inodes with correct data. 
+ if !n.isLeaf { + t.Fatal("expected leaf") + } + if len(n.inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } +} + +// Ensure that a node can serialize into a leaf page. +func TestNode_write_LeafPage(t *testing.T) { + // Create a node. + n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) + n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) + n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) + + // Write it to a page. + var buf [4096]byte + p := (*page)(unsafe.Pointer(&buf[0])) + n.write(p) + + // Read the page back in. + n2 := &node{} + n2.read(p) + + // Check that the two pages are the same. + if len(n2.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n2.inodes)) + } + if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } +} + +// Ensure that a node can split into appropriate subgroups. +func TestNode_split(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) + + // Split between 2 & 3. + n.split(100) + + var parent = n.parent + if len(parent.children) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children)) + } + if len(parent.children[0].inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) + } + if len(parent.children[1].inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) + } +} + +// Ensure that a page with the minimum number of inodes just returns a single node. +func TestNode_split_MinKeys(t *testing.T) { + // Create a node. + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + + // Split. + n.split(20) + if n.parent != nil { + t.Fatalf("expected nil parent") + } +} + +// Ensure that a node that has keys that all fit on a page just returns one leaf. +func TestNode_split_SinglePage(t *testing.T) { + // Create a node. 
+ n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) + n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) + + // Split. + n.split(4096) + if n.parent != nil { + t.Fatalf("expected nil parent") + } +} diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 000000000..cde403ae8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,197 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. 
+func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/github.com/boltdb/bolt/page_test.go b/vendor/github.com/boltdb/bolt/page_test.go new file mode 100644 index 000000000..59f4a30ed --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page_test.go @@ -0,0 +1,72 @@ +package bolt + +import ( + "reflect" + "sort" + "testing" + "testing/quick" +) + +// Ensure that the page type can be returned in human readable format. +func TestPage_typ(t *testing.T) { + if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { + t.Fatalf("exp=branch; got=%v", typ) + } + if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { + t.Fatalf("exp=leaf; got=%v", typ) + } + if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { + t.Fatalf("exp=meta; got=%v", typ) + } + if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { + t.Fatalf("exp=freelist; got=%v", typ) + } + if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { + t.Fatalf("exp=unknown<4e20>; got=%v", typ) + } +} + +// Ensure that the hexdump debugging function doesn't blow up. 
+func TestPage_dump(t *testing.T) { + (&page{id: 256}).hexdump(16) +} + +func TestPgids_merge(t *testing.T) { + a := pgids{4, 5, 6, 10, 11, 12, 13, 27} + b := pgids{1, 3, 8, 9, 25, 30} + c := a.merge(b) + if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { + t.Errorf("mismatch: %v", c) + } + + a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} + b = pgids{8, 9, 25, 30} + c = a.merge(b) + if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { + t.Errorf("mismatch: %v", c) + } +} + +func TestPgids_merge_quick(t *testing.T) { + if err := quick.Check(func(a, b pgids) bool { + // Sort incoming lists. + sort.Sort(a) + sort.Sort(b) + + // Merge the two lists together. + got := a.merge(b) + + // The expected value should be the two lists combined and sorted. + exp := append(a, b...) + sort.Sort(exp) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) + return false + } + + return true + }, nil); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/boltdb/bolt/quick_test.go b/vendor/github.com/boltdb/bolt/quick_test.go new file mode 100644 index 000000000..9e27792e1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/quick_test.go @@ -0,0 +1,87 @@ +package bolt_test + +import ( + "bytes" + "flag" + "fmt" + "math/rand" + "os" + "reflect" + "testing/quick" + "time" +) + +// testing/quick defaults to 5 iterations and a random seed. +// You can override these settings from the command line: +// +// -quick.count The number of iterations to perform. +// -quick.seed The seed to use for randomizing. +// -quick.maxitems The maximum number of items to insert into a DB. +// -quick.maxksize The maximum size of a key. +// -quick.maxvsize The maximum size of a value. +// + +var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int + +func init() { + flag.IntVar(&qcount, "quick.count", 5, "") + flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") + flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") + flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") + flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") + flag.Parse() + fmt.Fprintln(os.Stderr, "seed:", qseed) + fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) +} + +func qconfig() *quick.Config { + return &quick.Config{ + MaxCount: qcount, + Rand: rand.New(rand.NewSource(int64(qseed))), + } +} + +type testdata []testdataitem + +func (t testdata) Len() int { return len(t) } +func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } + +func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { + n := rand.Intn(qmaxitems-1) + 1 + items := make(testdata, n) + used := make(map[string]bool) + for i := 0; i < n; i++ { + item := &items[i] + // Ensure that keys are unique by looping until we find one that we have not already used. 
+ for { + item.Key = randByteSlice(rand, 1, qmaxksize) + if !used[string(item.Key)] { + used[string(item.Key)] = true + break + } + } + item.Value = randByteSlice(rand, 0, qmaxvsize) + } + return reflect.ValueOf(items) +} + +type revtestdata []testdataitem + +func (t revtestdata) Len() int { return len(t) } +func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } + +type testdataitem struct { + Key []byte + Value []byte +} + +func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { + n := rand.Intn(maxSize-minSize) + minSize + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} diff --git a/vendor/github.com/boltdb/bolt/simulation_test.go b/vendor/github.com/boltdb/bolt/simulation_test.go new file mode 100644 index 000000000..383101655 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/simulation_test.go @@ -0,0 +1,329 @@ +package bolt_test + +import ( + "bytes" + "fmt" + "math/rand" + "sync" + "testing" + + "github.com/boltdb/bolt" +) + +func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) } +func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } +func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } +func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } +func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } + +func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } +func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } +func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } +func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } + +func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } +func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } +func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } + +func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } + +// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. +func testSimulate(t *testing.T, threadCount, parallelism int) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + rand.Seed(int64(qseed)) + + // A list of operations that readers and writers can perform. + var readerHandlers = []simulateHandler{simulateGetHandler} + var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} + + var versions = make(map[int]*QuickDB) + versions[1] = NewQuickDB() + + db := MustOpenDB() + defer db.MustClose() + + var mutex sync.Mutex + + // Run n threads in parallel, each with their own operation. + var wg sync.WaitGroup + var threads = make(chan bool, parallelism) + var i int + for { + threads <- true + wg.Add(1) + writable := ((rand.Int() % 100) < 20) // 20% writers + + // Choose an operation to execute. + var handler simulateHandler + if writable { + handler = writerHandlers[rand.Intn(len(writerHandlers))] + } else { + handler = readerHandlers[rand.Intn(len(readerHandlers))] + } + + // Execute a thread for the given operation. + go func(writable bool, handler simulateHandler) { + defer wg.Done() + + // Start transaction. + tx, err := db.Begin(writable) + if err != nil { + t.Fatal("tx begin: ", err) + } + + // Obtain current state of the dataset. 
+ mutex.Lock() + var qdb = versions[tx.ID()] + if writable { + qdb = versions[tx.ID()-1].Copy() + } + mutex.Unlock() + + // Make sure we commit/rollback the tx at the end and update the state. + if writable { + defer func() { + mutex.Lock() + versions[tx.ID()] = qdb + mutex.Unlock() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + }() + } else { + defer func() { _ = tx.Rollback() }() + } + + // Ignore operation if we don't have data yet. + if qdb == nil { + return + } + + // Execute handler. + handler(tx, qdb) + + // Release a thread back to the scheduling loop. + <-threads + }(writable, handler) + + i++ + if i > threadCount { + break + } + } + + // Wait until all threads are done. + wg.Wait() +} + +type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) + +// Retrieves a key from the database and verifies that it is what is expected. +func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { + // Randomly retrieve an existing exist. + keys := qdb.Rand() + if len(keys) == 0 { + return + } + + // Retrieve root bucket. + b := tx.Bucket(keys[0]) + if b == nil { + panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) + } + + // Drill into nested buckets. + for _, key := range keys[1 : len(keys)-1] { + b = b.Bucket(key) + if b == nil { + panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) + } + } + + // Verify key/value on the final bucket. + expected := qdb.Get(keys) + actual := b.Get(keys[len(keys)-1]) + if !bytes.Equal(actual, expected) { + fmt.Println("=== EXPECTED ===") + fmt.Println(expected) + fmt.Println("=== ACTUAL ===") + fmt.Println(actual) + fmt.Println("=== END ===") + panic("value mismatch") + } +} + +// Inserts a key into the database. +func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { + var err error + keys, value := randKeys(), randValue() + + // Retrieve root bucket. + b := tx.Bucket(keys[0]) + if b == nil { + b, err = tx.CreateBucket(keys[0]) + if err != nil { + panic("create bucket: " + err.Error()) + } + } + + // Create nested buckets, if necessary. + for _, key := range keys[1 : len(keys)-1] { + child := b.Bucket(key) + if child != nil { + b = child + } else { + b, err = b.CreateBucket(key) + if err != nil { + panic("create bucket: " + err.Error()) + } + } + } + + // Insert into database. + if err := b.Put(keys[len(keys)-1], value); err != nil { + panic("put: " + err.Error()) + } + + // Insert into in-memory database. + qdb.Put(keys, value) +} + +// QuickDB is an in-memory database that replicates the functionality of the +// Bolt DB type except that it is entirely in-memory. It is meant for testing +// that the Bolt database is consistent. +type QuickDB struct { + sync.RWMutex + m map[string]interface{} +} + +// NewQuickDB returns an instance of QuickDB. +func NewQuickDB() *QuickDB { + return &QuickDB{m: make(map[string]interface{})} +} + +// Get retrieves the value at a key path. +func (db *QuickDB) Get(keys [][]byte) []byte { + db.RLock() + defer db.RUnlock() + + m := db.m + for _, key := range keys[:len(keys)-1] { + value := m[string(key)] + if value == nil { + return nil + } + switch value := value.(type) { + case map[string]interface{}: + m = value + case []byte: + return nil + } + } + + // Only return if it's a simple value. + if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { + return value + } + return nil +} + +// Put inserts a value into a key path. +func (db *QuickDB) Put(keys [][]byte, value []byte) { + db.Lock() + defer db.Unlock() + + // Build buckets all the way down the key path. 
+ m := db.m + for _, key := range keys[:len(keys)-1] { + if _, ok := m[string(key)].([]byte); ok { + return // Keypath intersects with a simple value. Do nothing. + } + + if m[string(key)] == nil { + m[string(key)] = make(map[string]interface{}) + } + m = m[string(key)].(map[string]interface{}) + } + + // Insert value into the last key. + m[string(keys[len(keys)-1])] = value +} + +// Rand returns a random key path that points to a simple value. +func (db *QuickDB) Rand() [][]byte { + db.RLock() + defer db.RUnlock() + if len(db.m) == 0 { + return nil + } + var keys [][]byte + db.rand(db.m, &keys) + return keys +} + +func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { + i, index := 0, rand.Intn(len(m)) + for k, v := range m { + if i == index { + *keys = append(*keys, []byte(k)) + if v, ok := v.(map[string]interface{}); ok { + db.rand(v, keys) + } + return + } + i++ + } + panic("quickdb rand: out-of-range") +} + +// Copy copies the entire database. +func (db *QuickDB) Copy() *QuickDB { + db.RLock() + defer db.RUnlock() + return &QuickDB{m: db.copy(db.m)} +} + +func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { + clone := make(map[string]interface{}, len(m)) + for k, v := range m { + switch v := v.(type) { + case map[string]interface{}: + clone[k] = db.copy(v) + default: + clone[k] = v + } + } + return clone +} + +func randKey() []byte { + var min, max = 1, 1024 + n := rand.Intn(max-min) + min + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} + +func randKeys() [][]byte { + var keys [][]byte + var count = rand.Intn(2) + 2 + for i := 0; i < count; i++ { + keys = append(keys, randKey()) + } + return keys +} + +func randValue() []byte { + n := rand.Intn(8192) + b := make([]byte, n) + for i := 0; i < n; i++ { + b[i] = byte(rand.Intn(255)) + } + return b +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 000000000..6700308a2 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,684 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. 
+ tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. 
+ var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. 
+// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. 
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
+ tx.stats.Write++ + + return nil +} + +// page returns a reference to the page with a given id. +// If page has been written to then a temporary buffered page is returned. +func (tx *Tx) page(id pgid) *page { + // Check the dirty pages first. + if tx.pages != nil { + if p, ok := tx.pages[id]; ok { + return p + } + } + + // Otherwise return directly from the mmap. + return tx.db.page(id) +} + +// forEachPage iterates over every page within a given page and executes a function. +func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { + p := tx.page(pgid) + + // Execute function. + fn(p, depth) + + // Recursively loop over children. + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + tx.forEachPage(elem.pgid, depth+1, fn) + } + } +} + +// Page returns page information for a given page number. +// This is only safe for concurrent use when used by a writable transaction. +func (tx *Tx) Page(id int) (*PageInfo, error) { + if tx.db == nil { + return nil, ErrTxClosed + } else if pgid(id) >= tx.meta.pgid { + return nil, nil + } + + // Build the page info. + p := tx.db.page(pgid(id)) + info := &PageInfo{ + ID: id, + Count: int(p.count), + OverflowCount: int(p.overflow), + } + + // Determine the type (or if it's free). + if tx.db.freelist.freed(pgid(id)) { + info.Type = "free" + } else { + info.Type = p.typ() + } + + return info, nil +} + +// TxStats represents statistics about the actions performed by the transaction. +type TxStats struct { + // Page statistics. + PageCount int // number of page allocations + PageAlloc int // total bytes allocated + + // Cursor statistics. + CursorCount int // number of cursors created + + // Node statistics + NodeCount int // number of node allocations + NodeDeref int // number of node dereferences + + // Rebalance statistics. + Rebalance int // number of node rebalances + RebalanceTime time.Duration // total time spent rebalancing + + // Split/Spill statistics. + Split int // number of nodes split + Spill int // number of nodes spilled + SpillTime time.Duration // total time spent spilling + + // Write statistics. + Write int // number of writes performed + WriteTime time.Duration // total time spent writing to disk +} + +func (s *TxStats) add(other *TxStats) { + s.PageCount += other.PageCount + s.PageAlloc += other.PageAlloc + s.CursorCount += other.CursorCount + s.NodeCount += other.NodeCount + s.NodeDeref += other.NodeDeref + s.Rebalance += other.Rebalance + s.RebalanceTime += other.RebalanceTime + s.Split += other.Split + s.Spill += other.Spill + s.SpillTime += other.SpillTime + s.Write += other.Write + s.WriteTime += other.WriteTime +} + +// Sub calculates and returns the difference between two sets of transaction stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. 
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/boltdb/bolt/tx_test.go b/vendor/github.com/boltdb/bolt/tx_test.go new file mode 100644 index 000000000..2201e7928 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx_test.go @@ -0,0 +1,716 @@ +package bolt_test + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "testing" + + "github.com/boltdb/bolt" +) + +// Ensure that committing a closed transaction returns an error. +func TestTx_Commit_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + if err := tx.Commit(); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that rolling back a closed transaction returns an error. +func TestTx_Rollback_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if err := tx.Rollback(); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that committing a read-only transaction returns an error. +func TestTx_Commit_ErrTxNotWritable(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != bolt.ErrTxNotWritable { + t.Fatal(err) + } +} + +// Ensure that a transaction can retrieve a cursor on the root bucket. +func TestTx_Cursor(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("woojits")); err != nil { + t.Fatal(err) + } + + c := tx.Cursor() + if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } + + if k, v := c.Next(); k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", k) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that creating a bucket with a read-only transaction returns an error. 
+func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.View(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("foo")) + if err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that creating a bucket on a closed transaction returns an error. +func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that a Tx can retrieve a bucket. +func TestTx_Bucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a Tx retrieving a non-existent key returns nil. +func TestTx_Get_NotFound(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if b.Get([]byte("no_such_key")) != nil { + t.Fatal("expected nil value") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can be created and retrieved. +func TestTx_CreateBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Read the bucket through a separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can be created if it doesn't already exist. +func TestTx_CreateBucketIfNotExists(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + // Create bucket. + if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } + + // Create bucket again. + if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } + + return nil + }); err != nil { + t.Fatal(err) + } + + // Read the bucket through a separate transaction. + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure transaction returns an error if creating an unnamed bucket. 
+func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + + if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket cannot be created twice. +func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Create the same bucket again. + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket is created with a non-blank name. +func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that a bucket can be deleted. +func TestTx_DeleteBucket(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + // Create a bucket and add a value. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Delete the bucket and make sure we can't get the value. + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + if tx.Bucket([]byte("widgets")) != nil { + t.Fatal("unexpected bucket") + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + // Create the bucket again and make sure there's not a phantom value. + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("unexpected phantom value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that deleting a bucket on a closed transaction returns an error. +func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } +} + +// Ensure that deleting a bucket with a read-only transaction returns an error. +func TestTx_DeleteBucket_ReadOnly(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that nothing happens when deleting a bucket that doesn't exist. 
+func TestTx_DeleteBucket_NotFound(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that no error is returned when a tx.ForEach function does not return +// an error. +func TestTx_ForEach_NoError(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return nil + }); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that an error is returned when a tx.ForEach function returns an error. +func TestTx_ForEach_WithError(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + marker := errors.New("marker") + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return marker + }); err != marker { + t.Fatalf("unexpected error: %s", err) + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +// Ensure that Tx commit handlers are called after a transaction successfully commits. +func TestTx_OnCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var x int + if err := db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } else if x != 3 { + t.Fatalf("unexpected x: %d", x) + } +} + +// Ensure that Tx commit handlers are NOT called after a transaction rolls back. +func TestTx_OnCommit_Rollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var x int + if err := db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return errors.New("rollback this commit") + }); err == nil || err.Error() != "rollback this commit" { + t.Fatalf("unexpected error: %s", err) + } else if x != 0 { + t.Fatalf("unexpected x: %d", x) + } +} + +// Ensure that the database can be copied to a file path. 
+func TestTx_CopyFile(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + path := tempfile() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(path, 0600) + }); err != nil { + t.Fatal(err) + } + + db2, err := bolt.Open(path, 0600, nil) + if err != nil { + t.Fatal(err) + } + + if err := db2.View(func(tx *bolt.Tx) error { + if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { + t.Fatalf("unexpected value: %v", v) + } + if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db2.Close(); err != nil { + t.Fatal(err) + } +} + +type failWriterError struct{} + +func (failWriterError) Error() string { + return "error injected for tests" +} + +type failWriter struct { + // fail after this many bytes + After int +} + +func (f *failWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > f.After { + n = f.After + err = failWriterError{} + } + f.After -= n + return n, err +} + +// Ensure that Copy handles write errors right. +func TestTx_CopyFile_Error_Meta(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + return tx.Copy(&failWriter{}) + }); err == nil || err.Error() != "meta 0 copy: error injected for tests" { + t.Fatalf("unexpected error: %v", err) + } +} + +// Ensure that Copy handles write errors right. +func TestTx_CopyFile_Error_Normal(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + return tx.Copy(&failWriter{3 * db.Info().PageSize}) + }); err == nil || err.Error() != "error injected for tests" { + t.Fatalf("unexpected error: %v", err) + } +} + +func ExampleTx_Rollback() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Create a bucket. + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + log.Fatal(err) + } + + // Set a value for a key. + if err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + }); err != nil { + log.Fatal(err) + } + + // Update the key but rollback the transaction so it never saves. 
+ tx, err := db.Begin(true) + if err != nil { + log.Fatal(err) + } + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("foo"), []byte("baz")); err != nil { + log.Fatal(err) + } + if err := tx.Rollback(); err != nil { + log.Fatal(err) + } + + // Ensure that our original value is still set. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value for 'foo' is still: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value for 'foo' is still: bar +} + +func ExampleTx_CopyFile() { + // Open the database. + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } + defer os.Remove(db.Path()) + + // Create a bucket and a key. + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } + return nil + }); err != nil { + log.Fatal(err) + } + + // Copy the database to another file. + toFile := tempfile() + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(toFile, 0666) + }); err != nil { + log.Fatal(err) + } + defer os.Remove(toFile) + + // Open the cloned database. + db2, err := bolt.Open(toFile, 0666, nil) + if err != nil { + log.Fatal(err) + } + + // Ensure that the key exists in the copy. + if err := db2.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value for 'foo' in the clone is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + if err := db2.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // The value for 'foo' in the clone is: bar +} diff --git a/vendor/github.com/docker/libkv/.travis.yml b/vendor/github.com/docker/libkv/.travis.yml new file mode 100644 index 000000000..a7a3bcffc --- /dev/null +++ b/vendor/github.com/docker/libkv/.travis.yml @@ -0,0 +1,31 @@ +language: go + +go: + - 1.7.1 + +# let us have speedy Docker-based Travis workers +sudo: false + +before_install: + # Symlink below is needed for Travis CI to work correctly on personal forks of libkv + - ln -s $HOME/gopath/src/github.com/${TRAVIS_REPO_SLUG///libkv/} $HOME/gopath/src/github.com/docker + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get github.com/golang/lint/golint + - go get github.com/GeertJohan/fgt + +before_script: + - script/travis_consul.sh 0.6.3 + - script/travis_etcd.sh 3.0.0 + - script/travis_zk.sh 3.5.1-alpha + +script: + - ./consul agent -server -bootstrap -advertise=127.0.0.1 -data-dir /tmp/consul -config-file=./config.json 1>/dev/null & + - ./etcd/etcd --listen-client-urls 'http://0.0.0.0:4001' --advertise-client-urls 'http://127.0.0.1:4001' >/dev/null 2>&1 & + - ./zk/bin/zkServer.sh start ./zk/conf/zoo.cfg 1> /dev/null + - script/validate-gofmt + - go vet ./... + - fgt golint ./... + - go test -v -race ./... 
+ - script/coverage + - goveralls -service=travis-ci -coverprofile=goverage.report diff --git a/vendor/github.com/docker/libkv/LICENSE.code b/vendor/github.com/docker/libkv/LICENSE.code new file mode 100644 index 000000000..34c4ea7c5 --- /dev/null +++ b/vendor/github.com/docker/libkv/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/libkv/LICENSE.docs b/vendor/github.com/docker/libkv/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/libkv/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/libkv/MAINTAINERS b/vendor/github.com/docker/libkv/MAINTAINERS new file mode 100644 index 000000000..4a8bbc613 --- /dev/null +++ b/vendor/github.com/docker/libkv/MAINTAINERS @@ -0,0 +1,40 @@ +# Libkv maintainers file +# +# This file describes who runs the docker/libkv project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aluzzardi", + "sanimej", + "vieux", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.sanimej] + Name = "Santhosh Manohar" + Email = "santhosh@docker.com" + GitHub = "sanimej" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" diff --git a/vendor/github.com/docker/libkv/README.md b/vendor/github.com/docker/libkv/README.md new file mode 100644 index 000000000..ff2cc446d --- /dev/null +++ b/vendor/github.com/docker/libkv/README.md @@ -0,0 +1,107 @@ +# libkv + +[![GoDoc](https://godoc.org/github.com/docker/libkv?status.png)](https://godoc.org/github.com/docker/libkv) +[![Build Status](https://travis-ci.org/docker/libkv.svg?branch=master)](https://travis-ci.org/docker/libkv) +[![Coverage Status](https://coveralls.io/repos/docker/libkv/badge.svg)](https://coveralls.io/r/docker/libkv) +[![Go Report Card](https://goreportcard.com/badge/github.com/docker/libkv)](https://goreportcard.com/report/github.com/docker/libkv) + +`libkv` provides a `Go` native library to store metadata. + +The goal of `libkv` is to abstract common store operations for multiple distributed and/or local Key/Value store backends. + +For example, you can use it to store your metadata or for service discovery to register machines and endpoints inside your cluster. + +You can also easily implement a generic *Leader Election* on top of it (see the [docker/leadership](https://github.com/docker/leadership) repository). + +As of now, `libkv` offers support for `Consul`, `Etcd`, `Zookeeper` (**Distributed** store) and `BoltDB` (**Local** store). + +## Usage + +`libkv` is meant to be used as an abstraction layer over existing distributed Key/Value stores. It is especially useful if you plan to support `consul`, `etcd` and `zookeeper` using the same codebase. + +It is ideal if you plan for something written in Go that should support: + +- A simple metadata storage, distributed or local +- A lightweight discovery service for your nodes +- A distributed lock mechanism + +You can find examples of usage for `libkv` under in `docs/examples.go`. Optionally you can also take a look at the `docker/swarm` or `docker/libnetwork` repositories which are using `docker/libkv` for all the use cases listed above. + +## Supported versions + +`libkv` supports: +- Consul versions >= `0.5.1` because it uses Sessions with `Delete` behavior for the use of `TTLs` (mimics zookeeper's Ephemeral node support), If you don't plan to use `TTLs`: you can use Consul version `0.4.0+`. +- Etcd versions >= `2.0` because it uses the new `coreos/etcd/client`, this might change in the future as the support for `APIv3` comes along and adds more capabilities. +- Zookeeper versions >= `3.4.5`. Although this might work with previous version but this remains untested as of now. +- Boltdb, which shouldn't be subject to any version dependencies. 
+ +## Interface + +A **storage backend** in `libkv` should implement (fully or partially) this interface: + +```go +type Store interface { + Put(key string, value []byte, options *WriteOptions) error + Get(key string) (*KVPair, error) + Delete(key string) error + Exists(key string) (bool, error) + Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) + WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) + NewLock(key string, options *LockOptions) (Locker, error) + List(directory string) ([]*KVPair, error) + DeleteTree(directory string) error + AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) + AtomicDelete(key string, previous *KVPair) (bool, error) + Close() +} +``` + +## Compatibility matrix + +Backend drivers in `libkv` are generally divided between **local drivers** and **distributed drivers**. Distributed backends offer enhanced capabilities like `Watches` and/or distributed `Locks`. + +Local drivers are usually used in complement to the distributed drivers to store informations that only needs to be available locally. + +| Calls | Consul | Etcd | Zookeeper | BoltDB | +|-----------------------|:----------:|:------:|:-----------:|:--------:| +| Put | X | X | X | X | +| Get | X | X | X | X | +| Delete | X | X | X | X | +| Exists | X | X | X | X | +| Watch | X | X | X | | +| WatchTree | X | X | X | | +| NewLock (Lock/Unlock) | X | X | X | | +| List | X | X | X | X | +| DeleteTree | X | X | X | X | +| AtomicPut | X | X | X | X | +| Close | X | X | X | X | + +## Limitations + +Distributed Key/Value stores often have different concepts for managing and formatting keys and their associated values. Even though `libkv` tries to abstract those stores aiming for some consistency, in some cases it can't be applied easily. + +Please refer to the `docs/compatibility.md` to see what are the special cases for cross-backend compatibility. + +Other than those special cases, you should expect the same experience for basic operations like `Get`/`Put`, etc. + +Calls like `WatchTree` may return different events (or number of events) depending on the backend (for now, `Etcd` and `Consul` will likely return more events than `Zookeeper` that you should triage properly). Although you should be able to use it successfully to watch on events in an interchangeable way (see the **docker/leadership** repository or the **pkg/discovery/kv** package in **docker/docker**). + +## TLS + +Only `Consul` and `etcd` have support for TLS and you should build and provide your own `config.TLS` object to feed the client. Support is planned for `zookeeper`. + +## Roadmap + +- Make the API nicer to use (using `options`) +- Provide more options (`consistency` for example) +- Improve performance (remove extras `Get`/`List` operations) +- Better key formatting +- New backends? + +## Contributing + +Want to hack on libkv? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. + +## Copyright and license + +Copyright © 2014-2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. 
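The `Store` interface listed above is what makes backend-agnostic code possible. The following is a minimal sketch (editor's illustration, not part of the vendored README) of application code written purely against `store.Store`, so the same helper runs on Consul, Etcd, Zookeeper or BoltDB once the backend is registered; the endpoint `localhost:8500`, the key name and the helper name are illustrative assumptions only.

```go
package main

import (
	"log"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
)

// putAndRead is written against the generic store.Store interface, so it
// works unchanged with any registered backend (Consul, Etcd, Zookeeper, BoltDB).
func putAndRead(kv store.Store, key string, value []byte) ([]byte, error) {
	if err := kv.Put(key, value, nil); err != nil {
		return nil, err
	}
	pair, err := kv.Get(key)
	if err != nil {
		return nil, err
	}
	return pair.Value, nil
}

func main() {
	consul.Register() // register only the backend(s) this program uses

	// Endpoint and timeout are illustrative; a local Consul agent on the
	// default port is assumed here.
	kv, err := libkv.NewStore(
		store.CONSUL,
		[]string{"localhost:8500"},
		&store.Config{ConnectionTimeout: 10 * time.Second},
	)
	if err != nil {
		log.Fatalf("cannot create store: %v", err)
	}
	defer kv.Close()

	val, err := putAndRead(kv, "example/key", []byte("bar"))
	if err != nil {
		log.Fatalf("put/get failed: %v", err)
	}
	log.Printf("read back: %s", val)
}
```

Only the `NewStore` call changes when switching backends; BoltDB, for instance, takes a file-path endpoint and additionally requires the `Bucket` option shown later in `store/boltdb/boltdb.go`.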
diff --git a/vendor/github.com/docker/libkv/docs/compatibility.md b/vendor/github.com/docker/libkv/docs/compatibility.md new file mode 100644 index 000000000..c4f27e9c4 --- /dev/null +++ b/vendor/github.com/docker/libkv/docs/compatibility.md @@ -0,0 +1,82 @@ +#Cross-Backend Compatibility + +The value of `libkv` is not to duplicate the code for programs that should support multiple distributed K/V stores like the classic `Consul`/`etcd`/`zookeeper` trio. + +This document provides with general guidelines for users willing to support those backends with the same code using `libkv`. + +Please note that most of those workarounds are going to disappear in the future with `etcd` APIv3. + +##Etcd directory/key distinction + +`etcd` with APIv2 makes the distinction between keys and directories. The result with `libkv` is that when using the etcd driver: + +- You cannot store values on directories +- You cannot invoke `WatchTree` (watching on child values), on a regular key + +This is fundamentaly different than `Consul` and `zookeeper` which are more permissive and allow the same set of operations on keys and directories (called a Node for zookeeper). + +Apiv3 is in the work for `etcd`, which removes this key/directory distinction, but until then you should follow these workarounds to make your `libkv` code work across backends. + +###Put + +`etcd` cannot put values on directories, so this puts a major restriction compared to `Consul` and `zookeeper`. + +If you want to support all those three backends, you should make sure to only put data on **leaves**. + +For example: + +```go +_ := kv.Put("path/to/key/bis", []byte("foo"), nil) +_ := kv.Put("path/to/key", []byte("bar"), nil) +``` + +Will work on `Consul` and `zookeeper` but fail for `etcd`. This is because the first `Put` in the case of `etcd` will recursively create the directory hierarchy and `path/to/key` is now considered as a directory. Thus, values should always be stored on leaves if the support for the three backends is planned. + +###WatchTree + +When initializing the `WatchTree`, the natural way to do so is through the following code: + +```go +key := "path/to/key" +if !kv.Exists(key) { + err := kv.Put(key, []byte("data"), nil) +} +events, err := kv.WatchTree(key, nil) +``` + +The code above will not work across backends and etcd will fail on the `WatchTree` call. What happens exactly: + +- `Consul` will create a regular `key` because it has no distinction between directories and keys. This is not an issue as we can invoke `WatchTree` on regular keys. +- `zookeeper` is going to create a `node` that can either be a directory or a key during the lifetime of a program but it does not matter as a directory can hold values and be watchable like a regular key. +- `etcd` is going to create a regular `key`. We cannot invoke `WatchTree` on regular keys using etcd. + +To be cross-compatible between those three backends for `WatchTree`, we need to enforce a parameter that is only interpreted with `etcd` and which tells the client to create a `directory` instead of a key. + +```go +key := "path/to/key" +if !kv.Exists(key) { + // We enforce IsDir = true to make sure etcd creates a directory + err := kv.Put(key, []byte("data"), &store.WriteOptions{IsDir:true}) +} +events, err := kv.WatchTree(key, nil) +``` + +The code above will work for the three backends but make sure to not try to store any value at that path as the call to `Put` will fail for `etcd` (you can only put at `path/to/key/foo`, `path/to/key/bar` for example). 
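To keep the etcd directory/key caveats above in one place, the sketch below (editor's illustration, not part of libkv or its docs) wraps the `Exists` + `Put(..., &store.WriteOptions{IsDir: true})` pattern shown in this document into a small helper; the package and function names are hypothetical.

```go
package kvutil

import (
	"github.com/docker/libkv/store"
)

// EnsureWatchableKey creates key if it does not exist yet, forcing
// IsDir: true so the etcd driver creates a directory. The flag is only
// interpreted by etcd, so the same call is safe on Consul and Zookeeper,
// and WatchTree(key, ...) can then be used on all three backends.
func EnsureWatchableKey(kv store.Store, key string) error {
	exists, err := kv.Exists(key)
	// Some drivers may report a missing key as store.ErrKeyNotFound
	// rather than (false, nil); treat both the same way here.
	if err != nil && err != store.ErrKeyNotFound {
		return err
	}
	if exists {
		return nil
	}
	// Mirrors the cross-backend pattern shown above; on etcd the
	// placeholder value is never read back, since directories hold no value.
	return kv.Put(key, []byte("init"), &store.WriteOptions{IsDir: true})
}
```

A caller would run this once before `WatchTree(key, stopCh)` and afterwards only write to leaf keys such as `path/to/key/node1`, keeping the code portable across Consul, Zookeeper and etcd.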
+ +##Etcd distributed locking + +There is `Lock` mechanisms baked in the `coreos/etcd/client` for now. Instead, `libkv` has its own implementation of a `Lock` on top of `etcd`. + +The general workflow for the `Lock` is as follows: + +- Call Lock concurrently on a `key` between threads/programs +- Only one will create that key, others are going to fail because the key has already been created +- The thread locking the key can get the right index to set the value of the key using Compare And Swap and effectively Lock and hold the key +- Other threads are given a wrong index to fail the Compare and Swap and block until the key has been released by the thread holding the Lock +- Lock seekers are setting up a Watch listening on that key and events happening on the key +- When the thread/program stops holding the lock, it deletes the key triggering a `delete` event that will notify all the other threads. In case the program crashes, the key has a TTL attached that will send an `expire` event when this TTL expires. +- Once everyone is notified, back to the first step. First come, first served with the Lock. + +The whole Lock process is highly dependent on the `delete`/`expire` events of `etcd`. So don't expect the key to be still there once the Lock is released. + +For example if the whole logic is to `Lock` a key and expect the value to still be there after it has been unlocked, it is not going to be cross-backend compatible with `Consul` and `zookeeper`. On the other end the `etcd` Lock can still be used to do Leader Election for example and still be cross-compatible with other backends. \ No newline at end of file diff --git a/vendor/github.com/docker/libkv/docs/examples.md b/vendor/github.com/docker/libkv/docs/examples.md new file mode 100644 index 000000000..09752db19 --- /dev/null +++ b/vendor/github.com/docker/libkv/docs/examples.md @@ -0,0 +1,157 @@ +#Examples + +This document contains useful example of usage for `libkv`. It might not be complete but provides with general informations on how to use the client. + +##Create a store and use Put/Get/Delete + +```go +package main + +import ( + "fmt" + "time" + "log" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/consul" +) + +func init() { + // Register consul store to libkv + consul.Register() + + // We can register as many backends that are supported by libkv + etcd.Register() + zookeeper.Register() + boltdb.Register() +} + +func main() { + client := "localhost:8500" + + // Initialize a new store with consul + kv, err := libkv.NewStore( + store.CONSUL, // or "consul" + []string{client}, + &store.Config{ + ConnectionTimeout: 10*time.Second, + }, + ) + if err != nil { + log.Fatal("Cannot create store consul") + } + + key := "foo" + err = kv.Put(key, []byte("bar"), nil) + if err != nil { + fmt.Errorf("Error trying to put value at key: %v", key) + } + + pair, err := kv.Get(key) + if err != nil { + fmt.Errorf("Error trying accessing value at key: %v", key) + } + + err = kv.Delete(key) + if err != nil { + fmt.Errorf("Error trying to delete key %v", key) + } + + log.Info("value: ", string(pair.Value)) +} +``` + +##List keys + +```go +// List will list all the keys under `key` if it contains a set of child keys/values +entries, err := kv.List(key) +for _, pair := range entries { + fmt.Printf("key=%v - value=%v", pair.Key, string(pair.Value)) +} + +``` + +##Watching for events on a single key (Watch) + +You can use watches to watch modifications on a key. 
First you need to check if the key exists. If this is not the case, we need to create it using the `Put` function. + +```go +// Checking on the key before watching +if !kv.Exists(key) { + err := kv.Put(key, []byte("bar"), nil) + if err != nil { + fmt.Errorf("Something went wrong when initializing key %v", key) + } +} + +stopCh := make(<-chan struct{}) +events, err := kv.Watch(key, stopCh) + +select { + case pair := <-events: + // Do something with events + fmt.Printf("value changed on key %v: new value=%v", key, pair.Value) +} + +``` + +##Watching for events happening on child keys (WatchTree) + +You can use watches to watch modifications on a key. First you need to check if the key exists. If this is not the case, we need to create it using the `Put` function. There is a special step here though if you want your code to work across backends. Because `etcd` is a special case and it makes the distinction between directories and keys, we need to make sure that the created key is considered as a directory by enforcing `IsDir` at `true`. + +```go +// Checking on the key before watching +if !kv.Exists(key) { + // Don't forget IsDir:true if the code is used cross-backend + err := kv.Put(key, []byte("bar"), &store.WriteOptions{IsDir:true}) + if err != nil { + fmt.Errorf("Something went wrong when initializing key %v", key) + } +} + +stopCh := make(<-chan struct{}) +events, err := kv.WatchTree(key, stopCh) + +select { + case pairs := <-events: + // Do something with events + for _, pair := range pairs { + fmt.Printf("value changed on key %v: new value=%v", key, pair.Value) + } +} + +``` + +## Distributed Locking, using Lock/Unlock + +```go +key := "lockKey" +value := []byte("bar") + +// Initialize a distributed lock. TTL is optional, it is here to make sure that +// the lock is released after the program that is holding the lock ends or crashes +lock, err := kv.NewLock(key, &store.LockOptions{Value: value, TTL: 2 * time.Second}) +if err != nil { + fmt.Errorf("something went wrong when trying to initialize the Lock") +} + +// Try to lock the key, the call to Lock() is blocking +_, err := lock.Lock(nil) +if err != nil { + fmt.Errorf("something went wrong when trying to lock key %v", key) +} + +// Get should work because we are holding the key +pair, err := kv.Get(key) +if err != nil { + fmt.Errorf("key %v has value %v", key, pair.Value) +} + +// Unlock the key +err = lock.Unlock() +if err != nil { + fmt.Errorf("something went wrong when trying to unlock key %v", key) +} +``` \ No newline at end of file diff --git a/vendor/github.com/docker/libkv/libkv.go b/vendor/github.com/docker/libkv/libkv.go new file mode 100644 index 000000000..bdb8c7529 --- /dev/null +++ b/vendor/github.com/docker/libkv/libkv.go @@ -0,0 +1,40 @@ +package libkv + +import ( + "fmt" + "sort" + "strings" + + "github.com/docker/libkv/store" +) + +// Initialize creates a new Store object, initializing the client +type Initialize func(addrs []string, options *store.Config) (store.Store, error) + +var ( + // Backend initializers + initializers = make(map[store.Backend]Initialize) + + supportedBackend = func() string { + keys := make([]string, 0, len(initializers)) + for k := range initializers { + keys = append(keys, string(k)) + } + sort.Strings(keys) + return strings.Join(keys, ", ") + }() +) + +// NewStore creates an instance of store +func NewStore(backend store.Backend, addrs []string, options *store.Config) (store.Store, error) { + if init, exists := initializers[backend]; exists { + return init(addrs, options) + } + 
+ return nil, fmt.Errorf("%s %s", store.ErrBackendNotSupported.Error(), supportedBackend) +} + +// AddStore adds a new store backend to libkv +func AddStore(store store.Backend, init Initialize) { + initializers[store] = init +} diff --git a/vendor/github.com/docker/libkv/libkv_test.go b/vendor/github.com/docker/libkv/libkv_test.go new file mode 100644 index 000000000..fe7af6b06 --- /dev/null +++ b/vendor/github.com/docker/libkv/libkv_test.go @@ -0,0 +1,24 @@ +package libkv + +import ( + "testing" + "time" + + "github.com/docker/libkv/store" + "github.com/stretchr/testify/assert" +) + +func TestNewStoreUnsupported(t *testing.T) { + client := "localhost:9999" + + kv, err := NewStore( + "unsupported", + []string{client}, + &store.Config{ + ConnectionTimeout: 10 * time.Second, + }, + ) + assert.Error(t, err) + assert.Nil(t, kv) + assert.Equal(t, "Backend storage not supported yet, please choose one of ", err.Error()) +} diff --git a/vendor/github.com/docker/libkv/script/.validate b/vendor/github.com/docker/libkv/script/.validate new file mode 100644 index 000000000..3767f4223 --- /dev/null +++ b/vendor/github.com/docker/libkv/script/.validate @@ -0,0 +1,33 @@ +#!/bin/bash + +if [ -z "$VALIDATE_UPSTREAM" ]; then + # this is kind of an expensive check, so let's not do this twice if we + # are running more than one validate bundlescript + + VALIDATE_REPO='https://github.com/docker/libkv.git' + VALIDATE_BRANCH='master' + + if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then + VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" + VALIDATE_BRANCH="${TRAVIS_BRANCH}" + fi + + VALIDATE_HEAD="$(git rev-parse --verify HEAD)" + + git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" + VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" + + VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" + VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" + + validate_diff() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git diff "$VALIDATE_COMMIT_DIFF" "$@" + fi + } + validate_log() { + if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then + git log "$VALIDATE_COMMIT_LOG" "$@" + fi + } +fi diff --git a/vendor/github.com/docker/libkv/script/coverage b/vendor/github.com/docker/libkv/script/coverage new file mode 100755 index 000000000..a7a13f450 --- /dev/null +++ b/vendor/github.com/docker/libkv/script/coverage @@ -0,0 +1,21 @@ +#!/bin/bash + +MODE="mode: count" +ROOT=${TRAVIS_BUILD_DIR:-.}/../../.. + +# Grab the list of packages. +# Exclude the API and CLI from coverage as it will be covered by integration tests. +PACKAGES=`go list ./...` + +# Create the empty coverage file. +echo $MODE > goverage.report + +# Run coverage on every package. 
+for package in $PACKAGES; do + output="$ROOT/$package/coverage.out" + + go test -test.short -covermode=count -coverprofile=$output $package + if [ -f "$output" ] ; then + cat "$output" | grep -v "$MODE" >> goverage.report + fi +done diff --git a/vendor/github.com/docker/libkv/script/travis_consul.sh b/vendor/github.com/docker/libkv/script/travis_consul.sh new file mode 100755 index 000000000..7b63d6b6d --- /dev/null +++ b/vendor/github.com/docker/libkv/script/travis_consul.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ $# -gt 0 ] ; then + CONSUL_VERSION="$1" +else + CONSUL_VERSION="0.5.2" +fi + +# install consul +wget "https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip" +unzip "consul_${CONSUL_VERSION}_linux_amd64.zip" + +# make config for minimum ttl +touch config.json +echo "{\"session_ttl_min\": \"1s\"}" >> config.json + +# check +./consul --version diff --git a/vendor/github.com/docker/libkv/script/travis_etcd.sh b/vendor/github.com/docker/libkv/script/travis_etcd.sh new file mode 100755 index 000000000..bee8567fc --- /dev/null +++ b/vendor/github.com/docker/libkv/script/travis_etcd.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +if [ $# -gt 0 ] ; then + ETCD_VERSION="$1" +else + ETCD_VERSION="2.2.0" +fi + +curl -L https://github.com/coreos/etcd/releases/download/v$ETCD_VERSION/etcd-v$ETCD_VERSION-linux-amd64.tar.gz -o etcd-v$ETCD_VERSION-linux-amd64.tar.gz +tar xzvf etcd-v$ETCD_VERSION-linux-amd64.tar.gz +mv etcd-v$ETCD_VERSION-linux-amd64 etcd diff --git a/vendor/github.com/docker/libkv/script/travis_zk.sh b/vendor/github.com/docker/libkv/script/travis_zk.sh new file mode 100755 index 000000000..636a2407f --- /dev/null +++ b/vendor/github.com/docker/libkv/script/travis_zk.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +if [ $# -gt 0 ] ; then + ZK_VERSION="$1" +else + ZK_VERSION="3.4.7" +fi + +wget "http://apache.cs.utah.edu/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz" +tar -xvf "zookeeper-${ZK_VERSION}.tar.gz" +mv zookeeper-$ZK_VERSION zk +mv ./zk/conf/zoo_sample.cfg ./zk/conf/zoo.cfg diff --git a/vendor/github.com/docker/libkv/script/validate-gofmt b/vendor/github.com/docker/libkv/script/validate-gofmt new file mode 100755 index 000000000..c565976b4 --- /dev/null +++ b/vendor/github.com/docker/libkv/script/validate-gofmt @@ -0,0 +1,30 @@ +#!/bin/bash + +source "$(dirname "$BASH_SOURCE")/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^Godeps/' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
+ echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/libkv/store/boltdb/boltdb.go b/vendor/github.com/docker/libkv/store/boltdb/boltdb.go new file mode 100644 index 000000000..cdfd74f87 --- /dev/null +++ b/vendor/github.com/docker/libkv/store/boltdb/boltdb.go @@ -0,0 +1,474 @@ +package boltdb + +import ( + "bytes" + "encoding/binary" + "errors" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/boltdb/bolt" + "github.com/docker/libkv" + "github.com/docker/libkv/store" +) + +var ( + // ErrMultipleEndpointsUnsupported is thrown when multiple endpoints specified for + // BoltDB. Endpoint has to be a local file path + ErrMultipleEndpointsUnsupported = errors.New("boltdb supports one endpoint and should be a file path") + // ErrBoltBucketOptionMissing is thrown when boltBcuket config option is missing + ErrBoltBucketOptionMissing = errors.New("boltBucket config option missing") +) + +const ( + filePerm os.FileMode = 0644 +) + +//BoltDB type implements the Store interface +type BoltDB struct { + client *bolt.DB + boltBucket []byte + dbIndex uint64 + path string + timeout time.Duration + // By default libkv opens and closes the bolt DB connection for every + // get/put operation. This allows multiple apps to use a Bolt DB at the + // same time. + // PersistConnection flag provides an option to override ths behavior. + // ie: open the connection in New and use it till Close is called. + PersistConnection bool + sync.Mutex +} + +const ( + libkvmetadatalen = 8 + transientTimeout = time.Duration(10) * time.Second +) + +// Register registers boltdb to libkv +func Register() { + libkv.AddStore(store.BOLTDB, New) +} + +// New opens a new BoltDB connection to the specified path and bucket +func New(endpoints []string, options *store.Config) (store.Store, error) { + var ( + db *bolt.DB + err error + boltOptions *bolt.Options + timeout = transientTimeout + ) + + if len(endpoints) > 1 { + return nil, ErrMultipleEndpointsUnsupported + } + + if (options == nil) || (len(options.Bucket) == 0) { + return nil, ErrBoltBucketOptionMissing + } + + dir, _ := filepath.Split(endpoints[0]) + if err = os.MkdirAll(dir, 0750); err != nil { + return nil, err + } + + if options.PersistConnection { + boltOptions = &bolt.Options{Timeout: options.ConnectionTimeout} + db, err = bolt.Open(endpoints[0], filePerm, boltOptions) + if err != nil { + return nil, err + } + } + + if options.ConnectionTimeout != 0 { + timeout = options.ConnectionTimeout + } + + b := &BoltDB{ + client: db, + path: endpoints[0], + boltBucket: []byte(options.Bucket), + timeout: timeout, + PersistConnection: options.PersistConnection, + } + + return b, nil +} + +func (b *BoltDB) reset() { + b.path = "" + b.boltBucket = []byte{} +} + +func (b *BoltDB) getDBhandle() (*bolt.DB, error) { + var ( + db *bolt.DB + err error + ) + if !b.PersistConnection { + boltOptions := &bolt.Options{Timeout: b.timeout} + if db, err = bolt.Open(b.path, filePerm, boltOptions); err != nil { + return nil, err + } + b.client = db + } + + return b.client, nil +} + +func (b *BoltDB) releaseDBhandle() { + if !b.PersistConnection { + b.client.Close() + } +} + +// Get the value at "key". BoltDB doesn't provide an inbuilt last modified index with every kv pair. Its implemented by +// by a atomic counter maintained by the libkv and appened to the value passed by the client. 
+func (b *BoltDB) Get(key string) (*store.KVPair, error) { + var ( + val []byte + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if db, err = b.getDBhandle(); err != nil { + return nil, err + } + defer b.releaseDBhandle() + + err = db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + v := bucket.Get([]byte(key)) + val = make([]byte, len(v)) + copy(val, v) + + return nil + }) + + if len(val) == 0 { + return nil, store.ErrKeyNotFound + } + if err != nil { + return nil, err + } + + dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen]) + val = val[libkvmetadatalen:] + + return &store.KVPair{Key: key, Value: val, LastIndex: (dbIndex)}, nil +} + +//Put the key, value pair. index number metadata is prepended to the value +func (b *BoltDB) Put(key string, value []byte, opts *store.WriteOptions) error { + var ( + dbIndex uint64 + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + dbval := make([]byte, libkvmetadatalen) + + if db, err = b.getDBhandle(); err != nil { + return err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + bucket, err := tx.CreateBucketIfNotExists(b.boltBucket) + if err != nil { + return err + } + + dbIndex = atomic.AddUint64(&b.dbIndex, 1) + binary.LittleEndian.PutUint64(dbval, dbIndex) + dbval = append(dbval, value...) + + err = bucket.Put([]byte(key), dbval) + if err != nil { + return err + } + return nil + }) + return err +} + +//Delete the value for the given key. +func (b *BoltDB) Delete(key string) error { + var ( + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if db, err = b.getDBhandle(); err != nil { + return err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + err := bucket.Delete([]byte(key)) + return err + }) + return err +} + +// Exists checks if the key exists inside the store +func (b *BoltDB) Exists(key string) (bool, error) { + var ( + val []byte + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if db, err = b.getDBhandle(); err != nil { + return false, err + } + defer b.releaseDBhandle() + + err = db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + val = bucket.Get([]byte(key)) + + return nil + }) + + if len(val) == 0 { + return false, err + } + return true, err +} + +// List returns the range of keys starting with the passed in prefix +func (b *BoltDB) List(keyPrefix string) ([]*store.KVPair, error) { + var ( + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + kv := []*store.KVPair{} + + if db, err = b.getDBhandle(); err != nil { + return nil, err + } + defer b.releaseDBhandle() + + err = db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + cursor := bucket.Cursor() + prefix := []byte(keyPrefix) + + for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() { + + dbIndex := binary.LittleEndian.Uint64(v[:libkvmetadatalen]) + v = v[libkvmetadatalen:] + val := make([]byte, len(v)) + copy(val, v) + + kv = append(kv, &store.KVPair{ + Key: string(key), + Value: val, + LastIndex: dbIndex, + }) + } + return nil + }) + if len(kv) == 0 { + return nil, store.ErrKeyNotFound + } + return kv, err +} + +// AtomicDelete deletes a value at "key" if the key +// has not been modified in the meantime, 
throws an +// error if this is the case +func (b *BoltDB) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + var ( + val []byte + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if previous == nil { + return false, store.ErrPreviousNotSpecified + } + if db, err = b.getDBhandle(); err != nil { + return false, err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + val = bucket.Get([]byte(key)) + if val == nil { + return store.ErrKeyNotFound + } + dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen]) + if dbIndex != previous.LastIndex { + return store.ErrKeyModified + } + err := bucket.Delete([]byte(key)) + return err + }) + if err != nil { + return false, err + } + return true, err +} + +// AtomicPut puts a value at "key" if the key has not been +// modified since the last Put, throws an error if this is the case +func (b *BoltDB) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + var ( + val []byte + dbIndex uint64 + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + dbval := make([]byte, libkvmetadatalen) + + if db, err = b.getDBhandle(); err != nil { + return false, nil, err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + var err error + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + if previous != nil { + return store.ErrKeyNotFound + } + bucket, err = tx.CreateBucket(b.boltBucket) + if err != nil { + return err + } + } + // AtomicPut is equivalent to Put if previous is nil and the Ky + // doesn't exist in the DB. + val = bucket.Get([]byte(key)) + if previous == nil && len(val) != 0 { + return store.ErrKeyExists + } + if previous != nil { + if len(val) == 0 { + return store.ErrKeyNotFound + } + dbIndex = binary.LittleEndian.Uint64(val[:libkvmetadatalen]) + if dbIndex != previous.LastIndex { + return store.ErrKeyModified + } + } + dbIndex = atomic.AddUint64(&b.dbIndex, 1) + binary.LittleEndian.PutUint64(dbval, b.dbIndex) + dbval = append(dbval, value...) 
+ return (bucket.Put([]byte(key), dbval)) + }) + if err != nil { + return false, nil, err + } + + updated := &store.KVPair{ + Key: key, + Value: value, + LastIndex: dbIndex, + } + + return true, updated, nil +} + +// Close the db connection to the BoltDB +func (b *BoltDB) Close() { + b.Lock() + defer b.Unlock() + + if !b.PersistConnection { + b.reset() + } else { + b.client.Close() + } + return +} + +// DeleteTree deletes a range of keys with a given prefix +func (b *BoltDB) DeleteTree(keyPrefix string) error { + var ( + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if db, err = b.getDBhandle(); err != nil { + return err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + cursor := bucket.Cursor() + prefix := []byte(keyPrefix) + + for key, _ := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, _ = cursor.Next() { + _ = bucket.Delete([]byte(key)) + } + return nil + }) + + return err +} + +// NewLock has to implemented at the library level since its not supported by BoltDB +func (b *BoltDB) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, store.ErrCallNotSupported +} + +// Watch has to implemented at the library level since its not supported by BoltDB +func (b *BoltDB) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, store.ErrCallNotSupported +} + +// WatchTree has to implemented at the library level since its not supported by BoltDB +func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, store.ErrCallNotSupported +} diff --git a/vendor/github.com/docker/libkv/store/boltdb/boltdb_test.go b/vendor/github.com/docker/libkv/store/boltdb/boltdb_test.go new file mode 100644 index 000000000..3eb4e84b2 --- /dev/null +++ b/vendor/github.com/docker/libkv/store/boltdb/boltdb_test.go @@ -0,0 +1,144 @@ +package boltdb + +import ( + "os" + "testing" + "time" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/testutils" + "github.com/stretchr/testify/assert" +) + +func makeBoltDBClient(t *testing.T) store.Store { + kv, err := New([]string{"/tmp/not_exist_dir/__boltdbtest"}, &store.Config{Bucket: "boltDBTest"}) + + if err != nil { + t.Fatalf("cannot create store: %v", err) + } + + return kv +} + +func TestRegister(t *testing.T) { + Register() + + kv, err := libkv.NewStore( + store.BOLTDB, + []string{"/tmp/not_exist_dir/__boltdbtest"}, + &store.Config{Bucket: "boltDBTest"}, + ) + assert.NoError(t, err) + assert.NotNil(t, kv) + + if _, ok := kv.(*BoltDB); !ok { + t.Fatal("Error registering and initializing boltDB") + } + + _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") +} + +// TestMultiplePersistConnection tests the second connection to a +// BoltDB fails when one is already open with PersistConnection flag +func TestMultiplePersistConnection(t *testing.T) { + kv, err := libkv.NewStore( + store.BOLTDB, + []string{"/tmp/not_exist_dir/__boltdbtest"}, + &store.Config{ + Bucket: "boltDBTest", + ConnectionTimeout: 1 * time.Second, + PersistConnection: true}, + ) + assert.NoError(t, err) + assert.NotNil(t, kv) + + if _, ok := kv.(*BoltDB); !ok { + t.Fatal("Error registering and initializing boltDB") + } + + // Must fail if multiple boltdb requests are made with a valid timeout + kv, err = libkv.NewStore( + store.BOLTDB, + []string{"/tmp/not_exist_dir/__boltdbtest"}, + 
&store.Config{ + Bucket: "boltDBTest", + ConnectionTimeout: 1 * time.Second, + PersistConnection: true}, + ) + assert.Error(t, err) + + _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") +} + +// TestConcurrentConnection tests simultaenous get/put using +// two handles. +func TestConcurrentConnection(t *testing.T) { + var err error + kv1, err1 := libkv.NewStore( + store.BOLTDB, + []string{"/tmp/__boltdbtest"}, + &store.Config{ + Bucket: "boltDBTest", + ConnectionTimeout: 1 * time.Second}, + ) + assert.NoError(t, err1) + assert.NotNil(t, kv1) + + kv2, err2 := libkv.NewStore( + store.BOLTDB, + []string{"/tmp/__boltdbtest"}, + &store.Config{Bucket: "boltDBTest", + ConnectionTimeout: 1 * time.Second}, + ) + assert.NoError(t, err2) + assert.NotNil(t, kv2) + + key1 := "TestKV1" + value1 := []byte("TestVal1") + err = kv1.Put(key1, value1, nil) + assert.NoError(t, err) + + key2 := "TestKV2" + value2 := []byte("TestVal2") + err = kv2.Put(key2, value2, nil) + assert.NoError(t, err) + + pair1, err1 := kv1.Get(key1) + assert.NoError(t, err) + if assert.NotNil(t, pair1) { + assert.NotNil(t, pair1.Value) + } + assert.Equal(t, pair1.Value, value1) + + pair2, err2 := kv2.Get(key2) + assert.NoError(t, err) + if assert.NotNil(t, pair2) { + assert.NotNil(t, pair2.Value) + } + assert.Equal(t, pair2.Value, value2) + + // AtomicPut using kv1 and kv2 should succeed + _, _, err = kv1.AtomicPut(key1, []byte("TestnewVal1"), pair1, nil) + assert.NoError(t, err) + + _, _, err = kv2.AtomicPut(key2, []byte("TestnewVal2"), pair2, nil) + assert.NoError(t, err) + + testutils.RunTestCommon(t, kv1) + testutils.RunTestCommon(t, kv2) + + kv1.Close() + kv2.Close() + + _ = os.Remove("/tmp/__boltdbtest") +} + +func TestBoldDBStore(t *testing.T) { + kv := makeBoltDBClient(t) + + testutils.RunTestCommon(t, kv) + testutils.RunTestAtomic(t, kv) + + _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") +} diff --git a/vendor/github.com/docker/libkv/store/consul/consul.go b/vendor/github.com/docker/libkv/store/consul/consul.go new file mode 100644 index 000000000..cb64be72d --- /dev/null +++ b/vendor/github.com/docker/libkv/store/consul/consul.go @@ -0,0 +1,558 @@ +package consul + +import ( + "crypto/tls" + "errors" + "net/http" + "strings" + "sync" + "time" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + api "github.com/hashicorp/consul/api" +) + +const ( + // DefaultWatchWaitTime is how long we block for at a + // time to check if the watched key has changed. This + // affects the minimum time it takes to cancel a watch. 
+ DefaultWatchWaitTime = 15 * time.Second + + // RenewSessionRetryMax is the number of time we should try + // to renew the session before giving up and throwing an error + RenewSessionRetryMax = 5 + + // MaxSessionDestroyAttempts is the maximum times we will try + // to explicitely destroy the session attached to a lock after + // the connectivity to the store has been lost + MaxSessionDestroyAttempts = 5 + + // defaultLockTTL is the default ttl for the consul lock + defaultLockTTL = 20 * time.Second +) + +var ( + // ErrMultipleEndpointsUnsupported is thrown when there are + // multiple endpoints specified for Consul + ErrMultipleEndpointsUnsupported = errors.New("consul does not support multiple endpoints") + + // ErrSessionRenew is thrown when the session can't be + // renewed because the Consul version does not support sessions + ErrSessionRenew = errors.New("cannot set or renew session for ttl, unable to operate on sessions") +) + +// Consul is the receiver type for the +// Store interface +type Consul struct { + sync.Mutex + config *api.Config + client *api.Client +} + +type consulLock struct { + lock *api.Lock + renewCh chan struct{} +} + +// Register registers consul to libkv +func Register() { + libkv.AddStore(store.CONSUL, New) +} + +// New creates a new Consul client given a list +// of endpoints and optional tls config +func New(endpoints []string, options *store.Config) (store.Store, error) { + if len(endpoints) > 1 { + return nil, ErrMultipleEndpointsUnsupported + } + + s := &Consul{} + + // Create Consul client + config := api.DefaultConfig() + s.config = config + config.HttpClient = http.DefaultClient + config.Address = endpoints[0] + config.Scheme = "http" + + // Set options + if options != nil { + if options.TLS != nil { + s.setTLS(options.TLS) + } + if options.ConnectionTimeout != 0 { + s.setTimeout(options.ConnectionTimeout) + } + } + + // Creates a new client + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + s.client = client + + return s, nil +} + +// SetTLS sets Consul TLS options +func (s *Consul) setTLS(tls *tls.Config) { + s.config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: tls, + } + s.config.Scheme = "https" +} + +// SetTimeout sets the timeout for connecting to Consul +func (s *Consul) setTimeout(time time.Duration) { + s.config.WaitTime = time +} + +// Normalize the key for usage in Consul +func (s *Consul) normalize(key string) string { + key = store.Normalize(key) + return strings.TrimPrefix(key, "/") +} + +func (s *Consul) renewSession(pair *api.KVPair, ttl time.Duration) error { + // Check if there is any previous session with an active TTL + session, err := s.getActiveSession(pair.Key) + if err != nil { + return err + } + + if session == "" { + entry := &api.SessionEntry{ + Behavior: api.SessionBehaviorDelete, // Delete the key when the session expires + TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x + LockDelay: 1 * time.Millisecond, // Virtually disable lock delay + } + + // Create the key session + session, _, err = s.client.Session().Create(entry, nil) + if err != nil { + return err + } + + lockOpts := &api.LockOptions{ + Key: pair.Key, + Session: session, + } + + // Lock and ignore if lock is held + // It's just a placeholder for the + // ephemeral behavior + lock, _ := s.client.LockOpts(lockOpts) + if lock != nil { + lock.Lock(nil) + } + } + + _, _, err = s.client.Session().Renew(session, nil) + return err +} + +// getActiveSession checks if the key already has +// a session attached +func 
(s *Consul) getActiveSession(key string) (string, error) { + pair, _, err := s.client.KV().Get(key, nil) + if err != nil { + return "", err + } + if pair != nil && pair.Session != "" { + return pair.Session, nil + } + return "", nil +} + +// Get the value at "key", returns the last modified index +// to use in conjunction to CAS calls +func (s *Consul) Get(key string) (*store.KVPair, error) { + options := &api.QueryOptions{ + AllowStale: false, + RequireConsistent: true, + } + + pair, meta, err := s.client.KV().Get(s.normalize(key), options) + if err != nil { + return nil, err + } + + // If pair is nil then the key does not exist + if pair == nil { + return nil, store.ErrKeyNotFound + } + + return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil +} + +// Put a value at "key" +func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error { + key = s.normalize(key) + + p := &api.KVPair{ + Key: key, + Value: value, + Flags: api.LockFlagValue, + } + + if opts != nil && opts.TTL > 0 { + // Create or renew a session holding a TTL. Operations on sessions + // are not deterministic: creating or renewing a session can fail + for retry := 1; retry <= RenewSessionRetryMax; retry++ { + err := s.renewSession(p, opts.TTL) + if err == nil { + break + } + if retry == RenewSessionRetryMax { + return ErrSessionRenew + } + } + } + + _, err := s.client.KV().Put(p, nil) + return err +} + +// Delete a value at "key" +func (s *Consul) Delete(key string) error { + if _, err := s.Get(key); err != nil { + return err + } + _, err := s.client.KV().Delete(s.normalize(key), nil) + return err +} + +// Exists checks that the key exists inside the store +func (s *Consul) Exists(key string) (bool, error) { + _, err := s.Get(key) + if err != nil { + if err == store.ErrKeyNotFound { + return false, nil + } + return false, err + } + return true, nil +} + +// List child nodes of a given directory +func (s *Consul) List(directory string) ([]*store.KVPair, error) { + pairs, _, err := s.client.KV().List(s.normalize(directory), nil) + if err != nil { + return nil, err + } + if len(pairs) == 0 { + return nil, store.ErrKeyNotFound + } + + kv := []*store.KVPair{} + + for _, pair := range pairs { + if pair.Key == directory { + continue + } + kv = append(kv, &store.KVPair{ + Key: pair.Key, + Value: pair.Value, + LastIndex: pair.ModifyIndex, + }) + } + + return kv, nil +} + +// DeleteTree deletes a range of keys under a given directory +func (s *Consul) DeleteTree(directory string) error { + if _, err := s.List(directory); err != nil { + return err + } + _, err := s.client.KV().DeleteTree(s.normalize(directory), nil) + return err +} + +// Watch for changes on a "key" +// It returns a channel that will receive changes or pass +// on errors. Upon creation, the current value will first +// be sent to the channel. Providing a non-nil stopCh can +// be used to stop watching. +func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + kv := s.client.KV() + watchCh := make(chan *store.KVPair) + + go func() { + defer close(watchCh) + + // Use a wait time in order to check if we should quit + // from time to time. 
+ opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime} + + for { + // Check if we should quit + select { + case <-stopCh: + return + default: + } + + // Get the key + pair, meta, err := kv.Get(key, opts) + if err != nil { + return + } + + // If LastIndex didn't change then it means `Get` returned + // because of the WaitTime and the key didn't changed. + if opts.WaitIndex == meta.LastIndex { + continue + } + opts.WaitIndex = meta.LastIndex + + // Return the value to the channel + // FIXME: What happens when a key is deleted? + if pair != nil { + watchCh <- &store.KVPair{ + Key: pair.Key, + Value: pair.Value, + LastIndex: pair.ModifyIndex, + } + } + } + }() + + return watchCh, nil +} + +// WatchTree watches for changes on a "directory" +// It returns a channel that will receive changes or pass +// on errors. Upon creating a watch, the current childs values +// will be sent to the channel .Providing a non-nil stopCh can +// be used to stop watching. +func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + kv := s.client.KV() + watchCh := make(chan []*store.KVPair) + + go func() { + defer close(watchCh) + + // Use a wait time in order to check if we should quit + // from time to time. + opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime} + for { + // Check if we should quit + select { + case <-stopCh: + return + default: + } + + // Get all the childrens + pairs, meta, err := kv.List(directory, opts) + if err != nil { + return + } + + // If LastIndex didn't change then it means `Get` returned + // because of the WaitTime and the child keys didn't change. + if opts.WaitIndex == meta.LastIndex { + continue + } + opts.WaitIndex = meta.LastIndex + + // Return children KV pairs to the channel + kvpairs := []*store.KVPair{} + for _, pair := range pairs { + if pair.Key == directory { + continue + } + kvpairs = append(kvpairs, &store.KVPair{ + Key: pair.Key, + Value: pair.Value, + LastIndex: pair.ModifyIndex, + }) + } + watchCh <- kvpairs + } + }() + + return watchCh, nil +} + +// NewLock returns a handle to a lock struct which can +// be used to provide mutual exclusion on a key +func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + lockOpts := &api.LockOptions{ + Key: s.normalize(key), + } + + lock := &consulLock{} + + ttl := defaultLockTTL + + if options != nil { + // Set optional TTL on Lock + if options.TTL != 0 { + ttl = options.TTL + } + // Set optional value on Lock + if options.Value != nil { + lockOpts.Value = options.Value + } + } + + entry := &api.SessionEntry{ + Behavior: api.SessionBehaviorRelease, // Release the lock when the session expires + TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x + LockDelay: 1 * time.Millisecond, // Virtually disable lock delay + } + + // Create the key session + session, _, err := s.client.Session().Create(entry, nil) + if err != nil { + return nil, err + } + + // Place the session and renew chan on lock + lockOpts.Session = session + lock.renewCh = options.RenewLock + + l, err := s.client.LockOpts(lockOpts) + if err != nil { + return nil, err + } + + // Renew the session ttl lock periodically + s.renewLockSession(entry.TTL, session, options.RenewLock) + + lock.lock = l + return lock, nil +} + +// renewLockSession is used to renew a session Lock, it takes +// a stopRenew chan which is used to explicitely stop the session +// renew process. The renew routine never stops until a signal is +// sent to this channel. 
If deleting the session fails because the +// connection to the store is lost, it keeps trying to delete the +// session periodically until it can contact the store, this ensures +// that the lock is not maintained indefinitely which ensures liveness +// over safety for the lock when the store becomes unavailable. +func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan struct{}) { + sessionDestroyAttempts := 0 + ttl, err := time.ParseDuration(initialTTL) + if err != nil { + return + } + go func() { + for { + select { + case <-time.After(ttl / 2): + entry, _, err := s.client.Session().Renew(id, nil) + if err != nil { + // If an error occurs, continue until the + // session gets destroyed explicitely or + // the session ttl times out + continue + } + if entry == nil { + return + } + + // Handle the server updating the TTL + ttl, _ = time.ParseDuration(entry.TTL) + + case <-stopRenew: + // Attempt a session destroy + _, err := s.client.Session().Destroy(id, nil) + if err == nil { + return + } + + if sessionDestroyAttempts >= MaxSessionDestroyAttempts { + return + } + + // We can't destroy the session because the store + // is unavailable, wait for the session renew period + sessionDestroyAttempts++ + time.Sleep(ttl / 2) + } + } + }() +} + +// Lock attempts to acquire the lock and blocks while +// doing so. It returns a channel that is closed if our +// lock is lost or if an error occurs +func (l *consulLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) { + return l.lock.Lock(stopChan) +} + +// Unlock the "key". Calling unlock while +// not holding the lock will throw an error +func (l *consulLock) Unlock() error { + if l.renewCh != nil { + close(l.renewCh) + } + return l.lock.Unlock() +} + +// AtomicPut put a value at "key" if the key has not been +// modified in the meantime, throws an error if this is the case +func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + + p := &api.KVPair{Key: s.normalize(key), Value: value, Flags: api.LockFlagValue} + + if previous == nil { + // Consul interprets ModifyIndex = 0 as new key. 
+ p.ModifyIndex = 0 + } else { + p.ModifyIndex = previous.LastIndex + } + + ok, _, err := s.client.KV().CAS(p, nil) + if err != nil { + return false, nil, err + } + if !ok { + if previous == nil { + return false, nil, store.ErrKeyExists + } + return false, nil, store.ErrKeyModified + } + + pair, err := s.Get(key) + if err != nil { + return false, nil, err + } + + return true, pair, nil +} + +// AtomicDelete deletes a value at "key" if the key has not +// been modified in the meantime, throws an error if this is the case +func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + if previous == nil { + return false, store.ErrPreviousNotSpecified + } + + p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue} + + // Extra Get operation to check on the key + _, err := s.Get(key) + if err != nil && err == store.ErrKeyNotFound { + return false, err + } + + if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil { + return false, err + } else if !work { + return false, store.ErrKeyModified + } + + return true, nil +} + +// Close closes the client connection +func (s *Consul) Close() { + return +} diff --git a/vendor/github.com/docker/libkv/store/consul/consul_test.go b/vendor/github.com/docker/libkv/store/consul/consul_test.go new file mode 100644 index 000000000..5019494ca --- /dev/null +++ b/vendor/github.com/docker/libkv/store/consul/consul_test.go @@ -0,0 +1,84 @@ +package consul + +import ( + "testing" + "time" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/testutils" + "github.com/stretchr/testify/assert" +) + +var ( + client = "localhost:8500" +) + +func makeConsulClient(t *testing.T) store.Store { + + kv, err := New( + []string{client}, + &store.Config{ + ConnectionTimeout: 3 * time.Second, + }, + ) + + if err != nil { + t.Fatalf("cannot create store: %v", err) + } + + return kv +} + +func TestRegister(t *testing.T) { + Register() + + kv, err := libkv.NewStore(store.CONSUL, []string{client}, nil) + assert.NoError(t, err) + assert.NotNil(t, kv) + + if _, ok := kv.(*Consul); !ok { + t.Fatal("Error registering and initializing consul") + } +} + +func TestConsulStore(t *testing.T) { + kv := makeConsulClient(t) + lockKV := makeConsulClient(t) + ttlKV := makeConsulClient(t) + + testutils.RunTestCommon(t, kv) + testutils.RunTestAtomic(t, kv) + testutils.RunTestWatch(t, kv) + testutils.RunTestLock(t, kv) + testutils.RunTestLockTTL(t, kv, lockKV) + testutils.RunTestTTL(t, kv, ttlKV) + testutils.RunCleanup(t, kv) +} + +func TestGetActiveSession(t *testing.T) { + kv := makeConsulClient(t) + + consul := kv.(*Consul) + + key := "foo" + value := []byte("bar") + + // Put the first key with the Ephemeral flag + err := kv.Put(key, value, &store.WriteOptions{TTL: 2 * time.Second}) + assert.NoError(t, err) + + // Session should not be empty + session, err := consul.getActiveSession(key) + assert.NoError(t, err) + assert.NotEqual(t, session, "") + + // Delete the key + err = kv.Delete(key) + assert.NoError(t, err) + + // Check the session again, it should return nothing + session, err = consul.getActiveSession(key) + assert.NoError(t, err) + assert.Equal(t, session, "") +} diff --git a/vendor/github.com/docker/libkv/store/helpers.go b/vendor/github.com/docker/libkv/store/helpers.go new file mode 100644 index 000000000..0fb74c9ae --- /dev/null +++ b/vendor/github.com/docker/libkv/store/helpers.go @@ -0,0 +1,47 @@ +package store + +import ( + "strings" +) + +// 
CreateEndpoints creates a list of endpoints given the right scheme +func CreateEndpoints(addrs []string, scheme string) (entries []string) { + for _, addr := range addrs { + entries = append(entries, scheme+"://"+addr) + } + return entries +} + +// Normalize the key for each store to the form: +// +// /path/to/key +// +func Normalize(key string) string { + return "/" + join(SplitKey(key)) +} + +// GetDirectory gets the full directory part of +// the key to the form: +// +// /path/to/ +// +func GetDirectory(key string) string { + parts := SplitKey(key) + parts = parts[:len(parts)-1] + return "/" + join(parts) +} + +// SplitKey splits the key to extract path informations +func SplitKey(key string) (path []string) { + if strings.Contains(key, "/") { + path = strings.Split(key, "/") + } else { + path = []string{key} + } + return path +} + +// join the path parts with '/' +func join(parts []string) string { + return strings.Join(parts, "/") +} diff --git a/vendor/github.com/docker/libkv/store/mock/mock.go b/vendor/github.com/docker/libkv/store/mock/mock.go new file mode 100644 index 000000000..82a5b03b4 --- /dev/null +++ b/vendor/github.com/docker/libkv/store/mock/mock.go @@ -0,0 +1,113 @@ +package mock + +import ( + "github.com/docker/libkv/store" + "github.com/stretchr/testify/mock" +) + +// Mock store. Mocks all Store functions using testify.Mock +type Mock struct { + mock.Mock + + // Endpoints passed to InitializeMock + Endpoints []string + + // Options passed to InitializeMock + Options *store.Config +} + +// New creates a Mock store +func New(endpoints []string, options *store.Config) (store.Store, error) { + s := &Mock{} + s.Endpoints = endpoints + s.Options = options + return s, nil +} + +// Put mock +func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { + args := s.Mock.Called(key, value, opts) + return args.Error(0) +} + +// Get mock +func (s *Mock) Get(key string) (*store.KVPair, error) { + args := s.Mock.Called(key) + return args.Get(0).(*store.KVPair), args.Error(1) +} + +// Delete mock +func (s *Mock) Delete(key string) error { + args := s.Mock.Called(key) + return args.Error(0) +} + +// Exists mock +func (s *Mock) Exists(key string) (bool, error) { + args := s.Mock.Called(key) + return args.Bool(0), args.Error(1) +} + +// Watch mock +func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + args := s.Mock.Called(key, stopCh) + return args.Get(0).(<-chan *store.KVPair), args.Error(1) +} + +// WatchTree mock +func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + args := s.Mock.Called(prefix, stopCh) + return args.Get(0).(chan []*store.KVPair), args.Error(1) +} + +// NewLock mock +func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + args := s.Mock.Called(key, options) + return args.Get(0).(store.Locker), args.Error(1) +} + +// List mock +func (s *Mock) List(prefix string) ([]*store.KVPair, error) { + args := s.Mock.Called(prefix) + return args.Get(0).([]*store.KVPair), args.Error(1) +} + +// DeleteTree mock +func (s *Mock) DeleteTree(prefix string) error { + args := s.Mock.Called(prefix) + return args.Error(0) +} + +// AtomicPut mock +func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + args := s.Mock.Called(key, value, previous, opts) + return args.Bool(0), args.Get(1).(*store.KVPair), args.Error(2) +} + +// AtomicDelete mock +func (s *Mock) 
AtomicDelete(key string, previous *store.KVPair) (bool, error) { + args := s.Mock.Called(key, previous) + return args.Bool(0), args.Error(1) +} + +// Lock mock implementation of Locker +type Lock struct { + mock.Mock +} + +// Lock mock +func (l *Lock) Lock(stopCh chan struct{}) (<-chan struct{}, error) { + args := l.Mock.Called(stopCh) + return args.Get(0).(<-chan struct{}), args.Error(1) +} + +// Unlock mock +func (l *Lock) Unlock() error { + args := l.Mock.Called() + return args.Error(0) +} + +// Close mock +func (s *Mock) Close() { + return +} diff --git a/vendor/github.com/docker/libkv/store/store.go b/vendor/github.com/docker/libkv/store/store.go new file mode 100644 index 000000000..7a4850c01 --- /dev/null +++ b/vendor/github.com/docker/libkv/store/store.go @@ -0,0 +1,132 @@ +package store + +import ( + "crypto/tls" + "errors" + "time" +) + +// Backend represents a KV Store Backend +type Backend string + +const ( + // CONSUL backend + CONSUL Backend = "consul" + // ETCD backend + ETCD Backend = "etcd" + // ZK backend + ZK Backend = "zk" + // BOLTDB backend + BOLTDB Backend = "boltdb" +) + +var ( + // ErrBackendNotSupported is thrown when the backend k/v store is not supported by libkv + ErrBackendNotSupported = errors.New("Backend storage not supported yet, please choose one of") + // ErrCallNotSupported is thrown when a method is not implemented/supported by the current backend + ErrCallNotSupported = errors.New("The current call is not supported with this backend") + // ErrNotReachable is thrown when the API cannot be reached for issuing common store operations + ErrNotReachable = errors.New("Api not reachable") + // ErrCannotLock is thrown when there is an error acquiring a lock on a key + ErrCannotLock = errors.New("Error acquiring the lock") + // ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store + ErrKeyModified = errors.New("Unable to complete atomic operation, key modified") + // ErrKeyNotFound is thrown when the key is not found in the store during a Get operation + ErrKeyNotFound = errors.New("Key not found in store") + // ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation + ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation") + // ErrKeyExists is thrown when the previous value exists in the case of an AtomicPut + ErrKeyExists = errors.New("Previous K/V pair exists, cannot complete Atomic operation") +) + +// Config contains the options for a storage client +type Config struct { + ClientTLS *ClientTLSConfig + TLS *tls.Config + ConnectionTimeout time.Duration + Bucket string + PersistConnection bool + Username string + Password string +} + +// ClientTLSConfig contains data for a Client TLS configuration in the form +// the etcd client wants it. Eventually we'll adapt it for ZK and Consul. +type ClientTLSConfig struct { + CertFile string + KeyFile string + CACertFile string +} + +// Store represents the backend K/V storage +// Each store should support every call listed +// here. 
Or it couldn't be implemented as a K/V +// backend for libkv +type Store interface { + // Put a value at the specified key + Put(key string, value []byte, options *WriteOptions) error + + // Get a value given its key + Get(key string) (*KVPair, error) + + // Delete the value at the specified key + Delete(key string) error + + // Verify if a Key exists in the store + Exists(key string) (bool, error) + + // Watch for changes on a key + Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) + + // WatchTree watches for changes on child nodes under + // a given directory + WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) + + // NewLock creates a lock for a given key. + // The returned Locker is not held and must be acquired + // with `.Lock`. The Value is optional. + NewLock(key string, options *LockOptions) (Locker, error) + + // List the content of a given prefix + List(directory string) ([]*KVPair, error) + + // DeleteTree deletes a range of keys under a given directory + DeleteTree(directory string) error + + // Atomic CAS operation on a single value. + // Pass previous = nil to create a new key. + AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) + + // Atomic delete of a single value + AtomicDelete(key string, previous *KVPair) (bool, error) + + // Close the store connection + Close() +} + +// KVPair represents {Key, Value, Lastindex} tuple +type KVPair struct { + Key string + Value []byte + LastIndex uint64 +} + +// WriteOptions contains optional request parameters +type WriteOptions struct { + IsDir bool + TTL time.Duration +} + +// LockOptions contains optional request parameters +type LockOptions struct { + Value []byte // Optional, value to associate with the lock + TTL time.Duration // Optional, expiration ttl associated with the lock + RenewLock chan struct{} // Optional, chan used to control and stop the session ttl renewal for the lock +} + +// Locker provides locking mechanism on top of the store. +// Similar to `sync.Lock` except it may return errors. +type Locker interface { + Lock(stopChan chan struct{}) (<-chan struct{}, error) + Unlock() error +} diff --git a/vendor/github.com/docker/libkv/testutils/utils.go b/vendor/github.com/docker/libkv/testutils/utils.go new file mode 100644 index 000000000..5385bac60 --- /dev/null +++ b/vendor/github.com/docker/libkv/testutils/utils.go @@ -0,0 +1,622 @@ +package testutils + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/libkv/store" + "github.com/stretchr/testify/assert" +) + +// RunTestCommon tests the minimal required APIs which +// should be supported by all K/V backends +func RunTestCommon(t *testing.T, kv store.Store) { + testPutGetDeleteExists(t, kv) + testList(t, kv) + testDeleteTree(t, kv) +} + +// RunTestAtomic tests the Atomic operations by the K/V +// backends +func RunTestAtomic(t *testing.T, kv store.Store) { + testAtomicPut(t, kv) + testAtomicPutCreate(t, kv) + testAtomicPutWithSlashSuffixKey(t, kv) + testAtomicDelete(t, kv) +} + +// RunTestWatch tests the watch/monitor APIs supported +// by the K/V backends. +func RunTestWatch(t *testing.T, kv store.Store) { + testWatch(t, kv) + testWatchTree(t, kv) +} + +// RunTestLock tests the KV pair Lock/Unlock APIs supported +// by the K/V backends. +func RunTestLock(t *testing.T, kv store.Store) { + testLockUnlock(t, kv) +} + +// RunTestLockTTL tests the KV pair Lock with TTL APIs supported +// by the K/V backends. 
+func RunTestLockTTL(t *testing.T, kv store.Store, backup store.Store) { + testLockTTL(t, kv, backup) +} + +// RunTestTTL tests the TTL functionality of the K/V backend. +func RunTestTTL(t *testing.T, kv store.Store, backup store.Store) { + testPutTTL(t, kv, backup) +} + +func testPutGetDeleteExists(t *testing.T, kv store.Store) { + // Get a not exist key should return ErrKeyNotFound + pair, err := kv.Get("testPutGetDelete_not_exist_key") + assert.Equal(t, store.ErrKeyNotFound, err) + + value := []byte("bar") + for _, key := range []string{ + "testPutGetDeleteExists", + "testPutGetDeleteExists/", + "testPutGetDeleteExists/testbar/", + "testPutGetDeleteExists/testbar/testfoobar", + } { + failMsg := fmt.Sprintf("Fail key %s", key) + + // Put the key + err = kv.Put(key, value, nil) + assert.NoError(t, err, failMsg) + + // Get should return the value and an incremented index + pair, err = kv.Get(key) + assert.NoError(t, err, failMsg) + if assert.NotNil(t, pair, failMsg) { + assert.NotNil(t, pair.Value, failMsg) + } + assert.Equal(t, pair.Value, value, failMsg) + assert.NotEqual(t, pair.LastIndex, 0, failMsg) + + // Exists should return true + exists, err := kv.Exists(key) + assert.NoError(t, err, failMsg) + assert.True(t, exists, failMsg) + + // Delete the key + err = kv.Delete(key) + assert.NoError(t, err, failMsg) + + // Get should fail + pair, err = kv.Get(key) + assert.Error(t, err, failMsg) + assert.Nil(t, pair, failMsg) + + // Exists should return false + exists, err = kv.Exists(key) + assert.NoError(t, err, failMsg) + assert.False(t, exists, failMsg) + } +} + +func testWatch(t *testing.T, kv store.Store) { + key := "testWatch" + value := []byte("world") + newValue := []byte("world!") + + // Put the key + err := kv.Put(key, value, nil) + assert.NoError(t, err) + + stopCh := make(<-chan struct{}) + events, err := kv.Watch(key, stopCh) + assert.NoError(t, err) + assert.NotNil(t, events) + + // Update loop + go func() { + timeout := time.After(1 * time.Second) + tick := time.Tick(250 * time.Millisecond) + for { + select { + case <-timeout: + return + case <-tick: + err := kv.Put(key, newValue, nil) + if assert.NoError(t, err) { + continue + } + return + } + } + }() + + // Check for updates + eventCount := 1 + for { + select { + case event := <-events: + assert.NotNil(t, event) + if eventCount == 1 { + assert.Equal(t, event.Key, key) + assert.Equal(t, event.Value, value) + } else { + assert.Equal(t, event.Key, key) + assert.Equal(t, event.Value, newValue) + } + eventCount++ + // We received all the events we wanted to check + if eventCount >= 4 { + return + } + case <-time.After(4 * time.Second): + t.Fatal("Timeout reached") + return + } + } +} + +func testWatchTree(t *testing.T, kv store.Store) { + dir := "testWatchTree" + + node1 := "testWatchTree/node1" + value1 := []byte("node1") + + node2 := "testWatchTree/node2" + value2 := []byte("node2") + + node3 := "testWatchTree/node3" + value3 := []byte("node3") + + err := kv.Put(node1, value1, nil) + assert.NoError(t, err) + err = kv.Put(node2, value2, nil) + assert.NoError(t, err) + err = kv.Put(node3, value3, nil) + assert.NoError(t, err) + + stopCh := make(<-chan struct{}) + events, err := kv.WatchTree(dir, stopCh) + assert.NoError(t, err) + assert.NotNil(t, events) + + // Update loop + go func() { + timeout := time.After(500 * time.Millisecond) + for { + select { + case <-timeout: + err := kv.Delete(node3) + assert.NoError(t, err) + return + } + } + }() + + // Check for updates + eventCount := 1 + for { + select { + case event := <-events: + 
assert.NotNil(t, event) + // We received the Delete event on a child node + // Exit test successfully + if eventCount == 2 { + return + } + eventCount++ + case <-time.After(4 * time.Second): + t.Fatal("Timeout reached") + return + } + } +} + +func testAtomicPut(t *testing.T, kv store.Store) { + key := "testAtomicPut" + value := []byte("world") + + // Put the key + err := kv.Put(key, value, nil) + assert.NoError(t, err) + + // Get should return the value and an incremented index + pair, err := kv.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + assert.NotEqual(t, pair.LastIndex, 0) + + // This CAS should fail: previous exists. + success, _, err := kv.AtomicPut(key, []byte("WORLD"), nil, nil) + assert.Error(t, err) + assert.False(t, success) + + // This CAS should succeed + success, _, err = kv.AtomicPut(key, []byte("WORLD"), pair, nil) + assert.NoError(t, err) + assert.True(t, success) + + // This CAS should fail, key exists. + pair.LastIndex = 6744 + success, _, err = kv.AtomicPut(key, []byte("WORLDWORLD"), pair, nil) + assert.Error(t, err) + assert.False(t, success) +} + +func testAtomicPutCreate(t *testing.T, kv store.Store) { + // Use a key in a new directory to ensure Stores will create directories + // that don't yet exist. + key := "testAtomicPutCreate/create" + value := []byte("putcreate") + + // AtomicPut the key, previous = nil indicates create. + success, _, err := kv.AtomicPut(key, value, nil, nil) + assert.NoError(t, err) + assert.True(t, success) + + // Get should return the value and an incremented index + pair, err := kv.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + + // Attempting to create again should fail. 
+ success, _, err = kv.AtomicPut(key, value, nil, nil) + assert.Error(t, store.ErrKeyExists) + assert.False(t, success) + + // This CAS should succeed, since it has the value from Get() + success, _, err = kv.AtomicPut(key, []byte("PUTCREATE"), pair, nil) + assert.NoError(t, err) + assert.True(t, success) +} + +func testAtomicPutWithSlashSuffixKey(t *testing.T, kv store.Store) { + k1 := "testAtomicPutWithSlashSuffixKey/key/" + success, _, err := kv.AtomicPut(k1, []byte{}, nil, nil) + assert.Nil(t, err) + assert.True(t, success) +} + +func testAtomicDelete(t *testing.T, kv store.Store) { + key := "testAtomicDelete" + value := []byte("world") + + // Put the key + err := kv.Put(key, value, nil) + assert.NoError(t, err) + + // Get should return the value and an incremented index + pair, err := kv.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + assert.NotEqual(t, pair.LastIndex, 0) + + tempIndex := pair.LastIndex + + // AtomicDelete should fail + pair.LastIndex = 6744 + success, err := kv.AtomicDelete(key, pair) + assert.Error(t, err) + assert.False(t, success) + + // AtomicDelete should succeed + pair.LastIndex = tempIndex + success, err = kv.AtomicDelete(key, pair) + assert.NoError(t, err) + assert.True(t, success) + + // Delete a non-existent key; should fail + success, err = kv.AtomicDelete(key, pair) + assert.Error(t, store.ErrKeyNotFound) + assert.False(t, success) +} + +func testLockUnlock(t *testing.T, kv store.Store) { + key := "testLockUnlock" + value := []byte("bar") + + // We should be able to create a new lock on key + lock, err := kv.NewLock(key, &store.LockOptions{Value: value, TTL: 2 * time.Second}) + assert.NoError(t, err) + assert.NotNil(t, lock) + + // Lock should successfully succeed or block + lockChan, err := lock.Lock(nil) + assert.NoError(t, err) + assert.NotNil(t, lockChan) + + // Get should work + pair, err := kv.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + assert.NotEqual(t, pair.LastIndex, 0) + + // Unlock should succeed + err = lock.Unlock() + assert.NoError(t, err) + + // Lock should succeed again + lockChan, err = lock.Lock(nil) + assert.NoError(t, err) + assert.NotNil(t, lockChan) + + // Get should work + pair, err = kv.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + assert.NotEqual(t, pair.LastIndex, 0) + + err = lock.Unlock() + assert.NoError(t, err) +} + +func testLockTTL(t *testing.T, kv store.Store, otherConn store.Store) { + key := "testLockTTL" + value := []byte("bar") + + renewCh := make(chan struct{}) + + // We should be able to create a new lock on key + lock, err := otherConn.NewLock(key, &store.LockOptions{ + Value: value, + TTL: 2 * time.Second, + RenewLock: renewCh, + }) + assert.NoError(t, err) + assert.NotNil(t, lock) + + // Lock should successfully succeed + lockChan, err := lock.Lock(nil) + assert.NoError(t, err) + assert.NotNil(t, lockChan) + + // Get should work + pair, err := otherConn.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + assert.NotEqual(t, pair.LastIndex, 0) + + time.Sleep(3 * time.Second) + + done := make(chan struct{}) + stop := make(chan struct{}) + + value = []byte("foobar") + + // Create a new lock with another connection + lock, err = kv.NewLock( + key, + 
&store.LockOptions{ + Value: value, + TTL: 3 * time.Second, + }, + ) + assert.NoError(t, err) + assert.NotNil(t, lock) + + // Lock should block, the session on the lock + // is still active and renewed periodically + go func(<-chan struct{}) { + _, _ = lock.Lock(stop) + done <- struct{}{} + }(done) + + select { + case _ = <-done: + t.Fatal("Lock succeeded on a key that is supposed to be locked by another client") + case <-time.After(4 * time.Second): + // Stop requesting the lock as we are blocked as expected + stop <- struct{}{} + break + } + + // Close the connection + otherConn.Close() + + // Force stop the session renewal for the lock + close(renewCh) + + // Let the session on the lock expire + time.Sleep(3 * time.Second) + locked := make(chan struct{}) + + // Lock should now succeed for the other client + go func(<-chan struct{}) { + lockChan, err = lock.Lock(nil) + assert.NoError(t, err) + assert.NotNil(t, lockChan) + locked <- struct{}{} + }(locked) + + select { + case _ = <-locked: + break + case <-time.After(4 * time.Second): + t.Fatal("Unable to take the lock, timed out") + } + + // Get should work with the new value + pair, err = kv.Get(key) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, value) + assert.NotEqual(t, pair.LastIndex, 0) + + err = lock.Unlock() + assert.NoError(t, err) +} + +func testPutTTL(t *testing.T, kv store.Store, otherConn store.Store) { + firstKey := "testPutTTL" + firstValue := []byte("foo") + + secondKey := "second" + secondValue := []byte("bar") + + // Put the first key with the Ephemeral flag + err := otherConn.Put(firstKey, firstValue, &store.WriteOptions{TTL: 2 * time.Second}) + assert.NoError(t, err) + + // Put a second key with the Ephemeral flag + err = otherConn.Put(secondKey, secondValue, &store.WriteOptions{TTL: 2 * time.Second}) + assert.NoError(t, err) + + // Get on firstKey should work + pair, err := kv.Get(firstKey) + assert.NoError(t, err) + assert.NotNil(t, pair) + + // Get on secondKey should work + pair, err = kv.Get(secondKey) + assert.NoError(t, err) + assert.NotNil(t, pair) + + // Close the connection + otherConn.Close() + + // Let the session expire + time.Sleep(3 * time.Second) + + // Get on firstKey shouldn't work + pair, err = kv.Get(firstKey) + assert.Error(t, err) + assert.Nil(t, pair) + + // Get on secondKey shouldn't work + pair, err = kv.Get(secondKey) + assert.Error(t, err) + assert.Nil(t, pair) +} + +func testList(t *testing.T, kv store.Store) { + prefix := "testList" + + firstKey := "testList/first" + firstValue := []byte("first") + + secondKey := "testList/second" + secondValue := []byte("second") + + // Put the first key + err := kv.Put(firstKey, firstValue, nil) + assert.NoError(t, err) + + // Put the second key + err = kv.Put(secondKey, secondValue, nil) + assert.NoError(t, err) + + // List should work and return the two correct values + for _, parent := range []string{prefix, prefix + "/"} { + pairs, err := kv.List(parent) + assert.NoError(t, err) + if assert.NotNil(t, pairs) { + assert.Equal(t, len(pairs), 2) + } + + // Check pairs, those are not necessarily in Put order + for _, pair := range pairs { + if pair.Key == firstKey { + assert.Equal(t, pair.Value, firstValue) + } + if pair.Key == secondKey { + assert.Equal(t, pair.Value, secondValue) + } + } + } + + // List should fail: the key does not exist + pairs, err := kv.List("idontexist") + assert.Equal(t, store.ErrKeyNotFound, err) + assert.Nil(t, pairs) +} + +func testDeleteTree(t *testing.T, 
kv store.Store) { + prefix := "testDeleteTree" + + firstKey := "testDeleteTree/first" + firstValue := []byte("first") + + secondKey := "testDeleteTree/second" + secondValue := []byte("second") + + // Put the first key + err := kv.Put(firstKey, firstValue, nil) + assert.NoError(t, err) + + // Put the second key + err = kv.Put(secondKey, secondValue, nil) + assert.NoError(t, err) + + // Get should work on the first Key + pair, err := kv.Get(firstKey) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, firstValue) + assert.NotEqual(t, pair.LastIndex, 0) + + // Get should work on the second Key + pair, err = kv.Get(secondKey) + assert.NoError(t, err) + if assert.NotNil(t, pair) { + assert.NotNil(t, pair.Value) + } + assert.Equal(t, pair.Value, secondValue) + assert.NotEqual(t, pair.LastIndex, 0) + + // Delete Values under directory `nodes` + err = kv.DeleteTree(prefix) + assert.NoError(t, err) + + // Get should fail on both keys + pair, err = kv.Get(firstKey) + assert.Error(t, err) + assert.Nil(t, pair) + + pair, err = kv.Get(secondKey) + assert.Error(t, err) + assert.Nil(t, pair) +} + +// RunCleanup cleans up keys introduced by the tests +func RunCleanup(t *testing.T, kv store.Store) { + for _, key := range []string{ + "testAtomicPutWithSlashSuffixKey", + "testPutGetDeleteExists", + "testWatch", + "testWatchTree", + "testAtomicPut", + "testAtomicPutCreate", + "testAtomicDelete", + "testLockUnlock", + "testLockTTL", + "testPutTTL", + "testList", + "testDeleteTree", + } { + err := kv.DeleteTree(key) + assert.True(t, err == nil || err == store.ErrKeyNotFound, fmt.Sprintf("failed to delete tree key %s: %v", key, err)) + err = kv.Delete(key) + assert.True(t, err == nil || err == store.ErrKeyNotFound, fmt.Sprintf("failed to delete key %s: %v", key, err)) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md new file mode 100644 index 000000000..7e64988f4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/README.md @@ -0,0 +1,43 @@ +Consul API client +================= + +This package provides the `api` package which attempts to +provide programmatic access to the full Consul API. + +Currently, all of the Consul APIs included in version 0.6.0 are supported. 
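+
+Beyond plain reads and writes, the KV endpoint also exposes check-and-set
+(CAS) operations, which the libkv Consul store in this tree relies on for its
+atomic calls. The snippet below is a minimal sketch, not a complete program
+for production use: it assumes a Consul agent reachable through
+`api.DefaultConfig()` and an existing key named `foo`.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		panic(err)
+	}
+	kv := client.KV()
+
+	// Read the current pair to learn its ModifyIndex.
+	pair, _, err := kv.Get("foo", nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Check-and-set: the write only succeeds if the key has not changed
+	// since the Get above. A ModifyIndex of 0 instead means
+	// "create only if the key does not exist yet".
+	p := &api.KVPair{Key: "foo", Value: []byte("updated")}
+	if pair != nil {
+		p.ModifyIndex = pair.ModifyIndex
+	}
+	ok, _, err := kv.CAS(p, nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("write applied:", ok)
+}
+```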
+ +Documentation +============= + +The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) + +Usage +===== + +Below is an example of using the Consul client: + +```go +// Get a new client +client, err := api.NewClient(api.DefaultConfig()) +if err != nil { + panic(err) +} + +// Get a handle to the KV API +kv := client.KV() + +// PUT a new KV pair +p := &api.KVPair{Key: "foo", Value: []byte("test")} +_, err = kv.Put(p, nil) +if err != nil { + panic(err) +} + +// Lookup the pair +pair, _, err := kv.Get("foo", nil) +if err != nil { + panic(err) +} +fmt.Printf("KV: %v", pair) + +``` diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 000000000..15d1f9f5a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,175 @@ +package api + +import ( + "time" +) + +const ( + // ACLCLientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +// ACLEntry is used to represent an ACL entry +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicatedIndex uint64 + LastSuccess time.Time + LastError time.Time +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Create is used to generate a new token with the given parameters +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL 
token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/acl_test.go b/vendor/github.com/hashicorp/consul/api/acl_test.go new file mode 100644 index 000000000..5adc2b56f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl_test.go @@ -0,0 +1,157 @@ +package api + +import ( + "testing" +) + +func TestAPI_ACLCreateDestroy(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + ae := ACLEntry{ + Name: "API test", + Type: ACLClientType, + Rules: `key "" { policy = "deny" }`, + } + + id, wm, err := acl.Create(&ae, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + ae2, _, err := acl.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules { + t.Fatalf("Bad: %#v", ae2) + } + + wm, err = acl.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } +} + +func TestAPI_ACLCloneDestroy(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + id, wm, err := acl.Clone(c.config.Token, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + wm, err = acl.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if wm.RequestTime == 0 { + t.Fatalf("bad: %v", wm) + } +} + +func TestAPI_ACLInfo(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + ae, qm, err := acl.Info(c.config.Token, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + + if ae == nil || ae.ID 
!= c.config.Token || ae.Type != ACLManagementType { + t.Fatalf("bad: %#v", ae) + } +} + +func TestAPI_ACLList(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + acls, qm, err := acl.List(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(acls) < 2 { + t.Fatalf("bad: %v", acls) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } +} + +func TestAPI_ACLReplication(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + acl := c.ACL() + + repl, qm, err := acl.Replication(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if repl == nil { + t.Fatalf("bad: %v", repl) + } + + if repl.Running { + t.Fatal("bad: repl should not be running") + } + + if repl.Enabled { + t.Fatal("bad: repl should not be enabled") + } + + if qm.RequestTime == 0 { + t.Fatalf("bad: %v", qm) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 000000000..86c9414ae --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,515 @@ +package api + +import ( + "bufio" + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string + EnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + EnableTagOverride bool `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to define a node or service level check +type AgentServiceCheck struct { + Script string `json:",omitempty"` + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. + Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Header map[string][]string `json:",omitempty"` + Method string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. 
If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// AgentToken is used when updating ACL tokens for an agent. +type AgentToken struct { + Token string +} + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Reload triggers a configuration reload for the agent we are connected to. +func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. 
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) WarnTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). 
+ Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). +func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. 
+func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. +func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + logCh <- scanner.Text() + } + } + }() + + return logCh, nil +} + +// UpdateACLToken updates the agent's "acl_token". See updateToken for more +// details. +func (c *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return c.updateToken("acl_token", token, q) +} + +// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken +// for more details. +func (c *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { + return c.updateToken("acl_agent_token", token, q) +} + +// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See +// updateToken for more details. +func (c *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { + return c.updateToken("acl_agent_master_token", token, q) +} + +// updateToken can be used to update an agent's ACL token after the agent has +// started. The tokens are not persisted, so will need to be updated again if +// the agent is restarted. 
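// Illustrative sketch, not part of the vendored agent.go: how an application
// might use the Agent endpoints above to register a service with a TTL check
// and keep it passing. The service name "web", the port, and the durations are
// placeholder values; a local agent reachable via DefaultConfig is assumed, and
// the import path is written as it is vendored in this repository.
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	agent := client.Agent()

	// Register a service with a TTL check; a TTL check starts out critical
	// until the application reports in.
	err = agent.ServiceRegister(&api.AgentServiceRegistration{
		Name: "web",
		Port: 8080,
		Check: &api.AgentServiceCheck{
			TTL:                            "30s",
			DeregisterCriticalServiceAfter: "90m",
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Heartbeat well inside the TTL window using the newer UpdateTTL API;
	// the check ID follows the "service:<name>" convention seen in the tests.
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		if err := agent.UpdateTTL("service:web", "all good", api.HealthPassing); err != nil {
			log.Printf("ttl update failed: %v", err)
		}
	}
}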
+func (c *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) + r.setWriteOptions(q) + r.obj = &AgentToken{Token: token} + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent_test.go b/vendor/github.com/hashicorp/consul/api/agent_test.go new file mode 100644 index 000000000..2f6d02c81 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent_test.go @@ -0,0 +1,788 @@ +package api + +import ( + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/hashicorp/consul/testutil" + "github.com/hashicorp/serf/serf" +) + +func TestAPI_AgentSelf(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + + name := info["Config"]["NodeName"] + if name == "" { + t.Fatalf("bad: %v", info) + } +} + +func TestAPI_AgentReload(t *testing.T) { + t.Parallel() + + // Create our initial empty config file, to be overwritten later + configFile := testutil.TempFile(t, "reload") + if _, err := configFile.Write([]byte("{}")); err != nil { + t.Fatalf("err: %s", err) + } + configFile.Close() + + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.Args = []string{"-config-file", configFile.Name()} + }) + defer s.Stop() + + agent := c.Agent() + + // Update the config file with a service definition + config := `{"service":{"name":"redis", "port":1234}}` + err := ioutil.WriteFile(configFile.Name(), []byte(config), 0644) + if err != nil { + t.Fatalf("err: %v", err) + } + + if err = agent.Reload(); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + + service, ok := services["redis"] + if !ok { + t.Fatalf("bad: %v", ok) + } + if service.Port != 1234 { + t.Fatalf("bad: %v", service.Port) + } +} + +func TestAPI_AgentMembers(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + members, err := agent.Members(false) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(members) != 1 { + t.Fatalf("bad: %v", members) + } +} + +func TestAPI_AgentServices(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + // Checks should default to critical + if chk.Status != HealthCritical { + t.Fatalf("Bad: %#v", chk) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentServices_CheckPassing(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: 
&AgentServiceCheck{ + TTL: "15s", + Status: HealthPassing, + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + if chk.Status != HealthPassing { + t.Fatalf("Bad: %#v", chk) + } + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentServices_CheckBadStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + Status: "fluffy", + }, + } + if err := agent.ServiceRegister(reg); err == nil { + t.Fatalf("bad status accepted") + } +} + +func TestAPI_AgentServiceAddress(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg1 := &AgentServiceRegistration{ + Name: "foo1", + Port: 8000, + Address: "192.168.0.42", + } + reg2 := &AgentServiceRegistration{ + Name: "foo2", + Port: 8000, + } + if err := agent.ServiceRegister(reg1); err != nil { + t.Fatalf("err: %v", err) + } + if err := agent.ServiceRegister(reg2); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + + if _, ok := services["foo1"]; !ok { + t.Fatalf("missing service: %v", services) + } + if _, ok := services["foo2"]; !ok { + t.Fatalf("missing service: %v", services) + } + + if services["foo1"].Address != "192.168.0.42" { + t.Fatalf("missing Address field in service foo1: %v", services) + } + if services["foo2"].Address != "" { + t.Fatalf("missing Address field in service foo2: %v", services) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentEnableTagOverride(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg1 := &AgentServiceRegistration{ + Name: "foo1", + Port: 8000, + Address: "192.168.0.42", + EnableTagOverride: true, + } + reg2 := &AgentServiceRegistration{ + Name: "foo2", + Port: 8000, + } + if err := agent.ServiceRegister(reg1); err != nil { + t.Fatalf("err: %v", err) + } + if err := agent.ServiceRegister(reg2); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + + if _, ok := services["foo1"]; !ok { + t.Fatalf("missing service: %v", services) + } + if services["foo1"].EnableTagOverride != true { + t.Fatalf("tag override not set on service foo1: %v", services) + } + if _, ok := services["foo2"]; !ok { + t.Fatalf("missing service: %v", services) + } + if services["foo2"].EnableTagOverride != false { + t.Fatalf("tag override set on service foo2: %v", services) + } +} + +func TestAPI_AgentServices_MultipleChecks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Checks: AgentServiceChecks{ + &AgentServiceCheck{ + TTL: "15s", + }, + &AgentServiceCheck{ + TTL: "30s", + }, + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", 
err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := checks["service:foo:1"]; !ok { + t.Fatalf("missing check: %v", checks) + } + if _, ok := checks["service:foo:2"]; !ok { + t.Fatalf("missing check: %v", checks) + } +} + +func TestAPI_AgentSetTTLStatus(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + verify := func(status, output string) { + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != status { + t.Fatalf("Bad: %#v", chk) + } + if chk.Output != output { + t.Fatalf("Bad: %#v", chk) + } + } + + if err := agent.WarnTTL("service:foo", "foo"); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthWarning, "foo") + + if err := agent.PassTTL("service:foo", "bar"); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthPassing, "bar") + + if err := agent.FailTTL("service:foo", "baz"); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthCritical, "baz") + + if err := agent.UpdateTTL("service:foo", "foo", "warn"); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthWarning, "foo") + + if err := agent.UpdateTTL("service:foo", "bar", "pass"); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthPassing, "bar") + + if err := agent.UpdateTTL("service:foo", "baz", "fail"); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthCritical, "baz") + + if err := agent.UpdateTTL("service:foo", "foo", HealthWarning); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthWarning, "foo") + + if err := agent.UpdateTTL("service:foo", "bar", HealthPassing); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthPassing, "bar") + + if err := agent.UpdateTTL("service:foo", "baz", HealthCritical); err != nil { + t.Fatalf("err: %v", err) + } + verify(HealthCritical, "baz") + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentChecks(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentCheckRegistration{ + Name: "foo", + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if chk.Status != HealthCritical { + t.Fatalf("check not critical: %v", chk) + } + + if err := agent.CheckDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentCheckStartPassing(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentCheckRegistration{ + Name: "foo", + AgentServiceCheck: AgentServiceCheck{ + Status: HealthPassing, + }, + } + reg.TTL = "15s" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } 
+ if chk.Status != HealthPassing { + t.Fatalf("check not passing: %v", chk) + } + + if err := agent.CheckDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentChecks_serviceBound(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Register a check bound to the service + reg := &AgentCheckRegistration{ + Name: "redischeck", + ServiceID: "redis", + } + reg.TTL = "15s" + reg.DeregisterCriticalServiceAfter = "nope" + err := agent.CheckRegister(reg) + if err == nil || !strings.Contains(err.Error(), "invalid duration") { + t.Fatalf("err: %v", err) + } + + reg.DeregisterCriticalServiceAfter = "90m" + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + + check, ok := checks["redischeck"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if check.ServiceID != "redis" { + t.Fatalf("missing service association for check: %v", check) + } +} + +func TestAPI_AgentChecks_Docker(t *testing.T) { + t.Parallel() + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + c.EnableScriptChecks = true + }) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Register a check bound to the service + reg := &AgentCheckRegistration{ + Name: "redischeck", + ServiceID: "redis", + AgentServiceCheck: AgentServiceCheck{ + DockerContainerID: "f972c95ebf0e", + Script: "/bin/true", + Shell: "/bin/bash", + Interval: "10s", + }, + } + if err := agent.CheckRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + + check, ok := checks["redischeck"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + if check.ServiceID != "redis" { + t.Fatalf("missing service association for check: %v", check) + } +} + +func TestAPI_AgentJoin(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Join ourself + addr := info["Config"]["AdvertiseAddr"].(string) + err = agent.Join(addr, false) + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentLeave(t *testing.T) { + t.Parallel() + c1, s1 := makeClient(t) + defer s1.Stop() + + c2, s2 := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.Server = false + conf.Bootstrap = false + }) + defer s2.Stop() + + if err := c2.Agent().Join(s1.LANAddr, false); err != nil { + t.Fatalf("err: %v", err) + } + + // We sometimes see an EOF response to this one, depending on timing. 
+ err := c2.Agent().Leave() + if err != nil && !strings.Contains(err.Error(), "EOF") { + t.Fatalf("err: %v", err) + } + + // Make sure the second agent's status is 'Left' + members, err := c1.Agent().Members(false) + if err != nil { + t.Fatalf("err: %v", err) + } + member := members[0] + if member.Name == s1.Config.NodeName { + member = members[1] + } + if member.Status != int(serf.StatusLeft) { + t.Fatalf("bad: %v", *member) + } +} + +func TestAPI_AgentForceLeave(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Eject somebody + err := agent.ForceLeave("foo") + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentMonitor(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + logCh, err := agent.Monitor("info", nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Wait for the first log message and validate it + select { + case log := <-logCh: + if !strings.Contains(log, "[INFO]") { + t.Fatalf("bad: %q", log) + } + case <-time.After(10 * time.Second): + t.Fatalf("failed to get a log message") + } +} + +func TestAPI_ServiceMaintenance(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // First register a service + serviceReg := &AgentServiceRegistration{ + Name: "redis", + } + if err := agent.ServiceRegister(serviceReg); err != nil { + t.Fatalf("err: %v", err) + } + + // Enable maintenance mode + if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure a critical check was added + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + found := false + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + found = true + if check.Status != HealthCritical || check.Notes != "broken" { + t.Fatalf("bad: %#v", checks) + } + } + } + if !found { + t.Fatalf("bad: %#v", checks) + } + + // Disable maintenance mode + if err := agent.DisableServiceMaintenance("redis"); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the critical health check was removed + checks, err = agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + t.Fatalf("should have removed health check") + } + } +} + +func TestAPI_NodeMaintenance(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Enable maintenance mode + if err := agent.EnableNodeMaintenance("broken"); err != nil { + t.Fatalf("err: %s", err) + } + + // Check that a critical check was added + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + found := false + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + found = true + if check.Status != HealthCritical || check.Notes != "broken" { + t.Fatalf("bad: %#v", checks) + } + } + } + if !found { + t.Fatalf("bad: %#v", checks) + } + + // Disable maintenance mode + if err := agent.DisableNodeMaintenance(); err != nil { + t.Fatalf("err: %s", err) + } + + // Ensure the check was removed + checks, err = agent.Checks() + if err != nil { + t.Fatalf("err: %s", err) + } + for _, check := range checks { + if strings.Contains(check.CheckID, "maintenance") { + t.Fatalf("should have removed health check") + } + } +} + +func TestAPI_AgentUpdateToken(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + agent 
:= c.Agent() + + if _, err := agent.UpdateACLToken("root", nil); err != nil { + t.Fatalf("err: %v", err) + } + + if _, err := agent.UpdateACLAgentToken("root", nil); err != nil { + t.Fatalf("err: %v", err) + } + + if _, err := agent.UpdateACLAgentMasterToken("root", nil); err != nil { + t.Fatalf("err: %v", err) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 000000000..0a62b4f68 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,772 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPCAFile defines an environment variable name which sets the + // CA file to use for talking to Consul over TLS. + HTTPCAFile = "CONSUL_CACERT" + + // HTTPCAPath defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul over TLS. + HTTPCAPath = "CONSUL_CAPATH" + + // HTTPClientCert defines an environment variable name which sets the + // client cert file to use for talking to Consul over TLS. + HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. 
Setting this to "_agent" will use the agent's node + // for the sort. + Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. Currently, only one key/value pair can + // be provided for filtering. + NodeMeta map[string]string + + // RelayFactor is used in keyring operations to cause reponses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // RelayFactor is used in keyring operations to cause reponses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // Transport is the Transport to use for the http client. + Transport *http.Transport + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. 
+ WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + TLSConfig TLSConfig +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. The port, if any + // will be removed from here and this will be set to the ServerName of the + // resulting config. + Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CAPath is the optional path to a directory of CA certificates to use for + // Consul communication, defaults to the system bundle if not specified. + CAPath string + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object , which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use the DefaultNonPooledConfig() instead. +func DefaultConfig() *Config { + return defaultConfig(cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application. +func DefaultNonPooledConfig() *Config { + return defaultConfig(cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport. 
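// Illustrative sketch, not part of the vendored api.go: constructing a client
// with explicit HTTPS/TLS settings instead of the CONSUL_* environment
// variables that DefaultConfig reads. The address and certificate paths are
// placeholders; the import path is written as it is vendored in this repository.
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "consul.example.com:8501"
	cfg.Scheme = "https"
	cfg.TLSConfig = api.TLSConfig{
		Address:  "consul.example.com", // used as the TLS ServerName (SNI)
		CAFile:   "/etc/consul/ca.pem",
		CertFile: "/etc/consul/client.crt",
		KeyFile:  "/etc/consul/client.key",
	}

	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Self() hits /v1/agent/self and doubles as a cheap connectivity check.
	if _, err := client.Agent().Self(); err != nil {
		log.Fatal(err)
	}
}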
+func defaultConfig(transportFn func() *http.Transport) *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + Transport: transportFn(), + } + + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { + config.Address = addr + } + + if token := os.Getenv(HTTPTokenEnvName); token != "" { + config.Token = token + } + + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) + } + + if enabled { + config.Scheme = "https" + } + } + + if v := os.Getenv(HTTPTLSServerName); v != "" { + config.TLSConfig.Address = v + } + if v := os.Getenv(HTTPCAFile); v != "" { + config.TLSConfig.CAFile = v + } + if v := os.Getenv(HTTPCAPath); v != "" { + config.TLSConfig.CAPath = v + } + if v := os.Getenv(HTTPClientCert); v != "" { + config.TLSConfig.CertFile = v + } + if v := os.Getenv(HTTPClientKey); v != "" { + config.TLSConfig.KeyFile = v + } + if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { + doVerify, err := strconv.ParseBool(v) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) + } + if !doVerify { + config.TLSConfig.InsecureSkipVerify = true + } + } + + return config +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + rootConfig := &rootcerts.Config{ + CAFile: tlsConfig.CAFile, + CAPath: tlsConfig.CAPath, + } + if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { + return nil, err + } + + return tlsClientConfig, nil +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.Transport == nil { + config.Transport = defConfig.Transport + } + + if config.TLSConfig.Address == "" { + config.TLSConfig.Address = defConfig.TLSConfig.Address + } + + if config.TLSConfig.CAFile == "" { + config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile + } + + if config.TLSConfig.CAPath == "" { + config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath + } + + if config.TLSConfig.CertFile == "" { + config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile + } + + if config.TLSConfig.KeyFile == "" { + 
config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = &http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. +const serverError = "Unexpected response code: 500" + +// IsServerError returns true for 500 errors from the Consul servers, these are +// usually retryable at a later time. +func IsServerError(err error) bool { + if err == nil { + return false + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. 
+ return strings.Contains(err.Error(), serverError) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + b, err := encodeBody(r.obj) + if err != nil { + return nil, err + } + r.body = b + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + req.Header = r.header + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } else { + return req, nil + } +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + header: make(http.Header), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.header.Set("X-Consul-Token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Now().Sub(start) + return diff, resp, err +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. 
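// Illustrative sketch, not part of the vendored api.go: a blocking ("long
// poll") loop built from QueryOptions.WaitIndex/WaitTime and
// QueryMeta.LastIndex, here against the catalog Service endpoint defined
// later in this package. The service name "web" is a placeholder, and the
// import path is written as it is vendored in this repository.
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	catalog := client.Catalog()

	var lastIndex uint64
	for {
		// Passing the previous X-Consul-Index back as WaitIndex makes the
		// call block until the result changes or WaitTime elapses.
		instances, meta, err := catalog.Service("web", "", &api.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  5 * time.Minute,
		})
		if err != nil {
			log.Fatal(err)
		}
		lastIndex = meta.LastIndex
		log.Printf("web has %d instances registered in the catalog", len(instances))
	}
}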
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api_test.go b/vendor/github.com/hashicorp/consul/api/api_test.go new file mode 100644 index 000000000..341956c4f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api_test.go @@ -0,0 +1,537 @@ +package api + +import ( + crand "crypto/rand" + "crypto/tls" + "fmt" + "net/http" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/hashicorp/consul/testutil" +) + +type configCallback func(c *Config) + +func makeClient(t *testing.T) (*Client, *testutil.TestServer) { + return makeClientWithConfig(t, nil, nil) +} + +func makeACLClient(t *testing.T) (*Client, *testutil.TestServer) { + return makeClientWithConfig(t, func(clientConfig *Config) { + clientConfig.Token = "root" + }, func(serverConfig *testutil.TestServerConfig) { + serverConfig.ACLMasterToken = "root" + serverConfig.ACLDatacenter = "dc1" + serverConfig.ACLDefaultPolicy = "deny" + }) +} + +func makeClientWithConfig( + t *testing.T, + cb1 configCallback, + cb2 testutil.ServerConfigCallback) (*Client, *testutil.TestServer) { + + // Make client config + 
conf := DefaultConfig() + if cb1 != nil { + cb1(conf) + } + // Create server + server, err := testutil.NewTestServerConfigT(t, cb2) + if err != nil { + t.Fatal(err) + } + conf.Address = server.HTTPAddr + + // Create client + client, err := NewClient(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + return client, server +} + +func testKey() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("Failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +func TestAPI_DefaultConfig_env(t *testing.T) { + t.Parallel() + addr := "1.2.3.4:5678" + token := "abcd1234" + auth := "username:password" + + os.Setenv(HTTPAddrEnvName, addr) + defer os.Setenv(HTTPAddrEnvName, "") + os.Setenv(HTTPTokenEnvName, token) + defer os.Setenv(HTTPTokenEnvName, "") + os.Setenv(HTTPAuthEnvName, auth) + defer os.Setenv(HTTPAuthEnvName, "") + os.Setenv(HTTPSSLEnvName, "1") + defer os.Setenv(HTTPSSLEnvName, "") + os.Setenv(HTTPCAFile, "ca.pem") + defer os.Setenv(HTTPCAFile, "") + os.Setenv(HTTPCAPath, "certs/") + defer os.Setenv(HTTPCAPath, "") + os.Setenv(HTTPClientCert, "client.crt") + defer os.Setenv(HTTPClientCert, "") + os.Setenv(HTTPClientKey, "client.key") + defer os.Setenv(HTTPClientKey, "") + os.Setenv(HTTPTLSServerName, "consul.test") + defer os.Setenv(HTTPTLSServerName, "") + os.Setenv(HTTPSSLVerifyEnvName, "0") + defer os.Setenv(HTTPSSLVerifyEnvName, "") + + for i, config := range []*Config{DefaultConfig(), DefaultNonPooledConfig()} { + if config.Address != addr { + t.Errorf("expected %q to be %q", config.Address, addr) + } + if config.Token != token { + t.Errorf("expected %q to be %q", config.Token, token) + } + if config.HttpAuth == nil { + t.Fatalf("expected HttpAuth to be enabled") + } + if config.HttpAuth.Username != "username" { + t.Errorf("expected %q to be %q", config.HttpAuth.Username, "username") + } + if config.HttpAuth.Password != "password" { + t.Errorf("expected %q to be %q", config.HttpAuth.Password, "password") + } + if config.Scheme != "https" { + t.Errorf("expected %q to be %q", config.Scheme, "https") + } + if config.TLSConfig.CAFile != "ca.pem" { + t.Errorf("expected %q to be %q", config.TLSConfig.CAFile, "ca.pem") + } + if config.TLSConfig.CAPath != "certs/" { + t.Errorf("expected %q to be %q", config.TLSConfig.CAPath, "certs/") + } + if config.TLSConfig.CertFile != "client.crt" { + t.Errorf("expected %q to be %q", config.TLSConfig.CertFile, "client.crt") + } + if config.TLSConfig.KeyFile != "client.key" { + t.Errorf("expected %q to be %q", config.TLSConfig.KeyFile, "client.key") + } + if config.TLSConfig.Address != "consul.test" { + t.Errorf("expected %q to be %q", config.TLSConfig.Address, "consul.test") + } + if !config.TLSConfig.InsecureSkipVerify { + t.Errorf("expected SSL verification to be off") + } + + // Use keep alives as a check for whether pooling is on or off. + if pooled := i == 0; pooled { + if config.Transport.DisableKeepAlives != false { + t.Errorf("expected keep alives to be enabled") + } + } else { + if config.Transport.DisableKeepAlives != true { + t.Errorf("expected keep alives to be disabled") + } + } + } +} + +func TestAPI_SetupTLSConfig(t *testing.T) { + // A default config should result in a clean default client config. 
+ tlsConfig := &TLSConfig{} + cc, err := SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected := &tls.Config{RootCAs: cc.RootCAs} + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: \n%v, \n%v", cc, expected) + } + + // Try some address variations with and without ports. + tlsConfig.Address = "127.0.0.1" + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected.ServerName = "127.0.0.1" + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: %v", cc) + } + + tlsConfig.Address = "127.0.0.1:80" + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected.ServerName = "127.0.0.1" + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: %v", cc) + } + + tlsConfig.Address = "demo.consul.io:80" + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected.ServerName = "demo.consul.io" + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: %v", cc) + } + + tlsConfig.Address = "[2001:db8:a0b:12f0::1]" + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected.ServerName = "[2001:db8:a0b:12f0::1]" + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: %v", cc) + } + + tlsConfig.Address = "[2001:db8:a0b:12f0::1]:80" + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected.ServerName = "2001:db8:a0b:12f0::1" + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: %v", cc) + } + + // Skip verification. + tlsConfig.InsecureSkipVerify = true + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + expected.InsecureSkipVerify = true + if !reflect.DeepEqual(cc, expected) { + t.Fatalf("bad: %v", cc) + } + + // Make a new config that hits all the file parsers. 
+ tlsConfig = &TLSConfig{ + CertFile: "../test/hostname/Alice.crt", + KeyFile: "../test/hostname/Alice.key", + CAFile: "../test/hostname/CertAuth.crt", + } + cc, err = SetupTLSConfig(tlsConfig) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(cc.Certificates) != 1 { + t.Fatalf("missing certificate: %v", cc.Certificates) + } + if cc.RootCAs == nil { + t.Fatalf("didn't load root CAs") + } + + // Use a directory to load the certs instead + cc, err = SetupTLSConfig(&TLSConfig{ + CAPath: "../test/ca_path", + }) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(cc.RootCAs.Subjects()) != 2 { + t.Fatalf("didn't load root CAs") + } +} + +func TestAPI_ClientTLSOptions(t *testing.T) { + t.Parallel() + // Start a server that verifies incoming HTTPS connections + _, srvVerify := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.CAFile = "../test/client_certs/rootca.crt" + conf.CertFile = "../test/client_certs/server.crt" + conf.KeyFile = "../test/client_certs/server.key" + conf.VerifyIncomingHTTPS = true + }) + defer srvVerify.Stop() + + // Start a server without VerifyIncomingHTTPS + _, srvNoVerify := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.CAFile = "../test/client_certs/rootca.crt" + conf.CertFile = "../test/client_certs/server.crt" + conf.KeyFile = "../test/client_certs/server.key" + conf.VerifyIncomingHTTPS = false + }) + defer srvNoVerify.Stop() + + // Client without a cert + t.Run("client without cert, validation", func(t *testing.T) { + client, err := NewClient(&Config{ + Address: srvVerify.HTTPSAddr, + Scheme: "https", + TLSConfig: TLSConfig{ + Address: "consul.test", + CAFile: "../test/client_certs/rootca.crt", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Should fail + _, err = client.Agent().Self() + if err == nil || !strings.Contains(err.Error(), "bad certificate") { + t.Fatal(err) + } + }) + + // Client with a valid cert + t.Run("client with cert, validation", func(t *testing.T) { + client, err := NewClient(&Config{ + Address: srvVerify.HTTPSAddr, + Scheme: "https", + TLSConfig: TLSConfig{ + Address: "consul.test", + CAFile: "../test/client_certs/rootca.crt", + CertFile: "../test/client_certs/client.crt", + KeyFile: "../test/client_certs/client.key", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Should succeed + _, err = client.Agent().Self() + if err != nil { + t.Fatal(err) + } + }) + + // Client without a cert + t.Run("client without cert, no validation", func(t *testing.T) { + client, err := NewClient(&Config{ + Address: srvNoVerify.HTTPSAddr, + Scheme: "https", + TLSConfig: TLSConfig{ + Address: "consul.test", + CAFile: "../test/client_certs/rootca.crt", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Should succeed + _, err = client.Agent().Self() + if err != nil { + t.Fatal(err) + } + }) + + // Client with a valid cert + t.Run("client with cert, no validation", func(t *testing.T) { + client, err := NewClient(&Config{ + Address: srvNoVerify.HTTPSAddr, + Scheme: "https", + TLSConfig: TLSConfig{ + Address: "consul.test", + CAFile: "../test/client_certs/rootca.crt", + CertFile: "../test/client_certs/client.crt", + KeyFile: "../test/client_certs/client.key", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Should succeed + _, err = client.Agent().Self() + if err != nil { + t.Fatal(err) + } + }) +} + +func TestAPI_SetQueryOptions(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("GET", "/v1/kv/foo") + q := &QueryOptions{ + Datacenter: "foo", 
+ AllowStale: true, + RequireConsistent: true, + WaitIndex: 1000, + WaitTime: 100 * time.Second, + Token: "12345", + Near: "nodex", + } + r.setQueryOptions(q) + + if r.params.Get("dc") != "foo" { + t.Fatalf("bad: %v", r.params) + } + if _, ok := r.params["stale"]; !ok { + t.Fatalf("bad: %v", r.params) + } + if _, ok := r.params["consistent"]; !ok { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("index") != "1000" { + t.Fatalf("bad: %v", r.params) + } + if r.params.Get("wait") != "100000ms" { + t.Fatalf("bad: %v", r.params) + } + if r.header.Get("X-Consul-Token") != "12345" { + t.Fatalf("bad: %v", r.header) + } + if r.params.Get("near") != "nodex" { + t.Fatalf("bad: %v", r.params) + } +} + +func TestAPI_SetWriteOptions(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("GET", "/v1/kv/foo") + q := &WriteOptions{ + Datacenter: "foo", + Token: "23456", + } + r.setWriteOptions(q) + + if r.params.Get("dc") != "foo" { + t.Fatalf("bad: %v", r.params) + } + if r.header.Get("X-Consul-Token") != "23456" { + t.Fatalf("bad: %v", r.header) + } +} + +func TestAPI_RequestToHTTP(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + r := c.newRequest("DELETE", "/v1/kv/foo") + q := &QueryOptions{ + Datacenter: "foo", + } + r.setQueryOptions(q) + req, err := r.toHTTP() + if err != nil { + t.Fatalf("err: %v", err) + } + + if req.Method != "DELETE" { + t.Fatalf("bad: %v", req) + } + if req.URL.RequestURI() != "/v1/kv/foo?dc=foo" { + t.Fatalf("bad: %v", req) + } +} + +func TestAPI_ParseQueryMeta(t *testing.T) { + t.Parallel() + resp := &http.Response{ + Header: make(map[string][]string), + } + resp.Header.Set("X-Consul-Index", "12345") + resp.Header.Set("X-Consul-LastContact", "80") + resp.Header.Set("X-Consul-KnownLeader", "true") + resp.Header.Set("X-Consul-Translate-Addresses", "true") + + qm := &QueryMeta{} + if err := parseQueryMeta(resp, qm); err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex != 12345 { + t.Fatalf("Bad: %v", qm) + } + if qm.LastContact != 80*time.Millisecond { + t.Fatalf("Bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("Bad: %v", qm) + } + if !qm.AddressTranslationEnabled { + t.Fatalf("Bad: %v", qm) + } +} + +func TestAPI_UnixSocket(t *testing.T) { + t.Parallel() + if runtime.GOOS == "windows" { + t.SkipNow() + } + + tempDir := testutil.TempDir(t, "consul") + defer os.RemoveAll(tempDir) + socket := filepath.Join(tempDir, "test.sock") + + c, s := makeClientWithConfig(t, func(c *Config) { + c.Address = "unix://" + socket + }, func(c *testutil.TestServerConfig) { + c.Addresses = &testutil.TestAddressConfig{ + HTTP: "unix://" + socket, + } + }) + defer s.Stop() + + agent := c.Agent() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %s", err) + } + if info["Config"]["NodeName"] == "" { + t.Fatalf("bad: %v", info) + } +} + +func TestAPI_durToMsec(t *testing.T) { + if ms := durToMsec(0); ms != "0ms" { + t.Fatalf("bad: %s", ms) + } + + if ms := durToMsec(time.Millisecond); ms != "1ms" { + t.Fatalf("bad: %s", ms) + } + + if ms := durToMsec(time.Microsecond); ms != "1ms" { + t.Fatalf("bad: %s", ms) + } + + if ms := durToMsec(5 * time.Millisecond); ms != "5ms" { + t.Fatalf("bad: %s", ms) + } +} + +func TestAPI_IsServerError(t *testing.T) { + if IsServerError(nil) { + t.Fatalf("should not be a server error") + } + + if IsServerError(fmt.Errorf("not the error you are looking for")) { + t.Fatalf("should not be a server error") + } + + if !IsServerError(fmt.Errorf(serverError)) { + t.Fatalf("should be 
a server error") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 000000000..babfc9a1d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,198 @@ +package api + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServicePort int + ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck +} + +type CatalogDeregistration struct { + Node string + Address string // Obsolete. + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out 
map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog_test.go b/vendor/github.com/hashicorp/consul/api/catalog_test.go new file mode 100644 index 000000000..2f49969ba --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog_test.go @@ -0,0 +1,474 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil" + "github.com/hashicorp/consul/testutil/retry" + "github.com/pascaldekloe/goe/verify" +) + +func TestAPI_CatalogDatacenters(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { + datacenters, err := catalog.Datacenters() + if err != nil { + r.Fatal(err) + } + if len(datacenters) < 1 { + r.Fatal("got 0 datacenters want at least one") + } + }) +} + +func TestAPI_CatalogNodes(t *testing.T) { + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) { + nodes, meta, err := catalog.Nodes(nil) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatal("got last index 0 want > 0") + } + want := []*Node{ + { + ID: s.Config.NodeID, + Node: s.Config.NodeName, + Address: "127.0.0.1", + Datacenter: "dc1", + TaggedAddresses: map[string]string{ + "lan": "127.0.0.1", + "wan": "127.0.0.1", + }, + Meta: map[string]string{}, + CreateIndex: meta.LastIndex - 1, + ModifyIndex: meta.LastIndex, + }, + } + if !verify.Values(r, "", nodes, want) { + r.FailNow() + } + }) +} + +func TestAPI_CatalogNodes_MetaFilter(t *testing.T) { + meta := map[string]string{"somekey": "somevalue"} + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.NodeMeta = meta + }) + defer s.Stop() + + catalog := c.Catalog() + // Make sure we get the node back when filtering by its metadata + retry.Run(t, func(r *retry.R) { + nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: meta}) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(nodes) == 0 { + r.Fatalf("Bad: %v", nodes) + } + + if _, ok := nodes[0].TaggedAddresses["wan"]; !ok { + r.Fatalf("Bad: %v", nodes[0]) + } + + if v, ok := nodes[0].Meta["somekey"]; !ok || v != "somevalue" { + r.Fatalf("Bad: 
%v", nodes[0].Meta) + } + + if nodes[0].Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", nodes[0]) + } + }) + + retry.Run(t, func(r *retry.R) { + // Get nothing back when we use an invalid filter + nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}}) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(nodes) != 0 { + r.Fatalf("Bad: %v", nodes) + } + }) +} + +func TestAPI_CatalogServices(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { + services, meta, err := catalog.Services(nil) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(services) == 0 { + r.Fatalf("Bad: %v", services) + } + }) +} + +func TestAPI_CatalogServices_NodeMetaFilter(t *testing.T) { + meta := map[string]string{"somekey": "somevalue"} + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.NodeMeta = meta + }) + defer s.Stop() + + catalog := c.Catalog() + // Make sure we get the service back when filtering by the node's metadata + retry.Run(t, func(r *retry.R) { + services, meta, err := catalog.Services(&QueryOptions{NodeMeta: meta}) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(services) == 0 { + r.Fatalf("Bad: %v", services) + } + }) + + retry.Run(t, func(r *retry.R) { + // Get nothing back when using an invalid filter + services, meta, err := catalog.Services(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}}) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(services) != 0 { + r.Fatalf("Bad: %v", services) + } + }) +} + +func TestAPI_CatalogService(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { + services, meta, err := catalog.Service("consul", "", nil) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(services) == 0 { + r.Fatalf("Bad: %v", services) + } + + if services[0].Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", services[0]) + } + }) +} + +func TestAPI_CatalogService_NodeMetaFilter(t *testing.T) { + t.Parallel() + meta := map[string]string{"somekey": "somevalue"} + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.NodeMeta = meta + }) + defer s.Stop() + + catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { + services, meta, err := catalog.Service("consul", "", &QueryOptions{NodeMeta: meta}) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(services) == 0 { + r.Fatalf("Bad: %v", services) + } + + if services[0].Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", services[0]) + } + }) +} + +func TestAPI_CatalogNode(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + name, _ := c.Agent().NodeName() + retry.Run(t, func(r *retry.R) { + info, meta, err := catalog.Node(name, nil) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(info.Services) == 0 { + r.Fatalf("Bad: %v", info) + } + + if _, ok := info.Node.TaggedAddresses["wan"]; !ok { + r.Fatalf("Bad: %v", info) + } + + if info.Node.Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", info) + } + }) +} + +func 
TestAPI_CatalogRegistration(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + service := &AgentService{ + ID: "redis1", + Service: "redis", + Tags: []string{"master", "v1"}, + Port: 8000, + } + + check := &AgentCheck{ + Node: "foobar", + CheckID: "service:redis1", + Name: "Redis health check", + Notes: "Script based health check", + Status: HealthPassing, + ServiceID: "redis1", + } + + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + NodeMeta: map[string]string{"somekey": "somevalue"}, + Service: service, + Check: check, + } + retry.Run(t, func(r *retry.R) { + if _, err := catalog.Register(reg, nil); err != nil { + r.Fatal(err) + } + + node, _, err := catalog.Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if _, ok := node.Services["redis1"]; !ok { + r.Fatal("missing service: redis1") + } + + health, _, err := c.Health().Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if health[0].CheckID != "service:redis1" { + r.Fatal("missing checkid service:redis1") + } + + if v, ok := node.Node.Meta["somekey"]; !ok || v != "somevalue" { + r.Fatal("missing node meta pair somekey:somevalue") + } + }) + + // Test catalog deregistration of the previously registered service + dereg := &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + ServiceID: "redis1", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + retry.Run(t, func(r *retry.R) { + node, _, err := catalog.Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if _, ok := node.Services["redis1"]; ok { + r.Fatal("ServiceID:redis1 is not deregistered") + } + }) + + // Test deregistration of the previously registered check + dereg = &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + CheckID: "service:redis1", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + retry.Run(t, func(r *retry.R) { + health, _, err := c.Health().Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if len(health) != 0 { + r.Fatal("CheckID:service:redis1 is not deregistered") + } + }) + + // Test node deregistration of the previously registered node + dereg = &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + } + + if _, err := catalog.Deregister(dereg, nil); err != nil { + t.Fatalf("err: %v", err) + } + + retry.Run(t, func(r *retry.R) { + node, _, err := catalog.Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if node != nil { + r.Fatalf("node is not deregistered: %v", node) + } + }) +} + +func TestAPI_CatalogEnableTagOverride(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + service := &AgentService{ + ID: "redis1", + Service: "redis", + Tags: []string{"master", "v1"}, + Port: 8000, + } + + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + Service: service, + } + + retry.Run(t, func(r *retry.R) { + if _, err := catalog.Register(reg, nil); err != nil { + r.Fatal(err) + } + + node, _, err := catalog.Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if _, ok := node.Services["redis1"]; !ok { + r.Fatal("missing service: redis1") + } + if node.Services["redis1"].EnableTagOverride != false { + r.Fatal("tag override set") + } + + services, _, err := catalog.Service("redis", "", nil) + if err != nil { + 
r.Fatal(err) + } + + if len(services) < 1 || services[0].ServiceName != "redis" { + r.Fatal("missing service: redis") + } + if services[0].ServiceEnableTagOverride != false { + r.Fatal("tag override set") + } + }) + + service.EnableTagOverride = true + + retry.Run(t, func(r *retry.R) { + if _, err := catalog.Register(reg, nil); err != nil { + r.Fatal(err) + } + + node, _, err := catalog.Node("foobar", nil) + if err != nil { + r.Fatal(err) + } + + if _, ok := node.Services["redis1"]; !ok { + r.Fatal("missing service: redis1") + } + if node.Services["redis1"].EnableTagOverride != true { + r.Fatal("tag override not set") + } + + services, _, err := catalog.Service("redis", "", nil) + if err != nil { + r.Fatal(err) + } + + if len(services) < 1 || services[0].ServiceName != "redis" { + r.Fatal("missing service: redis") + } + if services[0].ServiceEnableTagOverride != true { + r.Fatal("tag override not set") + } + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 000000000..ae8d16ee6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,67 @@ +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap has the coordinates for servers in a given datacenter +// and area. Network coordinates are only compatible within the same area. +type CoordinateDatacenterMap struct { + Datacenter string + AreaID string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool. 
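+//
+// Illustrative usage (a sketch, not part of the upstream file; assumes an
+// already-configured *Client named client):
+//
+//	coord := client.Coordinate()
+//	entries, _, err := coord.Nodes(nil)
+//	if err != nil {
+//		// handle the request error
+//	}
+//	for _, entry := range entries {
+//		fmt.Printf("%s: %v\n", entry.Node, entry.Coord)
+//	}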
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/coordinate/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CoordinateEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate_test.go b/vendor/github.com/hashicorp/consul/api/coordinate_test.go new file mode 100644 index 000000000..f41756b57 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate_test.go @@ -0,0 +1,44 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil/retry" +) + +func TestAPI_CoordinateDatacenters(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + coordinate := c.Coordinate() + retry.Run(t, func(r *retry.R) { + datacenters, err := coordinate.Datacenters() + if err != nil { + r.Fatal(err) + } + + if len(datacenters) == 0 { + r.Fatalf("Bad: %v", datacenters) + } + }) +} + +func TestAPI_CoordinateNodes(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + coordinate := c.Coordinate() + retry.Run(t, func(r *retry.R) { + _, _, err := coordinate.Nodes(nil) + if err != nil { + r.Fatal(err) + } + + // There's not a good way to populate coordinates without + // waiting for them to calculate and update, so the best + // we can do is call the endpoint and make sure we don't + // get an error. + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 000000000..85b5b069b --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. +func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { + r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) + r.setWriteOptions(q) + if params.NodeFilter != "" { + r.params.Set("node", params.NodeFilter) + } + if params.ServiceFilter != "" { + r.params.Set("service", params.ServiceFilter) + } + if params.TagFilter != "" { + r.params.Set("tag", params.TagFilter) + } + if params.Payload != nil { + r.body = bytes.NewReader(params.Payload) + } + + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out UserEvent + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// List is used to get the most recent events an agent has received. +// This list can be optionally filtered by the name. This endpoint supports +// quasi-blocking queries. 
The index is not monotonic, nor does it provide provide +// LastContact or KnownLeader. +func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { + r := e.c.newRequest("GET", "/v1/event/list") + r.setQueryOptions(q) + if name != "" { + r.params.Set("name", name) + } + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*UserEvent + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// IDToIndex is a bit of a hack. This simulates the index generation to +// convert an event ID into a WaitIndex. +func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/vendor/github.com/hashicorp/consul/api/event_test.go b/vendor/github.com/hashicorp/consul/api/event_test.go new file mode 100644 index 000000000..1459590e0 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event_test.go @@ -0,0 +1,50 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil/retry" +) + +func TestAPI_EventFireList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + event := c.Event() + + params := &UserEvent{Name: "foo"} + id, meta, err := event.Fire(params, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + var events []*UserEvent + var qm *QueryMeta + + retry.Run(t, func(r *retry.R) { + events, qm, err = event.List("", nil) + if err != nil { + r.Fatalf("err: %v", err) + } + if len(events) <= 0 { + r.Fatal(err) + } + }) + + if events[len(events)-1].ID != id { + t.Fatalf("bad: %#v", events) + } + + if qm.LastIndex != event.IDToIndex(id) { + t.Fatalf("Bad: %#v", qm) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go new file mode 100644 index 000000000..38c105fdb --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,200 @@ +package api + +import ( + "fmt" + "strings" +) + +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. + ServiceMaintPrefix = "_service_maintenance:" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + ServiceTags []string +} + +// HealthChecks is a collection of HealthCheck structs. +type HealthChecks []*HealthCheck + +// AggregatedStatus returns the "best" status for the list of health checks. 
+// Because a given entry may have many service and node-level health checks +// attached, this function determines the best representative of the status as +// as single string using the following heuristic: +// +// maintenance > critical > warning > passing +// +func (c HealthChecks) AggregatedStatus() string { + var passing, warning, critical, maintenance bool + for _, check := range c { + id := string(check.CheckID) + if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { + maintenance = true + continue + } + + switch check.Status { + case HealthPassing: + passing = true + case HealthWarning: + warning = true + case HealthCritical: + critical = true + default: + return "" + } + } + + switch { + case maintenance: + return HealthMaint + case critical: + return HealthCritical + case warning: + return HealthWarning + case passing: + return HealthPassing + default: + return HealthPassing + } +} + +// ServiceEntry is used for the health service endpoint +type ServiceEntry struct { + Node *Node + Service *AgentService + Checks HealthChecks +} + +// Health can be used to query the Health endpoints +type Health struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Health() *Health { + return &Health{c} +} + +// Node is used to query for checks belonging to a given node +func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Checks is used to return the checks associated with a service +func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/checks/"+service) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query health information along with service info +// for a given service. It can optionally do server-side filtering on a tag +// or nodes with passing health checks only. +func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + if passingOnly { + r.params.Set(HealthPassing, "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retrieve all the checks in a given state. +// The wildcard "any" state can also be used for all checks. 
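+//
+// Illustrative usage (a sketch, not part of the upstream file; assumes an
+// already-configured *Client named client):
+//
+//	health := client.Health()
+//	checks, _, err := health.State(HealthCritical, nil)
+//	if err != nil {
+//		// handle the request error
+//	}
+//	for _, check := range checks {
+//		fmt.Printf("%s/%s is %s\n", check.Node, check.CheckID, check.Status)
+//	}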
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + switch state { + case HealthAny: + case HealthWarning: + case HealthCritical: + case HealthPassing: + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/health_test.go b/vendor/github.com/hashicorp/consul/api/health_test.go new file mode 100644 index 000000000..9cbb55e21 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health_test.go @@ -0,0 +1,354 @@ +package api + +import ( + "fmt" + "testing" + + "github.com/hashicorp/consul/testutil" + "github.com/hashicorp/consul/testutil/retry" + "github.com/pascaldekloe/goe/verify" +) + +func TestAPI_HealthNode(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + info, err := agent.Self() + if err != nil { + t.Fatalf("err: %v", err) + } + name := info["Config"]["NodeName"].(string) + retry.Run(t, func(r *retry.R) { + checks, meta, err := health.Node(name, nil) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if len(checks) == 0 { + r.Fatalf("bad: %v", checks) + } + }) +} + +func TestAPI_HealthChecks_AggregatedStatus(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + checks HealthChecks + exp string + }{ + { + "empty", + nil, + HealthPassing, + }, + { + "passing", + HealthChecks{ + &HealthCheck{ + Status: HealthPassing, + }, + }, + HealthPassing, + }, + { + "warning", + HealthChecks{ + &HealthCheck{ + Status: HealthWarning, + }, + }, + HealthWarning, + }, + { + "critical", + HealthChecks{ + &HealthCheck{ + Status: HealthCritical, + }, + }, + HealthCritical, + }, + { + "node_maintenance", + HealthChecks{ + &HealthCheck{ + CheckID: NodeMaint, + }, + }, + HealthMaint, + }, + { + "service_maintenance", + HealthChecks{ + &HealthCheck{ + CheckID: ServiceMaintPrefix + "service", + }, + }, + HealthMaint, + }, + { + "unknown", + HealthChecks{ + &HealthCheck{ + Status: "nope-nope-noper", + }, + }, + "", + }, + { + "maintenance_over_critical", + HealthChecks{ + &HealthCheck{ + CheckID: NodeMaint, + }, + &HealthCheck{ + Status: HealthCritical, + }, + }, + HealthMaint, + }, + { + "critical_over_warning", + HealthChecks{ + &HealthCheck{ + Status: HealthCritical, + }, + &HealthCheck{ + Status: HealthWarning, + }, + }, + HealthCritical, + }, + { + "warning_over_passing", + HealthChecks{ + &HealthCheck{ + Status: HealthWarning, + }, + &HealthCheck{ + Status: HealthPassing, + }, + }, + HealthWarning, + }, + { + "lots", + HealthChecks{ + &HealthCheck{ + Status: HealthPassing, + }, + &HealthCheck{ + Status: HealthPassing, + }, + &HealthCheck{ + Status: HealthPassing, + }, + &HealthCheck{ + Status: HealthWarning, + }, + }, + HealthWarning, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d_%s", i, tc.name), func(t *testing.T) { + act := tc.checks.AggregatedStatus() + if tc.exp != act { + t.Errorf("\nexp: %#v\nact: %#v", tc.exp, act) + } + }) + } +} + +func TestAPI_HealthChecks(t *testing.T) { + t.Parallel() + c, s := makeClientWithConfig(t, 
nil, func(conf *testutil.TestServerConfig) { + conf.NodeName = "node123" + }) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + // Make a service with a check + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar"}, + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + defer agent.ServiceDeregister("foo") + + retry.Run(t, func(r *retry.R) { + checks := HealthChecks{ + &HealthCheck{ + Node: "node123", + CheckID: "service:foo", + Name: "Service 'foo' check", + Status: "critical", + ServiceID: "foo", + ServiceName: "foo", + ServiceTags: []string{"bar"}, + }, + } + + out, meta, err := health.Checks("foo", nil) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if got, want := out, checks; !verify.Values(t, "checks", got, want) { + r.Fatal("health.Checks failed") + } + }) +} + +func TestAPI_HealthChecks_NodeMetaFilter(t *testing.T) { + t.Parallel() + meta := map[string]string{"somekey": "somevalue"} + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.NodeMeta = meta + }) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + // Make a service with a check + reg := &AgentServiceRegistration{ + Name: "foo", + Check: &AgentServiceCheck{ + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + defer agent.ServiceDeregister("foo") + + retry.Run(t, func(r *retry.R) { + checks, meta, err := health.Checks("foo", &QueryOptions{NodeMeta: meta}) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if len(checks) == 0 { + r.Fatalf("Bad: %v", checks) + } + }) +} + +func TestAPI_HealthService(t *testing.T) { + c, s := makeClient(t) + defer s.Stop() + + health := c.Health() + retry.Run(t, func(r *retry.R) { + // consul service should always exist... + checks, meta, err := health.Service("consul", "", true, nil) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if len(checks) == 0 { + r.Fatalf("Bad: %v", checks) + } + if _, ok := checks[0].Node.TaggedAddresses["wan"]; !ok { + r.Fatalf("Bad: %v", checks[0].Node) + } + if checks[0].Node.Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", checks[0].Node) + } + }) +} + +func TestAPI_HealthService_NodeMetaFilter(t *testing.T) { + meta := map[string]string{"somekey": "somevalue"} + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.NodeMeta = meta + }) + defer s.Stop() + + health := c.Health() + retry.Run(t, func(r *retry.R) { + // consul service should always exist... 
+ checks, meta, err := health.Service("consul", "", true, &QueryOptions{NodeMeta: meta}) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if len(checks) == 0 { + r.Fatalf("Bad: %v", checks) + } + if _, ok := checks[0].Node.TaggedAddresses["wan"]; !ok { + r.Fatalf("Bad: %v", checks[0].Node) + } + if checks[0].Node.Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", checks[0].Node) + } + }) +} + +func TestAPI_HealthState(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + health := c.Health() + retry.Run(t, func(r *retry.R) { + checks, meta, err := health.State("any", nil) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if len(checks) == 0 { + r.Fatalf("Bad: %v", checks) + } + }) +} + +func TestAPI_HealthState_NodeMetaFilter(t *testing.T) { + t.Parallel() + meta := map[string]string{"somekey": "somevalue"} + c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) { + conf.NodeMeta = meta + }) + defer s.Stop() + + health := c.Health() + retry.Run(t, func(r *retry.R) { + checks, meta, err := health.State("any", &QueryOptions{NodeMeta: meta}) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + if len(checks) == 0 { + r.Fatalf("Bad: %v", checks) + } + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go new file mode 100644 index 000000000..f91bb50fc --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -0,0 +1,420 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + // Key is the name of the key. It is also part of the URL path when accessed + // via the API. + Key string + + // CreateIndex holds the index corresponding the creation of this KVPair. This + // is a read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // LockIndex holds the index corresponding to a lock on this key, if any. This + // is a read-only field. + LockIndex uint64 + + // Flags are any user-defined flags on the key. It is up to the implementer + // to check these values, since Consul does not treat them specially. + Flags uint64 + + // Value is the value for the key. This can be any value, but it will be + // base64 encoded upon transport. + Value []byte + + // Session is a string representing the ID of the session. Any other + // interactions with this key over the same session must specify the same + // session ID. + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KVOp constants give possible operations available in a KVTxn. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" + KVCheckNotExists KVOp = "check-not-exists" +) + +// KVTxnOp defines a single operation inside a transaction. 
+type KVTxnOp struct { + Verb KVOp + Key string + Value []byte + Flags uint64 + Index uint64 + Session string +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key. The returned pointer +// to the KVPair will be nil if the key does not exist. +func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { + resp, qm, err := k.getInternal(key, nil, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to lookup all keys under a prefix +func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { + resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []*KVPair + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Keys is used to list all the keys under a prefix. Optionally, +// a separator can be used to limit the responses. +func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { + params := map[string]string{"keys": ""} + if separator != "" { + params["separator"] = separator + } + resp, qm, err := k.getInternal(prefix, params, q) + if err != nil { + return nil, nil, err + } + if resp == nil { + return nil, qm, nil + } + defer resp.Body.Close() + + var entries []string + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { + r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setQueryOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + resp.Body.Close() + return nil, qm, nil + } else if resp.StatusCode != 200 { + resp.Body.Close() + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + return resp, qm, nil +} + +// Put is used to write a new value. Only the +// Key, Flags and Value is respected. +func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { + params := make(map[string]string, 1) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + _, wm, err := k.put(p.Key, params, p.Value, q) + return wm, err +} + +// CAS is used for a Check-And-Set operation. The Key, +// ModifyIndex, Flags and Value are respected. Returns true +// on success or false on failures. 
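+//
+// Illustrative usage (a sketch, not part of the upstream file; assumes an
+// already-configured *Client named client and an arbitrary example key):
+//
+//	kv := client.KV()
+//	pair, _, err := kv.Get("app/config", nil)
+//	if err != nil || pair == nil {
+//		return // request failed or the key does not exist yet
+//	}
+//	pair.Value = []byte("updated")
+//	ok, _, err := kv.CAS(pair, nil) // succeeds only if ModifyIndex is still current
+//	if err == nil && !ok {
+//		// another writer changed the key first; re-read and retry
+//	}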
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) + return k.put(p.Key, params, p.Value, q) +} + +// Acquire is used for a lock acquisition operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["acquire"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +// Release is used for a lock release operation. The Key, +// Flags, Value and Session are respected. Returns true +// on success or false on failures. +func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + return res, qm, nil +} + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. 
+type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. Transactions are defined as a +// list of operations to perform, using the KVOp constants and KVTxnOp structure +// to define operations. If any operation fails, none of the changes are applied +// to the state store. Note that this hides the internal raw transaction interface +// and munges the input and output types into KV-specific ones for ease of use. +// If there are more non-KV operations in the future we may break out a new +// transaction API client, but it will be easy to keep this KV-specific variant +// supported. +// +// Even though this is generally a write operation, we take a QueryOptions input +// and return a QueryMeta output. If the transaction contains only read ops, then +// Consul will fast-path it to a different endpoint internally which supports +// consistency controls, but not blocking. If there are write operations then +// the request will always be routed through raft and any consistency settings +// will be ignored. +// +// Here's an example: +// +// ops := KVTxnOps{ +// &KVTxnOp{ +// Verb: KVLock, +// Key: "test/lock", +// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", +// Value: []byte("hello"), +// }, +// &KVTxnOp{ +// Verb: KVGet, +// Key: "another/key", +// }, +// } +// ok, response, _, err := kv.Txn(&ops, nil) +// +// If there is a problem making the transaction request then an error will be +// returned. Otherwise, the ok value will be true if the transaction succeeded +// or false if it was rolled back. The response is a structured return value which +// will have the outcome of the transaction. Its Results member will have entries +// for each operation. Deleted keys will have a nil entry in the, and to save +// space, the Value of each key in the Results will be nil unless the operation +// is a KVGet. If the transaction was rolled back, the Errors member will have +// entries referencing the index of the operation that failed along with an error +// message. +func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { + r := k.c.newRequest("PUT", "/v1/txn") + r.setQueryOptions(q) + + // Convert into the internal format since this is an all-KV txn. 
+ ops := make(TxnOps, 0, len(txn)) + for _, kvOp := range txn { + ops = append(ops, &TxnOp{KV: kvOp}) + } + r.obj = ops + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return false, nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { + var txnResp TxnResponse + if err := decodeBody(resp, &txnResp); err != nil { + return false, nil, nil, err + } + + // Convert from the internal format. + kvResp := KVTxnResponse{ + Errors: txnResp.Errors, + } + for _, result := range txnResp.Results { + kvResp.Results = append(kvResp.Results, result.KV) + } + return resp.StatusCode == http.StatusOK, &kvResp, qm, nil + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) +} diff --git a/vendor/github.com/hashicorp/consul/api/kv_test.go b/vendor/github.com/hashicorp/consul/api/kv_test.go new file mode 100644 index 000000000..1ca6cd794 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv_test.go @@ -0,0 +1,574 @@ +package api + +import ( + "bytes" + "path" + "strings" + "testing" + "time" +) + +func TestAPI_ClientPutGetDelete(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + key := testKey() + pair, _, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } + + value := []byte("test") + + // Put a key that begins with a '/', this should fail + invalidKey := "/test" + p := &KVPair{Key: invalidKey, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err == nil { + t.Fatalf("Invalid key not detected: %s", invalidKey) + } + + // Put the key + p = &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + if pair.Flags != 42 { + t.Fatalf("unexpected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete + if _, err := kv.Delete(key, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Get should fail + pair, _, err = kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } +} + +func TestAPI_ClientList_DeleteRecurse(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Generate some test keys + prefix := testKey() + var keys []string + for i := 0; i < 100; i++ { + keys = append(keys, path.Join(prefix, testKey())) + } + + // Set values + value := []byte("test") + for _, key := range keys { + p := &KVPair{Key: key, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // List the values + pairs, meta, err := kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != len(keys) { + t.Fatalf("got %d keys", len(pairs)) + } + for _, pair := range pairs { + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + } + if meta.LastIndex == 
0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete all + if _, err := kv.DeleteTree(prefix, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // List the values + pairs, _, err = kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 0 { + t.Fatalf("got %d keys", len(pairs)) + } +} + +func TestAPI_ClientDeleteCAS(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Put the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value} + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("CAS failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // CAS update with bad index + p.ModifyIndex = 1 + if work, _, err := kv.DeleteCAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if work { + t.Fatalf("unexpected CAS") + } + + // CAS update with valid index + p.ModifyIndex = meta.LastIndex + if work, _, err := kv.DeleteCAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("unexpected CAS failure") + } +} + +func TestAPI_ClientCAS(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Put the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value} + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("CAS failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // CAS update with bad index + newVal := []byte("foo") + p.Value = newVal + p.ModifyIndex = 1 + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if work { + t.Fatalf("unexpected CAS") + } + + // CAS update with valid index + p.ModifyIndex = meta.LastIndex + if work, _, err := kv.CAS(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("unexpected CAS failure") + } +} + +func TestAPI_ClientWatchGet(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + key := testKey() + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair != nil { + t.Fatalf("unexpected value: %#v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Put the key + value := []byte("test") + doneCh := make(chan struct{}) + go func() { + kv := c.KV() + + time.Sleep(100 * time.Millisecond) + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + doneCh <- struct{}{} + }() + + // Get should work + options := &QueryOptions{WaitIndex: meta.LastIndex} + pair, meta2, err := kv.Get(key, options) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, value) { + t.Fatalf("unexpected value: %#v", pair) + } + if pair.Flags != 42 { + t.Fatalf("unexpected value: %#v", pair) + } + if meta2.LastIndex <= meta.LastIndex { + t.Fatalf("unexpected value: %#v", meta2) + } + + // Block until put 
finishes to avoid a race between it and deferred s.Stop() + <-doneCh +} + +func TestAPI_ClientWatchList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Get a get without a key + prefix := testKey() + key := path.Join(prefix, testKey()) + pairs, meta, err := kv.List(prefix, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 0 { + t.Fatalf("unexpected value: %#v", pairs) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Put the key + value := []byte("test") + doneCh := make(chan struct{}) + go func() { + kv := c.KV() + + time.Sleep(100 * time.Millisecond) + p := &KVPair{Key: key, Flags: 42, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + doneCh <- struct{}{} + }() + + // Get should work + options := &QueryOptions{WaitIndex: meta.LastIndex} + pairs, meta2, err := kv.List(prefix, options) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(pairs) != 1 { + t.Fatalf("expected value: %#v", pairs) + } + if !bytes.Equal(pairs[0].Value, value) { + t.Fatalf("unexpected value: %#v", pairs) + } + if pairs[0].Flags != 42 { + t.Fatalf("unexpected value: %#v", pairs) + } + if meta2.LastIndex <= meta.LastIndex { + t.Fatalf("unexpected value: %#v", meta2) + } + + // Block until put finishes to avoid a race between it and deferred s.Stop() + <-doneCh +} + +func TestAPI_ClientKeys_DeleteRecurse(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + kv := c.KV() + + // Generate some test keys + prefix := testKey() + var keys []string + for i := 0; i < 100; i++ { + keys = append(keys, path.Join(prefix, testKey())) + } + + // Set values + value := []byte("test") + for _, key := range keys { + p := &KVPair{Key: key, Value: value} + if _, err := kv.Put(p, nil); err != nil { + t.Fatalf("err: %v", err) + } + } + + // List the values + out, meta, err := kv.Keys(prefix, "", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out) != len(keys) { + t.Fatalf("got %d keys", len(out)) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Delete all + if _, err := kv.DeleteTree(prefix, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // List the values + out, _, err = kv.Keys(prefix, "", nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out) != 0 { + t.Fatalf("got %d keys", len(out)) + } +} + +func TestAPI_ClientAcquireRelease(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + kv := c.KV() + + // Make a session + id, _, err := session.CreateNoChecks(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + // Acquire the key + key := testKey() + value := []byte("test") + p := &KVPair{Key: key, Value: value, Session: id} + if work, _, err := kv.Acquire(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("Lock failure") + } + + // Get should work + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if pair.LockIndex != 1 { + t.Fatalf("Expected lock: %v", pair) + } + if pair.Session != id { + t.Fatalf("Expected lock: %v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } + + // Release + if work, _, err := kv.Release(p, nil); err != nil { + t.Fatalf("err: %v", err) + } else if !work { + t.Fatalf("Release fail") + } + + // Get should work + pair, meta, err 
= kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if pair.LockIndex != 1 { + t.Fatalf("Expected lock: %v", pair) + } + if pair.Session != "" { + t.Fatalf("Expected unlock: %v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } +} + +func TestAPI_ClientTxn(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + kv := c.KV() + + // Make a session. + id, _, err := session.CreateNoChecks(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + // Acquire and get the key via a transaction, but don't supply a valid + // session. + key := testKey() + value := []byte("test") + txn := KVTxnOps{ + &KVTxnOp{ + Verb: KVLock, + Key: key, + Value: value, + }, + &KVTxnOp{ + Verb: KVGet, + Key: key, + }, + } + ok, ret, _, err := kv.Txn(txn, nil) + if err != nil { + t.Fatalf("err: %v", err) + } else if ok { + t.Fatalf("transaction should have failed") + } + + if ret == nil || len(ret.Errors) != 2 || len(ret.Results) != 0 { + t.Fatalf("bad: %v", ret) + } + if ret.Errors[0].OpIndex != 0 || + !strings.Contains(ret.Errors[0].What, "missing session") || + !strings.Contains(ret.Errors[1].What, "doesn't exist") { + t.Fatalf("bad: %v", ret.Errors[0]) + } + + // Now poke in a real session and try again. + txn[0].Session = id + ok, ret, _, err = kv.Txn(txn, nil) + if err != nil { + t.Fatalf("err: %v", err) + } else if !ok { + t.Fatalf("transaction failure") + } + + if ret == nil || len(ret.Errors) != 0 || len(ret.Results) != 2 { + t.Fatalf("bad: %v", ret) + } + for i, result := range ret.Results { + var expected []byte + if i == 1 { + expected = value + } + + if result.Key != key || + !bytes.Equal(result.Value, expected) || + result.Session != id || + result.LockIndex != 1 { + t.Fatalf("bad: %v", result) + } + } + + // Run a read-only transaction. + txn = KVTxnOps{ + &KVTxnOp{ + Verb: KVGet, + Key: key, + }, + } + ok, ret, _, err = kv.Txn(txn, nil) + if err != nil { + t.Fatalf("err: %v", err) + } else if !ok { + t.Fatalf("transaction failure") + } + + if ret == nil || len(ret.Errors) != 0 || len(ret.Results) != 1 { + t.Fatalf("bad: %v", ret) + } + for _, result := range ret.Results { + if result.Key != key || + !bytes.Equal(result.Value, value) || + result.Session != id || + result.LockIndex != 1 { + t.Fatalf("bad: %v", result) + } + } + + // Sanity check using the regular GET API. + pair, meta, err := kv.Get(key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if pair.LockIndex != 1 { + t.Fatalf("Expected lock: %v", pair) + } + if pair.Session != id { + t.Fatalf("Expected lock: %v", pair) + } + if meta.LastIndex == 0 { + t.Fatalf("unexpected value: %#v", meta) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go new file mode 100644 index 000000000..466ef5fdf --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -0,0 +1,385 @@ +package api + +import ( + "fmt" + "sync" + "time" +) + +const ( + // DefaultLockSessionName is the Session Name we assign if none is provided + DefaultLockSessionName = "Consul API Lock" + + // DefaultLockSessionTTL is the default session TTL if no Session is provided + // when creating a new Lock. This is used because we do not have another + // other check to depend upon. 
+ DefaultLockSessionTTL = "15s" + + // DefaultLockWaitTime is how long we block for at a time to check if lock + // acquisition is possible. This affects the minimum time it takes to cancel + // a Lock acquisition. + DefaultLockWaitTime = 15 * time.Second + + // DefaultLockRetryTime is how long we wait after a failed lock acquisition + // before attempting to do the lock again. This is so that once a lock-delay + // is in effect, we do not hot loop retrying the acquisition. + DefaultLockRetryTime = 5 * time.Second + + // DefaultMonitorRetryTime is how long we wait after a failed monitor check + // of a lock (500 response code). This allows the monitor to ride out brief + // periods of unavailability, subject to the MonitorRetries setting in the + // lock options which is by default set to 0, disabling this feature. This + // affects locks and semaphores. + DefaultMonitorRetryTime = 2 * time.Second + + // LockFlagValue is a magic flag we set to indicate a key + // is being used for a lock. It is used to detect a potential + // conflict with a semaphore. + LockFlagValue = 0x2ddccbc058a50c18 +) + +var ( + // ErrLockHeld is returned if we attempt to double lock + ErrLockHeld = fmt.Errorf("Lock already held") + + // ErrLockNotHeld is returned if we attempt to unlock a lock + // that we do not hold. + ErrLockNotHeld = fmt.Errorf("Lock not held") + + // ErrLockInUse is returned if we attempt to destroy a lock + // that is in use. + ErrLockInUse = fmt.Errorf("Lock in use") + + // ErrLockConflict is returned if the flags on a key + // used for a lock do not match expectation + ErrLockConflict = fmt.Errorf("Existing key does not match lock use") +) + +// Lock is used to implement client-side leader election. It is follows the +// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. +type Lock struct { + c *Client + opts *LockOptions + + isHeld bool + sessionRenew chan struct{} + lockSession string + l sync.Mutex +} + +// LockOptions is used to parameterize the Lock behavior. +type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. 
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + s, err := l.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
+ if l.opts.LockTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Look for an existing lock, blocking until not taken + pair, meta, err := kv.Get(l.opts.Key, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read lock: %v", err) + } + if pair != nil && pair.Flags != LockFlagValue { + return nil, ErrLockConflict + } + locked := false + if pair != nil && pair.Session == l.lockSession { + goto HELD + } + if pair != nil && pair.Session != "" { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Try to acquire the lock + pair = l.lockEntry(l.lockSession) + locked, _, err = kv.Acquire(pair, nil) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock: %v", err) + } + + // Handle the case of not getting the lock + if !locked { + // Determine why the lock failed + qOpts.WaitIndex = 0 + pair, meta, err = kv.Get(l.opts.Key, qOpts) + if pair != nil && pair.Session != "" { + //If the session is not null, this means that a wait can safely happen + //using a long poll + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } else { + // If the session is empty and the lock failed to acquire, then it means + // a lock-delay is in effect and a timed wait must be used + select { + case <-time.After(DefaultLockRetryTime): + goto WAIT + case <-stopCh: + return nil, nil + } + } + } + +HELD: + // Watch to ensure we maintain leadership + leaderCh := make(chan struct{}) + go l.monitorLock(l.lockSession, leaderCh) + + // Set that we own the lock + l.isHeld = true + + // Locked! All done + return leaderCh, nil +} + +// Unlock released the lock. It is an error to call this +// if the lock is not currently held. +func (l *Lock) Unlock() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Ensure the lock is actually held + if !l.isHeld { + return ErrLockNotHeld + } + + // Set that we no longer own the lock + l.isHeld = false + + // Stop the session renew + if l.sessionRenew != nil { + defer func() { + close(l.sessionRenew) + l.sessionRenew = nil + }() + } + + // Get the lock entry, and clear the lock session + lockEnt := l.lockEntry(l.lockSession) + l.lockSession = "" + + // Release the lock explicitly + kv := l.c.KV() + _, _, err := kv.Release(lockEnt, nil) + if err != nil { + return fmt.Errorf("failed to release lock: %v", err) + } + return nil +} + +// Destroy is used to cleanup the lock entry. It is not necessary +// to invoke. It will fail if the lock is in use. 
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. 
+ if retries > 0 && IsServerError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/lock_test.go b/vendor/github.com/hashicorp/consul/api/lock_test.go new file mode 100644 index 000000000..560209874 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/lock_test.go @@ -0,0 +1,560 @@ +package api + +import ( + "log" + "net/http" + "net/http/httptest" + "net/http/httputil" + "strings" + "sync" + "testing" + "time" +) + +func TestAPI_LockLockUnlock(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Initial unlock should fail + err = lock.Unlock() + if err != ErrLockNotHeld { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Double lock should fail + _, err = lock.Lock(nil) + if err != ErrLockHeld { + t.Fatalf("err: %v", err) + } + + // Should be leader + select { + case <-leaderCh: + t.Fatalf("should be leader") + default: + } + + // Initial unlock should work + err = lock.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double unlock should fail + err = lock.Unlock() + if err != ErrLockNotHeld { + t.Fatalf("err: %v", err) + } + + // Should lose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestAPI_LockForceInvalidate(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + go func() { + // Nuke the session, simulator an operator invalidation + // or a health check failure + session := c.Session() + session.Destroy(lock.lockSession, nil) + }() + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestAPI_LockDeleteKey(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // This uncovered some issues around special-case handling of low index + // numbers where it would work with a low number but fail for higher + // ones, so we loop this a bit to sweep the index up out of that + // territory. 
+ for i := 0; i < 10; i++ { + func() { + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + go func() { + // Nuke the key, simulate an operator intervention + kv := c.KV() + kv.Delete("test/lock", nil) + }() + + // Should loose leadership + select { + case <-leaderCh: + case <-time.After(10 * time.Second): + t.Fatalf("should not be leader") + } + }() + } +} + +func TestAPI_LockContend(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + wg := &sync.WaitGroup{} + acquired := make([]bool, 3) + for idx := range acquired { + wg.Add(1) + go func(idx int) { + defer wg.Done() + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work eventually, will contend + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + log.Printf("Contender %d acquired", idx) + + // Set acquired and then leave + acquired[idx] = true + }(idx) + } + + // Wait for termination + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + // Wait for everybody to get a turn + select { + case <-doneCh: + case <-time.After(3 * DefaultLockRetryTime): + t.Fatalf("timeout") + } + + for idx, did := range acquired { + if !did { + t.Fatalf("contender %d never acquired", idx) + } + } +} + +func TestAPI_LockDestroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Destroy should fail + if err := lock.Destroy(); err != ErrLockHeld { + t.Fatalf("err: %v", err) + } + + // Should be able to release + err = lock.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Acquire with a different lock + l2, err := c.LockKey("test/lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err = l2.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Destroy should still fail + if err := lock.Destroy(); err != ErrLockInUse { + t.Fatalf("err: %v", err) + } + + // Should release + err = l2.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + err = lock.Destroy() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double destroy should work + err = l2.Destroy() + if err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_LockConflict(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/lock/", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not hold") + } + defer sema.Release() + + lock, err := c.LockKey("test/lock/.lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should conflict with semaphore + _, err = lock.Lock(nil) + if err != ErrLockConflict { + t.Fatalf("err: %v", err) + } + + // Should conflict with semaphore + err = lock.Destroy() + if err != ErrLockConflict { + t.Fatalf("err: %v", err) + } +} + +func 
TestAPI_LockReclaimLock(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session, _, err := c.Session().Create(&SessionEntry{}, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + lock, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + l2, err := c.LockOpts(&LockOptions{Key: "test/lock", Session: session}) + if err != nil { + t.Fatalf("err: %v", err) + } + + reclaimed := make(chan (<-chan struct{}), 1) + go func() { + l2Ch, err := l2.Lock(nil) + if err != nil { + t.Fatalf("not locked: %v", err) + } + reclaimed <- l2Ch + }() + + // Should reclaim the lock + var leader2Ch <-chan struct{} + + select { + case leader2Ch = <-reclaimed: + case <-time.After(time.Second): + t.Fatalf("should have locked") + } + + // unlock should work + err = l2.Unlock() + if err != nil { + t.Fatalf("err: %v", err) + } + + //Both locks should see the unlock + select { + case <-leader2Ch: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } + + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestAPI_LockMonitorRetry(t *testing.T) { + t.Parallel() + raw, s := makeClient(t) + defer s.Stop() + + // Set up a server that always responds with 500 errors. + failer := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(500) + } + outage := httptest.NewServer(http.HandlerFunc(failer)) + defer outage.Close() + + // Set up a reverse proxy that will send some requests to the + // 500 server and pass everything else through to the real Consul + // server. + var mutex sync.Mutex + errors := 0 + director := func(req *http.Request) { + mutex.Lock() + defer mutex.Unlock() + + req.URL.Scheme = "http" + if errors > 0 && req.Method == "GET" && strings.Contains(req.URL.Path, "/v1/kv/test/lock") { + req.URL.Host = outage.URL[7:] // Strip off "http://". + errors-- + } else { + req.URL.Host = raw.config.Address + } + } + proxy := httptest.NewServer(&httputil.ReverseProxy{Director: director}) + defer proxy.Close() + + // Make another client that points at the proxy instead of the real + // Consul server. + config := raw.config + config.Address = proxy.URL[7:] // Strip off "http://". + c, err := NewClient(&config) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Set up a lock with retries enabled. + opts := &LockOptions{ + Key: "test/lock", + SessionTTL: "60s", + MonitorRetries: 3, + } + lock, err := c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if lock.opts.MonitorRetryTime != DefaultMonitorRetryTime { + t.Fatalf("bad: %d", lock.opts.MonitorRetryTime) + } + + // Now set a custom time for the test. + opts.MonitorRetryTime = 250 * time.Millisecond + lock, err = c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if lock.opts.MonitorRetryTime != 250*time.Millisecond { + t.Fatalf("bad: %d", lock.opts.MonitorRetryTime) + } + + // Should get the lock. + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + + // Poke the key using the raw client to force the monitor to wake up + // and check the lock again. This time we will return errors for some + // of the responses. 
+ mutex.Lock() + errors = 2 + mutex.Unlock() + pair, _, err := raw.KV().Get("test/lock", &QueryOptions{}) + if err != nil { + t.Fatalf("err: %v", err) + } + if _, err := raw.KV().Put(pair, &WriteOptions{}); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should still be the leader. + select { + case <-leaderCh: + t.Fatalf("should be leader") + default: + } + + // Now return an overwhelming number of errors. + mutex.Lock() + errors = 10 + mutex.Unlock() + if _, err := raw.KV().Put(pair, &WriteOptions{}); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should lose leadership. + select { + case <-leaderCh: + case <-time.After(time.Second): + t.Fatalf("should not be leader") + } +} + +func TestAPI_LockOneShot(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Set up a lock as a one-shot. + opts := &LockOptions{ + Key: "test/lock", + LockTryOnce: true, + } + lock, err := c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if lock.opts.LockWaitTime != DefaultLockWaitTime { + t.Fatalf("bad: %d", lock.opts.LockWaitTime) + } + + // Now set a custom time for the test. + opts.LockWaitTime = 250 * time.Millisecond + lock, err = c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if lock.opts.LockWaitTime != 250*time.Millisecond { + t.Fatalf("bad: %d", lock.opts.LockWaitTime) + } + + // Should get the lock. + ch, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("not leader") + } + + // Now try with another session. + contender, err := c.LockOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + start := time.Now() + ch, err = contender.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch != nil { + t.Fatalf("should not be leader") + } + diff := time.Now().Sub(start) + if diff < contender.opts.LockWaitTime || diff > 2*contender.opts.LockWaitTime { + t.Fatalf("time out of bounds: %9.6f", diff.Seconds()) + } + + // Unlock and then make sure the contender can get it. + if err := lock.Unlock(); err != nil { + t.Fatalf("err: %v", err) + } + ch, err = contender.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should be leader") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 000000000..079e22486 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go new file mode 100644 index 000000000..7b0e461e9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -0,0 +1,168 @@ +// The /v1/operator/area endpoints are available only in Consul Enterprise and +// interact with its network area subsystem. Network areas are used to link +// together Consul servers in different Consul datacenters. With network areas, +// Consul datacenters can be linked together in ways other than a fully-connected +// mesh, as is required for Consul's WAN. 
+package api + +import ( + "net" + "time" +) + +// Area defines a network area. +type Area struct { + // ID is this identifier for an area (a UUID). This must be left empty + // when creating a new area. + ID string + + // PeerDatacenter is the peer Consul datacenter that will make up the + // other side of this network area. Network areas always involve a pair + // of datacenters: the datacenter where the area was created, and the + // peer datacenter. This is required. + PeerDatacenter string + + // RetryJoin specifies the address of Consul servers to join to, such as + // an IPs or hostnames with an optional port number. This is optional. + RetryJoin []string +} + +// AreaJoinResponse is returned when a join occurs and gives the result for each +// address. +type AreaJoinResponse struct { + // The address that was joined. + Address string + + // Whether or not the join was a success. + Joined bool + + // If we couldn't join, this is the message with information. + Error string +} + +// SerfMember is a generic structure for reporting information about members in +// a Serf cluster. This is only used by the area endpoints right now, but this +// could be expanded to other endpoints in the future. +type SerfMember struct { + // ID is the node identifier (a UUID). + ID string + + // Name is the node name. + Name string + + // Addr has the IP address. + Addr net.IP + + // Port is the RPC port. + Port uint16 + + // Datacenter is the DC name. + Datacenter string + + // Role is "client", "server", or "unknown". + Role string + + // Build has the version of the Consul agent. + Build string + + // Protocol is the protocol of the Consul agent. + Protocol int + + // Status is the Serf health status "none", "alive", "leaving", "left", + // or "failed". + Status string + + // RTT is the estimated round trip time from the server handling the + // request to the this member. This will be negative if no RTT estimate + // is available. + RTT time.Duration +} + +// AreaCreate will create a new network area. The ID in the given structure must +// be empty and a generated ID will be returned on success. +func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("POST", "/v1/operator/area") + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaGet returns a single network area. +func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. 
+func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. +func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { + var out []*SerfMember + qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go new file mode 100644 index 000000000..0fa9d1604 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -0,0 +1,219 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// AutopilotConfiguration is used for querying/setting the Autopilot configuration. +// Autopilot helps manage operator tasks related to Consul servers like removing +// failed servers from the Raft quorum. +type AutopilotConfiguration struct { + // CleanupDeadServers controls whether to remove dead servers from the Raft + // peer list when a new server joins + CleanupDeadServers bool + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold *ReadableDuration + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 + + // ServerStabilizationTime is the minimum amount of time a server must be + // in a stable, healthy state before it can be added to the cluster. Only + // applicable with Raft protocol version 3 or higher. + ServerStabilizationTime *ReadableDuration + + // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating + // servers into zones for redundancy. If left blank, this feature will be disabled. + RedundancyZoneTag string + + // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration + // strategy of waiting until enough newer-versioned servers have been added to the + // cluster before promoting them to voters. + DisableUpgradeMigration bool + + // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when + // performing upgrade migrations. If left blank, the Consul version will be used. + UpgradeVersionTag string + + // CreateIndex holds the index corresponding the creation of this configuration. + // This is a read-only field. 
+ CreateIndex uint64 + + // ModifyIndex will be set to the index of the last update when retrieving the + // Autopilot configuration. Resubmitting a configuration with + // AutopilotCASConfiguration will perform a check-and-set operation which ensures + // there hasn't been a subsequent update since the configuration was retrieved. + ModifyIndex uint64 +} + +// ServerHealth is the health (from the leader's point of view) of a server. +type ServerHealth struct { + // ID is the raft ID of the server. + ID string + + // Name is the node name of the server. + Name string + + // Address is the address of the server. + Address string + + // The status of the SerfHealth check for the server. + SerfStatus string + + // Version is the Consul version of the server. + Version string + + // Leader is whether this server is currently the leader. + Leader bool + + // LastContact is the time since this node's last contact with the leader. + LastContact *ReadableDuration + + // LastTerm is the highest leader term this server has a record of in its Raft log. + LastTerm uint64 + + // LastIndex is the last log index this server has a record of in its Raft log. + LastIndex uint64 + + // Healthy is whether or not the server is healthy according to the current + // Autopilot config. + Healthy bool + + // Voter is whether this is a voting server. + Voter bool + + // StableSince is the last time this server's Healthy value changed. + StableSince time.Time +} + +// OperatorHealthReply is a representation of the overall health of the cluster +type OperatorHealthReply struct { + // Healthy is true if all the servers in the cluster are healthy. + Healthy bool + + // FailureTolerance is the number of healthy servers that could be lost without + // an outage occurring. + FailureTolerance int + + // Servers holds the health of each server. + Servers []ServerHealth +} + +// ReadableDuration is a duration type that is serialized to JSON in human readable format. +type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + str := string(raw) + if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { + return fmt.Errorf("must be enclosed with quotes: %s", str) + } + dur, err := time.ParseDuration(str[1 : len(str)-1]) + if err != nil { + return err + } + *d = ReadableDuration(dur) + return nil +} + +// AutopilotGetConfiguration is used to query the current Autopilot configuration. +func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} + +// AutopilotSetConfiguration is used to set the current Autopilot configuration. 
+func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// AutopilotCASConfiguration is used to perform a Check-And-Set update on the +// Autopilot configuration. The ModifyIndex value will be respected. Returns +// true on success or false on failures. +func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(string(buf.Bytes()), "true") + + return res, nil +} + +// AutopilotServerHealth +func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/health") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out OperatorHealthReply + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot_test.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot_test.go new file mode 100644 index 000000000..9b84b9b4e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot_test.go @@ -0,0 +1,104 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil" + "github.com/hashicorp/consul/testutil/retry" +) + +func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + operator := c.Operator() + config, err := operator.AutopilotGetConfiguration(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if !config.CleanupDeadServers { + t.Fatalf("bad: %v", config) + } + + // Change a config setting + newConf := &AutopilotConfiguration{CleanupDeadServers: false} + if err := operator.AutopilotSetConfiguration(newConf, nil); err != nil { + t.Fatalf("err: %v", err) + } + + config, err = operator.AutopilotGetConfiguration(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if config.CleanupDeadServers { + t.Fatalf("bad: %v", config) + } +} + +func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + operator := c.Operator() + config, err := operator.AutopilotGetConfiguration(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if !config.CleanupDeadServers { + t.Fatalf("bad: %v", config) + } + + // Pass an invalid ModifyIndex + { + newConf := &AutopilotConfiguration{ + CleanupDeadServers: false, + ModifyIndex: config.ModifyIndex - 1, + } + resp, err := operator.AutopilotCASConfiguration(newConf, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp { + t.Fatalf("bad: %v", resp) + } + } + + // Pass a valid ModifyIndex + { + newConf := &AutopilotConfiguration{ + CleanupDeadServers: false, + ModifyIndex: config.ModifyIndex, + } + resp, err := 
operator.AutopilotCASConfiguration(newConf, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if !resp { + t.Fatalf("bad: %v", resp) + } + } +} + +func TestAPI_OperatorAutopilotServerHealth(t *testing.T) { + t.Parallel() + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + c.RaftProtocol = 3 + }) + defer s.Stop() + + operator := c.Operator() + retry.Run(t, func(r *retry.R) { + out, err := operator.AutopilotServerHealth(nil) + if err != nil { + r.Fatalf("err: %v", err) + } + + if len(out.Servers) != 1 || + !out.Servers[0].Healthy || + out.Servers[0].Name != s.Config.NodeName { + r.Fatalf("bad: %v", out) + } + }) +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go new file mode 100644 index 000000000..4f91c3543 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go @@ -0,0 +1,83 @@ +package api + +// keyringRequest is used for performing Keyring operations +type keyringRequest struct { + Key string +} + +// KeyringResponse is returned when listing the gossip encryption keys +type KeyringResponse struct { + // Whether this response is for a WAN ring + WAN bool + + // The datacenter name this request corresponds to + Datacenter string + + // A map of the encryption keys to the number of nodes they're installed on + Keys map[string]int + + // The total number of nodes in this ring + NumNodes int +} + +// KeyringInstall is used to install a new gossip encryption key into the cluster +func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { + r := op.c.newRequest("POST", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringList is used to list the gossip keys installed in the cluster +func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { + r := op.c.newRequest("GET", "/v1/operator/keyring") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*KeyringResponse + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// KeyringRemove is used to remove a gossip encryption key from the cluster +func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringUse is used to change the active gossip encryption key +func (op *Operator) KeyringUse(key string, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring_test.go b/vendor/github.com/hashicorp/consul/api/operator_keyring_test.go new file mode 100644 index 000000000..e877170fe --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_keyring_test.go @@ -0,0 +1,73 @@ +package api + +import ( + "testing" + + "github.com/hashicorp/consul/testutil" +) + +func TestAPI_OperatorKeyringInstallListPutRemove(t *testing.T) { + 
oldKey := "d8wu8CSUrqgtjVsvcBPmhQ==" + newKey := "qxycTi/SsePj/TZzCBmNXw==" + t.Parallel() + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + c.Encrypt = oldKey + }) + defer s.Stop() + + operator := c.Operator() + if err := operator.KeyringInstall(newKey, nil); err != nil { + t.Fatalf("err: %v", err) + } + + listResponses, err := operator.KeyringList(nil) + if err != nil { + t.Fatalf("err %v", err) + } + + // Make sure the new key is installed + if len(listResponses) != 2 { + t.Fatalf("bad: %v", len(listResponses)) + } + for _, response := range listResponses { + if len(response.Keys) != 2 { + t.Fatalf("bad: %v", len(response.Keys)) + } + if _, ok := response.Keys[oldKey]; !ok { + t.Fatalf("bad: %v", ok) + } + if _, ok := response.Keys[newKey]; !ok { + t.Fatalf("bad: %v", ok) + } + } + + // Switch the primary to the new key + if err := operator.KeyringUse(newKey, nil); err != nil { + t.Fatalf("err: %v", err) + } + + if err := operator.KeyringRemove(oldKey, nil); err != nil { + t.Fatalf("err: %v", err) + } + + listResponses, err = operator.KeyringList(nil) + if err != nil { + t.Fatalf("err %v", err) + } + + // Make sure the old key is removed + if len(listResponses) != 2 { + t.Fatalf("bad: %v", len(listResponses)) + } + for _, response := range listResponses { + if len(response.Keys) != 1 { + t.Fatalf("bad: %v", len(response.Keys)) + } + if _, ok := response.Keys[oldKey]; ok { + t.Fatalf("bad: %v", ok) + } + if _, ok := response.Keys[newKey]; !ok { + t.Fatalf("bad: %v", ok) + } + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go new file mode 100644 index 000000000..5f3c25b13 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -0,0 +1,86 @@ +package api + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Consul. + ID string + + // Node is the node name of the server, as known by Consul, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. + Address string + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Consul. + Voter bool +} + +// RaftConfigration is returned when querying for the current Raft configuration. +type RaftConfiguration struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftGetConfiguration is used to query the current Raft peer set. 
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/raft/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out RaftConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft +// quorum but no longer known to Serf or the catalog) by address in the form of +// "IP:port". +func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") + r.setWriteOptions(q) + + r.params.Set("address", string(address)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} + +// RaftRemovePeerByID is used to kick a stale peer (one that it in the Raft +// quorum but no longer known to Serf or the catalog) by ID. +func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") + r.setWriteOptions(q) + + r.params.Set("id", string(id)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft_test.go b/vendor/github.com/hashicorp/consul/api/operator_raft_test.go new file mode 100644 index 000000000..a6eada42c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_raft_test.go @@ -0,0 +1,38 @@ +package api + +import ( + "strings" + "testing" +) + +func TestAPI_OperatorRaftGetConfiguration(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + operator := c.Operator() + out, err := operator.RaftGetConfiguration(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(out.Servers) != 1 || + !out.Servers[0].Leader || + !out.Servers[0].Voter { + t.Fatalf("bad: %v", out) + } +} + +func TestAPI_OperatorRaftRemovePeerByAddress(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // If we get this error, it proves we sent the address all the way + // through. + operator := c.Operator() + err := operator.RaftRemovePeerByAddress("nope", nil) + if err == nil || !strings.Contains(err.Error(), + "address \"nope\" was not found in the Raft configuration") { + t.Fatalf("err: %v", err) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go new file mode 100644 index 000000000..ff210de3f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -0,0 +1,198 @@ +package api + +// QueryDatacenterOptions sets options about how we fail over if there are no +// healthy nodes in the local datacenter. +type QueryDatacenterOptions struct { + // NearestN is set to the number of remote datacenters to try, based on + // network coordinates. + NearestN int + + // Datacenters is a fixed list of datacenters to try after NearestN. We + // never try a datacenter multiple times, so those are subtracted from + // this list before proceeding. + Datacenters []string +} + +// QueryDNSOptions controls settings when query results are served over DNS. +type QueryDNSOptions struct { + // TTL is the time to live for the served DNS results. 
+ TTL string +} + +// ServiceQuery is used to query for a set of healthy nodes offering a specific +// service. +type ServiceQuery struct { + // Service is the service to query. + Service string + + // Near allows baking in the name of a node to automatically distance- + // sort from. The magic "_agent" value is supported, which sorts near + // the agent which initiated the request by default. + Near string + + // Failover controls what we do if there are no healthy nodes in the + // local datacenter. + Failover QueryDatacenterOptions + + // If OnlyPassing is true then we will only include nodes with passing + // health checks (critical AND warning checks will cause a node to be + // discarded) + OnlyPassing bool + + // Tags are a set of required and/or disallowed tags. If a tag is in + // this list it must be present. If the tag is preceded with "!" then + // it is disallowed. + Tags []string + + // NodeMeta is a map of required node metadata fields. If a key/value + // pair is in this map it must be present on the node in order for the + // service entry to be returned. + NodeMeta map[string]string +} + +// QueryTemplate carries the arguments for creating a templated query. +type QueryTemplate struct { + // Type specifies the type of the query template. Currently only + // "name_prefix_match" is supported. This field is required. + Type string + + // Regexp allows specifying a regex pattern to match against the name + // of the query being executed. + Regexp string +} + +// PrepatedQueryDefinition defines a complete prepared query. +type PreparedQueryDefinition struct { + // ID is this UUID-based ID for the query, always generated by Consul. + ID string + + // Name is an optional friendly name for the query supplied by the + // user. NOTE - if this feature is used then it will reduce the security + // of any read ACL associated with this query/service since this name + // can be used to locate nodes with supplying any ACL. + Name string + + // Session is an optional session to tie this query's lifetime to. If + // this is omitted then the query will not expire. + Session string + + // Token is the ACL token used when the query was created, and it is + // used when a query is subsequently executed. This token, or a token + // with management privileges, must be used to change the query later. + Token string + + // Service defines a service query (leaving things open for other types + // later). + Service ServiceQuery + + // DNS has options that control how the results of this query are + // served over DNS. + DNS QueryDNSOptions + + // Template is used to pass through the arguments for creating a + // prepared query with an attached template. If a template is given, + // interpolations are possible in other struct fields. + Template QueryTemplate +} + +// PreparedQueryExecuteResponse has the results of executing a query. +type PreparedQueryExecuteResponse struct { + // Service is the service that was queried. + Service string + + // Nodes has the nodes that were output by the query. + Nodes []ServiceEntry + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int +} + +// PreparedQuery can be used to query the prepared query endpoints. +type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. 
+func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. +func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { + var out *PreparedQueryExecuteResponse + qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query_test.go b/vendor/github.com/hashicorp/consul/api/prepared_query_test.go new file mode 100644 index 000000000..f06218001 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/prepared_query_test.go @@ -0,0 +1,133 @@ +package api + +import ( + "reflect" + "testing" + + "github.com/hashicorp/consul/testutil/retry" +) + +func TestAPI_PreparedQuery(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Set up a node and a service. + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + TaggedAddresses: map[string]string{ + "wan": "127.0.0.1", + }, + NodeMeta: map[string]string{"somekey": "somevalue"}, + Service: &AgentService{ + ID: "redis1", + Service: "redis", + Tags: []string{"master", "v1"}, + Port: 8000, + }, + } + + catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { + if _, err := catalog.Register(reg, nil); err != nil { + r.Fatal(err) + } + if _, _, err := catalog.Node("foobar", nil); err != nil { + r.Fatal(err) + } + }) + + // Create a simple prepared query. 
+ def := &PreparedQueryDefinition{ + Name: "test", + Service: ServiceQuery{ + Service: "redis", + NodeMeta: map[string]string{"somekey": "somevalue"}, + }, + } + + query := c.PreparedQuery() + var err error + def.ID, _, err = query.Create(def, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back. + defs, _, err := query.Get(def.ID, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 1 || !reflect.DeepEqual(defs[0], def) { + t.Fatalf("bad: %v", defs) + } + + // List them all. + defs, _, err = query.List(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 1 || !reflect.DeepEqual(defs[0], def) { + t.Fatalf("bad: %v", defs) + } + + // Make an update. + def.Name = "my-query" + _, err = query.Update(def, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back again to verify the update worked. + defs, _, err = query.Get(def.ID, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 1 || !reflect.DeepEqual(defs[0], def) { + t.Fatalf("bad: %v", defs) + } + + // Execute by ID. + results, _, err := query.Execute(def.ID, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(results.Nodes) != 1 || results.Nodes[0].Node.Node != "foobar" { + t.Fatalf("bad: %v", results) + } + if wan, ok := results.Nodes[0].Node.TaggedAddresses["wan"]; !ok || wan != "127.0.0.1" { + t.Fatalf("bad: %v", results) + } + + // Execute by name. + results, _, err = query.Execute("my-query", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(results.Nodes) != 1 || results.Nodes[0].Node.Node != "foobar" { + t.Fatalf("bad: %v", results) + } + if wan, ok := results.Nodes[0].Node.TaggedAddresses["wan"]; !ok || wan != "127.0.0.1" { + t.Fatalf("bad: %v", results) + } + if results.Nodes[0].Node.Datacenter != "dc1" { + t.Fatalf("bad datacenter: %v", results) + } + + // Delete it. + _, err = query.Delete(def.ID, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Make sure there are no longer any queries. + defs, _, err = query.List(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(defs) != 0 { + t.Fatalf("bad: %v", defs) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go new file mode 100644 index 000000000..745a208c9 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -0,0 +1,24 @@ +package api + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + return raw.c.query(endpoint, out, q) +} + +// Write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Consul conventions. 
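+// For example, an illustrative sketch only (the endpoint, payload and client
+// below are hypothetical placeholders):
+//
+//	payload := map[string]string{"hello": "world"}
+//	var out map[string]interface{}
+//	_, err := client.Raw().Write("/v1/some/custom/endpoint", payload, &out, nil)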
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.write(endpoint, in, out, q) +} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go new file mode 100644 index 000000000..9ddbdc49e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -0,0 +1,513 @@ +package api + +import ( + "encoding/json" + "fmt" + "path" + "sync" + "time" +) + +const ( + // DefaultSemaphoreSessionName is the Session Name we assign if none is provided + DefaultSemaphoreSessionName = "Consul API Semaphore" + + // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided + // when creating a new Semaphore. This is used because we do not have another + // other check to depend upon. + DefaultSemaphoreSessionTTL = "15s" + + // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore + // acquisition is possible. This affects the minimum time it takes to cancel + // a Semaphore acquisition. + DefaultSemaphoreWaitTime = 15 * time.Second + + // DefaultSemaphoreKey is the key used within the prefix to + // use for coordination between all the contenders. + DefaultSemaphoreKey = ".lock" + + // SemaphoreFlagValue is a magic flag we set to indicate a key + // is being used for a semaphore. It is used to detect a potential + // conflict with a lock. + SemaphoreFlagValue = 0xe0f69a2baa414de0 +) + +var ( + // ErrSemaphoreHeld is returned if we attempt to double lock + ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") + + // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore + // that we do not hold. + ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") + + // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore + // that is in use. + ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") + + // ErrSemaphoreConflict is returned if the flags on a key + // used for a semaphore do not match expectation + ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") +) + +// Semaphore is used to implement a distributed semaphore +// using the Consul KV primitives. +type Semaphore struct { + c *Client + opts *SemaphoreOptions + + isHeld bool + sessionRenew chan struct{} + lockSession string + l sync.Mutex +} + +// SemaphoreOptions is used to parameterize the Semaphore +type SemaphoreOptions struct { + Prefix string // Must be set and have write permissions + Limit int // Must be set, and be positive + Value []byte // Optional, value to associate with the contender entry + Session string // Optional, created if not specified + SessionName string // Optional, defaults to DefaultLockSessionName + SessionTTL string // Optional, defaults to DefaultLockSessionTTL + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime + SemaphoreTryOnce bool // Optional, defaults to false which means try forever +} + +// semaphoreLock is written under the DefaultSemaphoreKey and +// is used to coordinate between all the contenders. +type semaphoreLock struct { + // Limit is the integer limit of holders. This is used to + // verify that all the holders agree on the value. + Limit int + + // Holders is a list of all the semaphore holders. + // It maps the session ID to true. It is used as a set effectively. 
+ Holders map[string]bool +} + +// SemaphorePrefix is used to created a Semaphore which will operate +// at the given KV prefix and uses the given limit for the semaphore. +// The prefix must have write privileges, and the limit must be agreed +// upon by all contenders. +func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { + opts := &SemaphoreOptions{ + Prefix: prefix, + Limit: limit, + } + return c.SemaphoreOpts(opts) +} + +// SemaphoreOpts is used to create a Semaphore with the given options. +// The prefix must have write privileges, and the limit must be agreed +// upon by all contenders. If a Session is not provided, one will be created. +func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { + if opts.Prefix == "" { + return nil, fmt.Errorf("missing prefix") + } + if opts.Limit <= 0 { + return nil, fmt.Errorf("semaphore limit must be positive") + } + if opts.SessionName == "" { + opts.SessionName = DefaultSemaphoreSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultSemaphoreSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.SemaphoreWaitTime == 0 { + opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime + } + s := &Semaphore{ + c: c, + opts: opts, + } + return s, nil +} + +// Acquire attempts to reserve a slot in the semaphore, blocking until +// success, interrupted via the stopCh or an error is encountered. +// Providing a non-nil stopCh can be used to abort the attempt. +// On success, a channel is returned that represents our slot. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the slot is held until Release() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the session being lost. +func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
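+ // When SemaphoreTryOnce is set, only the first pass waits with the full
+ // SemaphoreWaitTime; later passes subtract the elapsed time from the
+ // remaining wait budget and, once that budget is exhausted, give up and
+ // return a nil channel instead of blocking indefinitely.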
+ if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. +func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to cleanup the semaphore entry. It is not necessary +// to invoke. It will fail if the semaphore is in use. 
+func (s *Semaphore) Destroy() error { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List for the semaphore + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an +// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. 
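+// If MonitorRetries is configured, server errors from the List call are
+// retried up to that many times, MonitorRetryTime apart, before the monitor
+// gives up and closes stopCh.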
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsServerError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore_test.go b/vendor/github.com/hashicorp/consul/api/semaphore_test.go new file mode 100644 index 000000000..598be024c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/semaphore_test.go @@ -0,0 +1,518 @@ +package api + +import ( + "log" + "net/http" + "net/http/httptest" + "net/http/httputil" + "strings" + "sync" + "testing" + "time" +) + +func TestAPI_SemaphoreAcquireRelease(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Initial release should fail + err = sema.Release() + if err != ErrSemaphoreNotHeld { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not hold") + } + + // Double lock should fail + _, err = sema.Acquire(nil) + if err != ErrSemaphoreHeld { + t.Fatalf("err: %v", err) + } + + // Should be held + select { + case <-lockCh: + t.Fatalf("should be held") + default: + } + + // Initial release should work + err = sema.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Double unlock should fail + err = sema.Release() + if err != ErrSemaphoreNotHeld { + t.Fatalf("err: %v", err) + } + + // Should lose resource + select { + case <-lockCh: + case <-time.After(time.Second): + t.Fatalf("should not be held") + } +} + +func TestAPI_SemaphoreForceInvalidate(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not acquired") + } + defer sema.Release() + + go func() { + // Nuke the session, simulator an operator invalidation + // or a health check failure + session := c.Session() + session.Destroy(sema.lockSession, nil) + }() + + // Should loose slot + select { + case <-lockCh: + case <-time.After(time.Second): + t.Fatalf("should not be locked") + } +} + +func TestAPI_SemaphoreDeleteKey(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not locked") + } + defer sema.Release() + + go func() { + // Nuke the key, simulate an operator intervention + kv := 
c.KV() + kv.DeleteTree("test/semaphore", nil) + }() + + // Should loose leadership + select { + case <-lockCh: + case <-time.After(time.Second): + t.Fatalf("should not be locked") + } +} + +func TestAPI_SemaphoreContend(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + wg := &sync.WaitGroup{} + acquired := make([]bool, 4) + for idx := range acquired { + wg.Add(1) + go func(idx int) { + defer wg.Done() + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work eventually, will contend + lockCh, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if lockCh == nil { + t.Fatalf("not locked") + } + defer sema.Release() + log.Printf("Contender %d acquired", idx) + + // Set acquired and then leave + acquired[idx] = true + }(idx) + } + + // Wait for termination + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + // Wait for everybody to get a turn + select { + case <-doneCh: + case <-time.After(3 * DefaultLockRetryTime): + t.Fatalf("timeout") + } + + for idx, did := range acquired { + if !did { + t.Fatalf("contender %d never acquired", idx) + } + } +} + +func TestAPI_SemaphoreBadLimit(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 0) + if err == nil { + t.Fatalf("should error") + } + + sema, err = c.SemaphorePrefix("test/semaphore", 1) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + sema2, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema2.Acquire(nil) + if err.Error() != "semaphore limit conflict (lock: 1, local: 2)" { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_SemaphoreDestroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + sema, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + sema2, err := c.SemaphorePrefix("test/semaphore", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, err = sema2.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should fail, still held + if err := sema.Destroy(); err != ErrSemaphoreHeld { + t.Fatalf("err: %v", err) + } + + err = sema.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should fail, still in use + if err := sema.Destroy(); err != ErrSemaphoreInUse { + t.Fatalf("err: %v", err) + } + + err = sema2.Release() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + if err := sema.Destroy(); err != nil { + t.Fatalf("err: %v", err) + } + + // Destroy should work + if err := sema2.Destroy(); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_SemaphoreConflict(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + lock, err := c.LockKey("test/sema/.lock") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should work + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("not leader") + } + defer lock.Unlock() + + sema, err := c.SemaphorePrefix("test/sema/", 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should conflict with lock + _, err = sema.Acquire(nil) + if err != ErrSemaphoreConflict { + t.Fatalf("err: %v", err) + } + + // Should conflict with 
lock + err = sema.Destroy() + if err != ErrSemaphoreConflict { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_SemaphoreMonitorRetry(t *testing.T) { + t.Parallel() + raw, s := makeClient(t) + defer s.Stop() + + // Set up a server that always responds with 500 errors. + failer := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(500) + } + outage := httptest.NewServer(http.HandlerFunc(failer)) + defer outage.Close() + + // Set up a reverse proxy that will send some requests to the + // 500 server and pass everything else through to the real Consul + // server. + var mutex sync.Mutex + errors := 0 + director := func(req *http.Request) { + mutex.Lock() + defer mutex.Unlock() + + req.URL.Scheme = "http" + if errors > 0 && req.Method == "GET" && strings.Contains(req.URL.Path, "/v1/kv/test/sema/.lock") { + req.URL.Host = outage.URL[7:] // Strip off "http://". + errors-- + } else { + req.URL.Host = raw.config.Address + } + } + proxy := httptest.NewServer(&httputil.ReverseProxy{Director: director}) + defer proxy.Close() + + // Make another client that points at the proxy instead of the real + // Consul server. + config := raw.config + config.Address = proxy.URL[7:] // Strip off "http://". + c, err := NewClient(&config) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Set up a lock with retries enabled. + opts := &SemaphoreOptions{ + Prefix: "test/sema/.lock", + Limit: 2, + SessionTTL: "60s", + MonitorRetries: 3, + } + sema, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. + if sema.opts.MonitorRetryTime != DefaultMonitorRetryTime { + t.Fatalf("bad: %d", sema.opts.MonitorRetryTime) + } + + // Now set a custom time for the test. + opts.MonitorRetryTime = 250 * time.Millisecond + sema, err = c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if sema.opts.MonitorRetryTime != 250*time.Millisecond { + t.Fatalf("bad: %d", sema.opts.MonitorRetryTime) + } + + // Should get the lock. + ch, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("didn't acquire") + } + + // Take the semaphore using the raw client to force the monitor to wake + // up and check the lock again. This time we will return errors for some + // of the responses. + mutex.Lock() + errors = 2 + mutex.Unlock() + another, err := raw.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if _, err := another.Acquire(nil); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should still have the semaphore. + select { + case <-ch: + t.Fatalf("lost the semaphore") + default: + } + + // Now return an overwhelming number of errors, using the raw client to + // poke the key and get the monitor to run again. + mutex.Lock() + errors = 10 + mutex.Unlock() + if err := another.Release(); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(5 * opts.MonitorRetryTime) + + // Should lose the semaphore. + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("should not have the semaphore") + } +} + +func TestAPI_SemaphoreOneShot(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Set up a semaphore as a one-shot. + opts := &SemaphoreOptions{ + Prefix: "test/sema/.lock", + Limit: 2, + SemaphoreTryOnce: true, + } + sema, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure the default got set. 
+ if sema.opts.SemaphoreWaitTime != DefaultSemaphoreWaitTime { + t.Fatalf("bad: %d", sema.opts.SemaphoreWaitTime) + } + + // Now set a custom time for the test. + opts.SemaphoreWaitTime = 250 * time.Millisecond + sema, err = c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + if sema.opts.SemaphoreWaitTime != 250*time.Millisecond { + t.Fatalf("bad: %d", sema.opts.SemaphoreWaitTime) + } + + // Should acquire the semaphore. + ch, err := sema.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should have acquired the semaphore") + } + + // Try with another session. + another, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + ch, err = another.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should have acquired the semaphore") + } + + // Try with a third one that shouldn't get it. + contender, err := c.SemaphoreOpts(opts) + if err != nil { + t.Fatalf("err: %v", err) + } + start := time.Now() + ch, err = contender.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch != nil { + t.Fatalf("should not have acquired the semaphore") + } + diff := time.Now().Sub(start) + if diff < contender.opts.SemaphoreWaitTime { + t.Fatalf("time out of bounds: %9.6f", diff.Seconds()) + } + + // Give up a slot and make sure the third one can get it. + if err := another.Release(); err != nil { + t.Fatalf("err: %v", err) + } + ch, err = contender.Acquire(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if ch == nil { + t.Fatalf("should have acquired the semaphore") + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 000000000..1613f11a6 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,224 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. + SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. 
Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. +func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { + ctx := q.Context() + + ttl, err := time.ParseDuration(initialTTL) + if err != nil { + return err + } + + waitDur := ttl / 2 + lastRenewTime := time.Now() + var lastErr error + for { + if time.Since(lastRenewTime) > ttl { + return lastErr + } + select { + case <-time.After(waitDur): + entry, _, err := s.Renew(id, q) + if err != nil { + waitDur = time.Second + lastErr = err + continue + } + if entry == nil { + return ErrSessionExpired + } + + // Handle the server updating the TTL + ttl, _ = time.ParseDuration(entry.TTL) + waitDur = ttl / 2 + lastRenewTime = time.Now() + + case <-doneCh: + // Attempt a session destroy + s.Destroy(id, q) + return nil + + case <-ctx.Done(): + // Bail immediately since attempting the destroy would + // use the canceled context in q, which would just bail. 
+ return ctx.Err() + } + } +} + +// Info looks up a single session +func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/info/"+id, &entries, q) + if err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List gets sessions for a node +func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/node/"+node, &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// List gets all active sessions +func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/list", &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/session_test.go b/vendor/github.com/hashicorp/consul/api/session_test.go new file mode 100644 index 000000000..0039bb2e3 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session_test.go @@ -0,0 +1,392 @@ +package api + +import ( + "context" + "strings" + "testing" + "time" +) + +func TestAPI_SessionCreateDestroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + id, meta, err := session.Create(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + meta, err = session.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } +} + +func TestAPI_SessionCreateRenewDestroy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + se := &SessionEntry{ + TTL: "10s", + } + + id, meta, err := session.Create(se, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + renew, meta, err := session.Renew(id, nil) + + if err != nil { + t.Fatalf("err: %v", err) + } + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if renew == nil { + t.Fatalf("should get session") + } + + if renew.ID != id { + t.Fatalf("should have matching id") + } + + if renew.TTL != "10s" { + t.Fatalf("should get session with TTL") + } +} + +func TestAPI_SessionCreateRenewDestroyRenew(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + entry := &SessionEntry{ + Behavior: SessionBehaviorDelete, + TTL: "500s", // disable ttl + } + + id, meta, err := session.Create(entry, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + // Extend right after create. Everything should be fine. + entry, _, err = session.Renew(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatal("session unexpectedly vanished") + } + + // Simulate TTL loss by manually destroying the session. 
+ meta, err = session.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + // Extend right after delete. The 404 should proxy as a nil. + entry, _, err = session.Renew(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if entry != nil { + t.Fatal("session still exists") + } +} + +func TestAPI_SessionCreateDestroyRenewPeriodic(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + entry := &SessionEntry{ + Behavior: SessionBehaviorDelete, + TTL: "500s", // disable ttl + } + + id, meta, err := session.Create(entry, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + if id == "" { + t.Fatalf("invalid: %v", id) + } + + // This only tests Create/Destroy/RenewPeriodic to avoid the more + // difficult case of testing all of the timing code. + + // Simulate TTL loss by manually destroying the session. + meta, err = session.Destroy(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if meta.RequestTime == 0 { + t.Fatalf("bad: %v", meta) + } + + // Extend right after delete. The 404 should terminate the loop quickly and return ErrSessionExpired. + errCh := make(chan error, 1) + doneCh := make(chan struct{}) + go func() { errCh <- session.RenewPeriodic("1s", id, nil, doneCh) }() + defer close(doneCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("timedout: missing session did not terminate renewal loop") + case err = <-errCh: + if err != ErrSessionExpired { + t.Fatalf("err: %v", err) + } + } +} + +func TestAPI_SessionRenewPeriodic_Cancel(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + entry := &SessionEntry{ + Behavior: SessionBehaviorDelete, + TTL: "500s", // disable ttl + } + + t.Run("done channel", func(t *testing.T) { + id, _, err := session.Create(entry, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + errCh := make(chan error, 1) + doneCh := make(chan struct{}) + go func() { errCh <- session.RenewPeriodic("1s", id, nil, doneCh) }() + + close(doneCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("renewal loop didn't terminate") + case err = <-errCh: + if err != nil { + t.Fatalf("err: %v", err) + } + } + + sess, _, err := session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if sess != nil { + t.Fatalf("session was not expired") + } + }) + + t.Run("context", func(t *testing.T) { + id, _, err := session.Create(entry, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + wo := new(WriteOptions).WithContext(ctx) + + errCh := make(chan error, 1) + go func() { errCh <- session.RenewPeriodic("1s", id, wo, nil) }() + + cancel() + + select { + case <-time.After(1 * time.Second): + t.Fatal("renewal loop didn't terminate") + case err = <-errCh: + if err == nil || !strings.Contains(err.Error(), "context canceled") { + t.Fatalf("err: %v", err) + } + } + + // See comment in session.go for why the session isn't removed + // in this case. 
+ sess, _, err := session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if sess == nil { + t.Fatalf("session should not be expired") + } + }) +} + +func TestAPI_SessionInfo(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + id, _, err := session.Create(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + info, qm, err := session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } + + if info == nil { + t.Fatalf("should get session") + } + if info.CreateIndex == 0 { + t.Fatalf("bad: %v", info) + } + if info.ID != id { + t.Fatalf("bad: %v", info) + } + if info.Name != "" { + t.Fatalf("bad: %v", info) + } + if info.Node == "" { + t.Fatalf("bad: %v", info) + } + if len(info.Checks) == 0 { + t.Fatalf("bad: %v", info) + } + if info.LockDelay == 0 { + t.Fatalf("bad: %v", info) + } + if info.Behavior != "release" { + t.Fatalf("bad: %v", info) + } + if info.TTL != "" { + t.Fatalf("bad: %v", info) + } +} + +func TestAPI_SessionNode(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + id, _, err := session.Create(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + info, qm, err := session.Info(id, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + sessions, qm, err := session.Node(info.Node, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(sessions) != 1 { + t.Fatalf("bad: %v", sessions) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } +} + +func TestAPI_SessionList(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + session := c.Session() + + id, _, err := session.Create(nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer session.Destroy(id, nil) + + sessions, qm, err := session.List(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(sessions) != 1 { + t.Fatalf("bad: %v", sessions) + } + + if qm.LastIndex == 0 { + t.Fatalf("bad: %v", qm) + } + if !qm.KnownLeader { + t.Fatalf("bad: %v", qm) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go new file mode 100644 index 000000000..e902377dd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -0,0 +1,47 @@ +package api + +import ( + "io" +) + +// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of +// Consul's internal state and restore snapshots for disaster recovery. +type Snapshot struct { + c *Client +} + +// Snapshot returns a handle that exposes the snapshot endpoints. +func (c *Client) Snapshot() *Snapshot { + return &Snapshot{c} +} + +// Save requests a new snapshot and provides an io.ReadCloser with the snapshot +// data to save. If this doesn't return an error, then it's the responsibility +// of the caller to close it. Only a subset of the QueryOptions are supported: +// Datacenter, AllowStale, and Token. 
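+//
+// A minimal save-then-restore sketch (client is an assumed *Client; error
+// handling trimmed for brevity):
+//
+//	snap, _, err := client.Snapshot().Save(nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer snap.Close()
+//	err = client.Snapshot().Restore(nil, snap)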
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/snapshot") + r.setQueryOptions(q) + + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return resp.Body, qm, nil +} + +// Restore streams in an existing snapshot and attempts to restore it. +func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { + r := s.c.newRequest("PUT", "/v1/snapshot") + r.body = in + r.setWriteOptions(q) + _, _, err := requireOK(s.c.doRequest(r)) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot_test.go b/vendor/github.com/hashicorp/consul/api/snapshot_test.go new file mode 100644 index 000000000..ba371d4e4 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot_test.go @@ -0,0 +1,134 @@ +package api + +import ( + "bytes" + "strings" + "testing" +) + +func TestAPI_Snapshot(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + // Place an initial key into the store. + kv := c.KV() + key := &KVPair{Key: testKey(), Value: []byte("hello")} + if _, err := kv.Put(key, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Make sure it reads back. + pair, _, err := kv.Get(key.Key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, []byte("hello")) { + t.Fatalf("unexpected value: %#v", pair) + } + + // Take a snapshot. + snapshot := c.Snapshot() + snap, qm, err := snapshot.Save(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + defer snap.Close() + + // Sanity check th query metadata. + if qm.LastIndex == 0 || !qm.KnownLeader || + qm.RequestTime == 0 { + t.Fatalf("bad: %v", qm) + } + + // Overwrite the key's value. + key.Value = []byte("goodbye") + if _, err := kv.Put(key, nil); err != nil { + t.Fatalf("err: %v", err) + } + + // Read the key back and look for the new value. + pair, _, err = kv.Get(key.Key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, []byte("goodbye")) { + t.Fatalf("unexpected value: %#v", pair) + } + + // Restore the snapshot. + if err := snapshot.Restore(nil, snap); err != nil { + t.Fatalf("err: %v", err) + } + + // Read the key back and look for the original value. + pair, _, err = kv.Get(key.Key, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if pair == nil { + t.Fatalf("expected value: %#v", pair) + } + if !bytes.Equal(pair.Value, []byte("hello")) { + t.Fatalf("unexpected value: %#v", pair) + } +} + +func TestAPI_Snapshot_Options(t *testing.T) { + t.Parallel() + c, s := makeACLClient(t) + defer s.Stop() + + // Try to take a snapshot with a bad token. + snapshot := c.Snapshot() + _, _, err := snapshot.Save(&QueryOptions{Token: "anonymous"}) + if err == nil || !strings.Contains(err.Error(), "Permission denied") { + t.Fatalf("err: %v", err) + } + + // Now try an unknown DC. + _, _, err = snapshot.Save(&QueryOptions{Datacenter: "nope"}) + if err == nil || !strings.Contains(err.Error(), "No path to datacenter") { + t.Fatalf("err: %v", err) + } + + // This should work with a valid token. + snap, _, err := snapshot.Save(&QueryOptions{Token: "root"}) + if err != nil { + t.Fatalf("err: %v", err) + } + defer snap.Close() + + // This should work with a stale snapshot. 
This doesn't have good feedback + // that the stale option was sent, but it makes sure nothing bad happens. + snap, _, err = snapshot.Save(&QueryOptions{Token: "root", AllowStale: true}) + if err != nil { + t.Fatalf("err: %v", err) + } + defer snap.Close() + + // Try to restore a snapshot with a bad token. + null := bytes.NewReader([]byte("")) + err = snapshot.Restore(&WriteOptions{Token: "anonymous"}, null) + if err == nil || !strings.Contains(err.Error(), "Permission denied") { + t.Fatalf("err: %v", err) + } + + // Now try an unknown DC. + null = bytes.NewReader([]byte("")) + err = snapshot.Restore(&WriteOptions{Datacenter: "nope"}, null) + if err == nil || !strings.Contains(err.Error(), "No path to datacenter") { + t.Fatalf("err: %v", err) + } + + // This should work. + if err := snapshot.Restore(&WriteOptions{Token: "root"}, snap); err != nil { + t.Fatalf("err: %v", err) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go new file mode 100644 index 000000000..74ef61a67 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -0,0 +1,43 @@ +package api + +// Status can be used to query the Status endpoints +type Status struct { + c *Client +} + +// Status returns a handle to the status endpoints +func (c *Client) Status() *Status { + return &Status{c} +} + +// Leader is used to query for a known leader +func (s *Status) Leader() (string, error) { + r := s.c.newRequest("GET", "/v1/status/leader") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var leader string + if err := decodeBody(resp, &leader); err != nil { + return "", err + } + return leader, nil +} + +// Peers is used to query for a known raft peers +func (s *Status) Peers() ([]string, error) { + r := s.c.newRequest("GET", "/v1/status/peers") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var peers []string + if err := decodeBody(resp, &peers); err != nil { + return nil, err + } + return peers, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/status_test.go b/vendor/github.com/hashicorp/consul/api/status_test.go new file mode 100644 index 000000000..f1ad3bf42 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/status_test.go @@ -0,0 +1,37 @@ +package api + +import ( + "testing" +) + +func TestAPI_StatusLeader(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + status := c.Status() + + leader, err := status.Leader() + if err != nil { + t.Fatalf("err: %v", err) + } + if leader == "" { + t.Fatalf("Expected leader") + } +} + +func TestAPI_StatusPeers(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + status := c.Status() + + peers, err := status.Peers() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(peers) == 0 { + t.Fatalf("Expected peers ") + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. 
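As a rough sketch of how the package vendored above is typically consumed (this example is not part of the patch; the import path simply mirrors the vendor directory shown here, and the target URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// DefaultClient returns a client with its own Transport and
	// keepalives disabled, suitable for one-off requests.
	oneShot := cleanhttp.DefaultClient()

	// DefaultPooledClient keeps idle connections around, so reuse a
	// single instance when talking to the same host repeatedly.
	pooled := cleanhttp.DefaultPooledClient()

	for _, c := range []*http.Client{oneShot, pooled} {
		resp, err := c.Get("https://example.com/")
		if err != nil {
			fmt.Println("request failed:", err)
			continue
		}
		resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}
}
```

Because neither client touches `http.DefaultClient` or `http.DefaultTransport`, TLS or proxy settings applied to them cannot leak into, or be clobbered by, other code in the same process.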
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 000000000..7d8a57c28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,56 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 000000000..05841092a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, esepcially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. 
+// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml new file mode 100644 index 000000000..80e1de44e --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile new file mode 100644 index 000000000..c3989e789 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile @@ -0,0 +1,8 @@ +TEST?=./... + +test: + go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 + go vet $(TEST) + go test $(TEST) -race + +.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md new file mode 100644 index 000000000..f5abffc29 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/README.md @@ -0,0 +1,43 @@ +# rootcerts + +Functions for loading root certificates for TLS connections. + +----- + +Go's standard library `crypto/tls` provides a common mechanism for configuring +TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool +of certificates for the client to use as a trust store when verifying server +certificates. 
+ +This library contains utility functions for loading certificates destined for +that field, as well as one other important thing: + +When the `RootCAs` field is `nil`, the standard library attempts to load the +host's root CA set. This behavior is OS-specific, and the Darwin +implementation contains [a bug that prevents trusted certificates from the +System and Login keychains from being loaded][1]. This library contains +Darwin-specific behavior that works around that bug. + +[1]: https://github.com/golang/go/issues/14514 + +## Example Usage + +Here's a snippet demonstrating how this library is meant to be used: + +```go +func httpClient() (*http.Client, error) + tlsConfig := &tls.Config{} + err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ + CAFile: os.Getenv("MYAPP_CAFILE"), + CAPath: os.Getenv("MYAPP_CAPATH"), + }) + if err != nil { + return nil, err + } + c := cleanhttp.DefaultClient() + t := cleanhttp.DefaultTransport() + t.TLSClientConfig = tlsConfig + c.Transport = t + return c, nil +} +``` diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go new file mode 100644 index 000000000..b55cc6284 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go @@ -0,0 +1,9 @@ +// Package rootcerts contains functions to aid in loading CA certificates for +// TLS connections. +// +// In addition, its default behavior on Darwin works around an open issue [1] +// in Go's crypto/x509 that prevents certicates from being loaded from the +// System or Login keychains. +// +// [1] https://github.com/golang/go/issues/14514 +package rootcerts diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go new file mode 100644 index 000000000..aeb30ece3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go @@ -0,0 +1,103 @@ +package rootcerts + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// Config determines where LoadCACerts will load certificates from. When both +// CAFile and CAPath are blank, this library's functions will either load +// system roots explicitly and return them, or set the CertPool to nil to allow +// Go's standard library to load system certs. +type Config struct { + // CAFile is a path to a PEM-encoded certificate file or bundle. Takes + // precedence over CAPath. + CAFile string + + // CAPath is a path to a directory populated with PEM-encoded certificates. + CAPath string +} + +// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the +// Config specified. +func ConfigureTLS(t *tls.Config, c *Config) error { + if t == nil { + return nil + } + pool, err := LoadCACerts(c) + if err != nil { + return err + } + t.RootCAs = pool + return nil +} + +// LoadCACerts loads a CertPool based on the Config specified. +func LoadCACerts(c *Config) (*x509.CertPool, error) { + if c == nil { + c = &Config{} + } + if c.CAFile != "" { + return LoadCAFile(c.CAFile) + } + if c.CAPath != "" { + return LoadCAPath(c.CAPath) + } + + return LoadSystemCAs() +} + +// LoadCAFile loads a single PEM-encoded file from the path specified. 
+func LoadCAFile(caFile string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Error loading CA File: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile) + } + + return pool, nil +} + +// LoadCAPath walks the provided path and loads all certificates encounted into +// a pool. +func LoadCAPath(caPath string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + pem, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("Error loading file from CAPath: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path) + } + + return nil + } + + err := filepath.Walk(caPath, walkFn) + if err != nil { + return nil, err + } + + return pool, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go new file mode 100644 index 000000000..66b1472c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go @@ -0,0 +1,12 @@ +// +build !darwin + +package rootcerts + +import "crypto/x509" + +// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that +// default behavior of standard TLS config libraries is triggered, which is to +// load system certs. +func LoadSystemCAs() (*x509.CertPool, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go new file mode 100644 index 000000000..a9a040657 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go @@ -0,0 +1,48 @@ +package rootcerts + +import ( + "crypto/x509" + "os/exec" + "path" + + "github.com/mitchellh/go-homedir" +) + +// LoadSystemCAs has special behavior on Darwin systems to work around +func LoadSystemCAs() (*x509.CertPool, error) { + pool := x509.NewCertPool() + + for _, keychain := range certKeychains() { + err := addCertsFromKeychain(pool, keychain) + if err != nil { + return nil, err + } + } + + return pool, nil +} + +func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { + cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) + data, err := cmd.Output() + if err != nil { + return err + } + + pool.AppendCertsFromPEM(data) + + return nil +} + +func certKeychains() []string { + keychains := []string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + "/Library/Keychains/System.keychain", + } + home, err := homedir.Dir() + if err == nil { + loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") + keychains = append(keychains, loginKeychain) + } + return keychains +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go new file mode 100644 index 000000000..2129c156d --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin_test.go @@ -0,0 +1,17 @@ +package rootcerts + +import "testing" + +func TestSystemCAsOnDarwin(t *testing.T) { + _, err := LoadSystemCAs() + if err != nil { + t.Fatalf("Got error: %s", err) + } +} + +func TestCertKeychains(t *testing.T) { + 
keychains := certKeychains() + if len(keychains) != 3 { + t.Fatalf("Expected 3 keychains, got %#v", keychains) + } +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go new file mode 100644 index 000000000..963438588 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_test.go @@ -0,0 +1,52 @@ +package rootcerts + +import ( + "path/filepath" + "testing" +) + +const fixturesDir = "./test-fixtures" + +func TestConfigureTLSHandlesNil(t *testing.T) { + err := ConfigureTLS(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestLoadCACertsHandlesNil(t *testing.T) { + _, err := LoadCACerts(nil) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestLoadCACertsFromFile(t *testing.T) { + path := testFixture("cafile", "cacert.pem") + _, err := LoadCACerts(&Config{CAFile: path}) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestLoadCACertsFromDir(t *testing.T) { + path := testFixture("capath") + _, err := LoadCACerts(&Config{CAPath: path}) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestLoadCACertsFromDirWithSymlinks(t *testing.T) { + path := testFixture("capath-with-symlinks") + _, err := LoadCACerts(&Config{CAPath: path}) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func testFixture(n ...string) string { + parts := []string{fixturesDir} + parts = append(parts, n...) + return filepath.Join(parts...) +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem new file mode 100644 index 000000000..86d732f3d --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/cafile/cacert.pem @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIExDCCA6ygAwIBAgIJAJ7PV+3kJZqZMA0GCSqGSIb3DQEBBQUAMIGcMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xEjAQ +BgNVBAoTCUhhc2hpQ29ycDEUMBIGA1UECxMLRW5naW5lZXJpbmcxGzAZBgNVBAMU +EiouYXRsYXMucGhpbnplLmNvbTEhMB8GCSqGSIb3DQEJARYScGF1bEBoYXNoaWNv +cnAuY29tMB4XDTE2MDQyNzE1MjYyMVoXDTE3MDQyNzE1MjYyMVowgZwxCzAJBgNV +BAYTAlVTMREwDwYDVQQIEwhJbGxpbm9pczEQMA4GA1UEBxMHQ2hpY2FnbzESMBAG +A1UEChMJSGFzaGlDb3JwMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEbMBkGA1UEAxQS +Ki5hdGxhcy5waGluemUuY29tMSEwHwYJKoZIhvcNAQkBFhJwYXVsQGhhc2hpY29y +cC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDWRXdMnsTpxpwZ +D2olsun9WO7SnMQ/SIR3DV/fttPIDHSQm2ad4r2pKEuiV+TKEFUgj/Id9bCAfQYs +jsa1qX1GmieXz+83OnK3MDEcDczpjGhSplTYGOjlxKLMhMBAOtdV5hJAYz3nwV3c +R+IQu/4213+em40shZAQRNZ2apnyE3+QB+gPlEs9Nw0OcbSKLmAiuKPbJpO+94ou +n1h0/w/+DPz6yO/fFPoA3vlisGM6B4R9U2JVwWjXrU71fU1i82ulFQdApdfUs1FP +wRrZxgX5ldUrRvFr8lJiMehdX8khO7Ue4rT6yxbI6KVM04Q5mNt1ARRLI69rN9My +pGXiItcxAgMBAAGjggEFMIIBATAdBgNVHQ4EFgQUjwsj8l0Y9HFQLH0GaJAsOHof +PhwwgdEGA1UdIwSByTCBxoAUjwsj8l0Y9HFQLH0GaJAsOHofPhyhgaKkgZ8wgZwx +CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhJbGxpbm9pczEQMA4GA1UEBxMHQ2hpY2Fn +bzESMBAGA1UEChMJSGFzaGlDb3JwMRQwEgYDVQQLEwtFbmdpbmVlcmluZzEbMBkG +A1UEAxQSKi5hdGxhcy5waGluemUuY29tMSEwHwYJKoZIhvcNAQkBFhJwYXVsQGhh +c2hpY29ycC5jb22CCQCez1ft5CWamTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEB +BQUAA4IBAQC4tFfxpB8xEk9ewb5CNhhac4oKwGths+oq45DjoNtlagDMmIs2bl18 +q45PIB7fuFkAz/YHcOL0UEOAiw4jbuROp9jacHxBV21lRLLmNlK1Llc3eNVvLJ38 +ud6/Skilv9XyC4JNk0P5KrghxR6SOGwRuYZNqF+tthf+Bp9wJvLyfqDuJfGBal7C +ezobMoh4tp8Dh1JeQlwvJcVt2k0UFJpa57MNr78c684Bq55ow+jd6wFG0XM0MMmy +u+QRgJEGfYuYDPFEO8C8IfRyrHuV7Ll9P6eyEEFCneznXY0yJc/Gn3ZcX7ANqJsc +ueMOWw/vUnonzxAFKW+I9U9ptyVSNMLY +-----END 
CERTIFICATE----- diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem new file mode 120000 index 000000000..dda0574d7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem @@ -0,0 +1 @@ +../capath/securetrust.pem \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem new file mode 120000 index 000000000..37ed4f01a --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem @@ -0,0 +1 @@ +../capath/thawte.pem \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem new file mode 100644 index 000000000..37400921e --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/securetrust.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO +0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem new file mode 100644 index 000000000..998460f1c --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath/thawte.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG 
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J +NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 000000000..63f624141 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,227 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. + adjustmentIndex uint + + // adjustment is used to store samples for the adjustment calculation. + adjustmentSamples []float64 + + // latencyFilterSamples is used to store the last several RTT samples, + // keyed by node name. We will use the config's LatencyFilterSamples + // value to determine how many samples we keep, per node. + latencyFilterSamples map[string][]float64 + + // stats is used to record events that occur when updating coordinates. + stats ClientStats + + // mutex enables safe concurrent access to the client. + mutex sync.RWMutex +} + +// ClientStats is used to record events that occur when updating coordinates. +type ClientStats struct { + // Resets is incremented any time we reset our local coordinate because + // our calculations have resulted in an invalid state. + Resets int +} + +// NewClient creates a new Client and verifies the configuration is valid. +func NewClient(config *Config) (*Client, error) { + if !(config.Dimensionality > 0) { + return nil, fmt.Errorf("dimensionality must be >0") + } + + return &Client{ + coord: NewCoordinate(config), + origin: NewCoordinate(config), + config: config, + adjustmentIndex: 0, + adjustmentSamples: make([]float64, config.AdjustmentWindowSize), + latencyFilterSamples: make(map[string][]float64), + }, nil +} + +// GetCoordinate returns a copy of the coordinate for this client. +func (c *Client) GetCoordinate() *Coordinate { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.Clone() +} + +// SetCoordinate forces the client's coordinate to a known state. 
+func (c *Client) SetCoordinate(coord *Coordinate) error { + c.mutex.Lock() + defer c.mutex.Unlock() + + if err := c.checkCoordinate(coord); err != nil { + return err + } + + c.coord = coord.Clone() + return nil +} + +// ForgetNode removes any client state for the given node. +func (c *Client) ForgetNode(node string) { + c.mutex.Lock() + defer c.mutex.Unlock() + + delete(c.latencyFilterSamples, node) +} + +// Stats returns a copy of stats for the client. +func (c *Client) Stats() ClientStats { + c.mutex.Lock() + defer c.mutex.Unlock() + + return c.stats +} + +// checkCoordinate returns an error if the coordinate isn't compatible with +// this client, or if the coordinate itself isn't valid. This assumes the mutex +// has been locked already. +func (c *Client) checkCoordinate(coord *Coordinate) error { + if !c.coord.IsCompatibleWith(coord) { + return fmt.Errorf("dimensions aren't compatible") + } + + if !coord.IsValid() { + return fmt.Errorf("coordinate is invalid") + } + + return nil +} + +// latencyFilter applies a simple moving median filter with a new sample for +// a node. This assumes that the mutex has been locked already. +func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { + samples, ok := c.latencyFilterSamples[node] + if !ok { + samples = make([]float64, 0, c.config.LatencyFilterSize) + } + + // Add the new sample and trim the list, if needed. + samples = append(samples, rttSeconds) + if len(samples) > int(c.config.LatencyFilterSize) { + samples = samples[1:] + } + c.latencyFilterSamples[node] = samples + + // Sort a copy of the samples and return the median. + sorted := make([]float64, len(samples)) + copy(sorted, samples) + sort.Float64s(sorted) + return sorted[len(sorted)/2] +} + +// updateVivialdi updates the Vivaldi portion of the client's coordinate. This +// assumes that the mutex has been locked already. +func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { + const zeroThreshold = 1.0e-6 + + dist := c.coord.DistanceTo(other).Seconds() + if rttSeconds < zeroThreshold { + rttSeconds = zeroThreshold + } + wrongness := math.Abs(dist-rttSeconds) / rttSeconds + + totalError := c.coord.Error + other.Error + if totalError < zeroThreshold { + totalError = zeroThreshold + } + weight := c.coord.Error / totalError + + c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) + if c.coord.Error > c.config.VivaldiErrorMax { + c.coord.Error = c.config.VivaldiErrorMax + } + + delta := c.config.VivaldiCC * weight + force := delta * (rttSeconds - dist) + c.coord = c.coord.ApplyForce(c.config, force, other) +} + +// updateAdjustment updates the adjustment portion of the client's coordinate, if +// the feature is enabled. This assumes that the mutex has been locked already. +func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { + if c.config.AdjustmentWindowSize == 0 { + return + } + + // Note that the existing adjustment factors don't figure in to this + // calculation so we use the raw distance here. + dist := c.coord.rawDistanceTo(other) + c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist + c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize + + sum := 0.0 + for _, sample := range c.adjustmentSamples { + sum += sample + } + c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) +} + +// updateGravity applies a small amount of gravity to pull coordinates towards +// the center of the coordinate system to combat drift. 
This assumes that the +// mutex is locked already. +func (c *Client) updateGravity() { + dist := c.origin.DistanceTo(c.coord).Seconds() + force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) + c.coord = c.coord.ApplyForce(c.config, force, c.origin) +} + +// Update takes other, a coordinate for another node, and rtt, a round trip +// time observation for a ping to that node, and updates the estimated position of +// the client's coordinate. Returns the updated coordinate. +func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + if err := c.checkCoordinate(other); err != nil { + return nil, err + } + + rttSeconds := c.latencyFilter(node, rtt.Seconds()) + c.updateVivaldi(other, rttSeconds) + c.updateAdjustment(other, rttSeconds) + c.updateGravity() + if !c.coord.IsValid() { + c.stats.Resets++ + c.coord = NewCoordinate(c.config) + } + + return c.coord.Clone(), nil +} + +// DistanceTo returns the estimated RTT from the client's coordinate to other, the +// coordinate for another node. +func (c *Client) DistanceTo(other *Coordinate) time.Duration { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.DistanceTo(other) +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/client_test.go b/vendor/github.com/hashicorp/serf/coordinate/client_test.go new file mode 100644 index 000000000..b84296d5e --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client_test.go @@ -0,0 +1,180 @@ +package coordinate + +import ( + "math" + "reflect" + "strings" + "testing" + "time" +) + +func TestClient_NewClient(t *testing.T) { + config := DefaultConfig() + + config.Dimensionality = 0 + client, err := NewClient(config) + if err == nil || !strings.Contains(err.Error(), "dimensionality") { + t.Fatal(err) + } + + config.Dimensionality = 7 + client, err = NewClient(config) + if err != nil { + t.Fatal(err) + } + + origin := NewCoordinate(config) + if !reflect.DeepEqual(client.GetCoordinate(), origin) { + t.Fatalf("fresh client should be located at the origin") + } +} + +func TestClient_Update(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Make sure the Euclidean part of our coordinate is what we expect. + c := client.GetCoordinate() + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, 0.0}) + + // Place a node right above the client and observe an RTT longer than the + // client expects, given its distance. + other := NewCoordinate(config) + other.Vec[2] = 0.001 + rtt := time.Duration(2.0 * other.Vec[2] * secondsToNanoseconds) + c, err = client.Update("node", other, rtt) + if err != nil { + t.Fatalf("err: %v", err) + } + + // The client should have scooted down to get away from it. + if !(c.Vec[2] < 0.0) { + t.Fatalf("client z coordinate %9.6f should be < 0.0", c.Vec[2]) + } + + // Set the coordinate to a known state. + c.Vec[2] = 99.0 + client.SetCoordinate(c) + c = client.GetCoordinate() + verifyEqualFloats(t, c.Vec[2], 99.0) +} + +func TestClient_DistanceTo(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Fiddle a raw coordinate to put it a specific number of seconds away. 
+ other := NewCoordinate(config) + other.Vec[2] = 12.345 + expected := time.Duration(other.Vec[2] * secondsToNanoseconds) + dist := client.DistanceTo(other) + if dist != expected { + t.Fatalf("distance doesn't match %9.6f != %9.6f", dist.Seconds(), expected.Seconds()) + } +} + +func TestClient_latencyFilter(t *testing.T) { + config := DefaultConfig() + config.LatencyFilterSize = 3 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Make sure we get the median, and that things age properly. + verifyEqualFloats(t, client.latencyFilter("alice", 0.201), 0.201) + verifyEqualFloats(t, client.latencyFilter("alice", 0.200), 0.201) + verifyEqualFloats(t, client.latencyFilter("alice", 0.207), 0.201) + + // This glitch will get median-ed out and never seen by Vivaldi. + verifyEqualFloats(t, client.latencyFilter("alice", 1.9), 0.207) + verifyEqualFloats(t, client.latencyFilter("alice", 0.203), 0.207) + verifyEqualFloats(t, client.latencyFilter("alice", 0.199), 0.203) + verifyEqualFloats(t, client.latencyFilter("alice", 0.211), 0.203) + + // Make sure different nodes are not coupled. + verifyEqualFloats(t, client.latencyFilter("bob", 0.310), 0.310) + + // Make sure we don't leak coordinates for nodes that leave. + client.ForgetNode("alice") + verifyEqualFloats(t, client.latencyFilter("alice", 0.888), 0.888) +} + +func TestClient_NaN_Defense(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + + client, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + // Block a bad coordinate from coming in. + other := NewCoordinate(config) + other.Vec[0] = math.NaN() + if other.IsValid() { + t.Fatalf("bad: %#v", *other) + } + rtt := 250 * time.Millisecond + c, err := client.Update("node", other, rtt) + if err == nil || !strings.Contains(err.Error(), "coordinate is invalid") { + t.Fatalf("err: %v", err) + } + if c := client.GetCoordinate(); !c.IsValid() { + t.Fatalf("bad: %#v", *c) + } + + // Block setting an invalid coordinate directly. + err = client.SetCoordinate(other) + if err == nil || !strings.Contains(err.Error(), "coordinate is invalid") { + t.Fatalf("err: %v", err) + } + if c := client.GetCoordinate(); !c.IsValid() { + t.Fatalf("bad: %#v", *c) + } + + // Block an incompatible coordinate. + other.Vec = make([]float64, 2*len(other.Vec)) + c, err = client.Update("node", other, rtt) + if err == nil || !strings.Contains(err.Error(), "dimensions aren't compatible") { + t.Fatalf("err: %v", err) + } + if c := client.GetCoordinate(); !c.IsValid() { + t.Fatalf("bad: %#v", *c) + } + + // Block setting an incompatible coordinate directly. + err = client.SetCoordinate(other) + if err == nil || !strings.Contains(err.Error(), "dimensions aren't compatible") { + t.Fatalf("err: %v", err) + } + if c := client.GetCoordinate(); !c.IsValid() { + t.Fatalf("bad: %#v", *c) + } + + // Poison the internal state and make sure we reset on an update. 
+ client.coord.Vec[0] = math.NaN() + other = NewCoordinate(config) + c, err = client.Update("node", other, rtt) + if err != nil { + t.Fatalf("err: %v", err) + } + if !c.IsValid() { + t.Fatalf("bad: %#v", *c) + } + if got, want := client.Stats().Resets, 1; got != want { + t.Fatalf("got %d want %d", got, want) + } +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go new file mode 100644 index 000000000..b85a8ab7b --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/config.go @@ -0,0 +1,70 @@ +package coordinate + +// Config is used to set the parameters of the Vivaldi-based coordinate mapping +// algorithm. +// +// The following references are called out at various points in the documentation +// here: +// +// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." +// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. +// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates +// in the Wild." NSDI. Vol. 7. 2007. +// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for +// host-based network coordinate systems." Networking, IEEE/ACM Transactions +// on 18.1 (2010): 27-40. +type Config struct { + // The dimensionality of the coordinate system. As discussed in [2], more + // dimensions improves the accuracy of the estimates up to a point. Per [2] + // we chose 8 dimensions plus a non-Euclidean height. + Dimensionality uint + + // VivaldiErrorMax is the default error value when a node hasn't yet made + // any observations. It also serves as an upper limit on the error value in + // case observations cause the error value to increase without bound. + VivaldiErrorMax float64 + + // VivaldiCE is a tuning factor that controls the maximum impact an + // observation can have on a node's confidence. See [1] for more details. + VivaldiCE float64 + + // VivaldiCC is a tuning factor that controls the maximum impact an + // observation can have on a node's coordinate. See [1] for more details. + VivaldiCC float64 + + // AdjustmentWindowSize is a tuning factor that determines how many samples + // we retain to calculate the adjustment factor as discussed in [3]. Setting + // this to zero disables this feature. + AdjustmentWindowSize uint + + // HeightMin is the minimum value of the height parameter. Since this + // always must be positive, it will introduce a small amount error, so + // the chosen value should be relatively small compared to "normal" + // coordinates. + HeightMin float64 + + // LatencyFilterSamples is the maximum number of samples that are retained + // per node, in order to compute a median. The intent is to ride out blips + // but still keep the delay low, since our time to probe any given node is + // pretty infrequent. See [2] for more details. + LatencyFilterSize uint + + // GravityRho is a tuning factor that sets how much gravity has an effect + // to try to re-center coordinates. See [2] for more details. + GravityRho float64 +} + +// DefaultConfig returns a Config that has some default values suitable for +// basic testing of the algorithm, but not tuned to any particular type of cluster. 
+func DefaultConfig() *Config { + return &Config{ + Dimensionality: 8, + VivaldiErrorMax: 1.5, + VivaldiCE: 0.25, + VivaldiCC: 0.25, + AdjustmentWindowSize: 20, + HeightMin: 10.0e-6, + LatencyFilterSize: 3, + GravityRho: 150.0, + } +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go new file mode 100644 index 000000000..fbe792c90 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go @@ -0,0 +1,203 @@ +package coordinate + +import ( + "math" + "math/rand" + "time" +) + +// Coordinate is a specialized structure for holding network coordinates for the +// Vivaldi-based coordinate mapping algorithm. All of the fields should be public +// to enable this to be serialized. All values in here are in units of seconds. +type Coordinate struct { + // Vec is the Euclidean portion of the coordinate. This is used along + // with the other fields to provide an overall distance estimate. The + // units here are seconds. + Vec []float64 + + // Err reflects the confidence in the given coordinate and is updated + // dynamically by the Vivaldi Client. This is dimensionless. + Error float64 + + // Adjustment is a distance offset computed based on a calculation over + // observations from all other nodes over a fixed window and is updated + // dynamically by the Vivaldi Client. The units here are seconds. + Adjustment float64 + + // Height is a distance offset that accounts for non-Euclidean effects + // which model the access links from nodes to the core Internet. The access + // links are usually set by bandwidth and congestion, and the core links + // usually follow distance based on geography. + Height float64 +} + +const ( + // secondsToNanoseconds is used to convert float seconds to nanoseconds. + secondsToNanoseconds = 1.0e9 + + // zeroThreshold is used to decide if two coordinates are on top of each + // other. + zeroThreshold = 1.0e-6 +) + +// ErrDimensionalityConflict will be panic-d if you try to perform operations +// with incompatible dimensions. +type DimensionalityConflictError struct{} + +// Adds the error interface. +func (e DimensionalityConflictError) Error() string { + return "coordinate dimensionality does not match" +} + +// NewCoordinate creates a new coordinate at the origin, using the given config +// to supply key initial values. +func NewCoordinate(config *Config) *Coordinate { + return &Coordinate{ + Vec: make([]float64, config.Dimensionality), + Error: config.VivaldiErrorMax, + Adjustment: 0.0, + Height: config.HeightMin, + } +} + +// Clone creates an independent copy of this coordinate. +func (c *Coordinate) Clone() *Coordinate { + vec := make([]float64, len(c.Vec)) + copy(vec, c.Vec) + return &Coordinate{ + Vec: vec, + Error: c.Error, + Adjustment: c.Adjustment, + Height: c.Height, + } +} + +// componentIsValid returns false if a floating point value is a NaN or an +// infinity. +func componentIsValid(f float64) bool { + return !math.IsInf(f, 0) && !math.IsNaN(f) +} + +// IsValid returns false if any component of a coordinate isn't valid, per the +// componentIsValid() helper above. +func (c *Coordinate) IsValid() bool { + for i := range c.Vec { + if !componentIsValid(c.Vec[i]) { + return false + } + } + + return componentIsValid(c.Error) && + componentIsValid(c.Adjustment) && + componentIsValid(c.Height) +} + +// IsCompatibleWith checks to see if the two coordinates are compatible +// dimensionally. 
If this returns true then you are guaranteed to not get +// any runtime errors operating on them. +func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { + return len(c.Vec) == len(other.Vec) +} + +// ApplyForce returns the result of applying the force from the direction of the +// other coordinate. +func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + ret := c.Clone() + unit, mag := unitVectorAt(c.Vec, other.Vec) + ret.Vec = add(ret.Vec, mul(unit, force)) + if mag > zeroThreshold { + ret.Height = (ret.Height+other.Height)*force/mag + ret.Height + ret.Height = math.Max(ret.Height, config.HeightMin) + } + return ret +} + +// DistanceTo returns the distance between this coordinate and the other +// coordinate, including adjustments. +func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + dist := c.rawDistanceTo(other) + adjustedDist := dist + c.Adjustment + other.Adjustment + if adjustedDist > 0.0 { + dist = adjustedDist + } + return time.Duration(dist * secondsToNanoseconds) +} + +// rawDistanceTo returns the Vivaldi distance between this coordinate and the +// other coordinate in seconds, not including adjustments. This assumes the +// dimensions have already been checked to be compatible. +func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { + return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height +} + +// add returns the sum of vec1 and vec2. This assumes the dimensions have +// already been checked to be compatible. +func add(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] + vec2[i] + } + return ret +} + +// diff returns the difference between the vec1 and vec2. This assumes the +// dimensions have already been checked to be compatible. +func diff(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] - vec2[i] + } + return ret +} + +// mul returns vec multiplied by a scalar factor. +func mul(vec []float64, factor float64) []float64 { + ret := make([]float64, len(vec)) + for i := range vec { + ret[i] = vec[i] * factor + } + return ret +} + +// magnitude computes the magnitude of the vec. +func magnitude(vec []float64) float64 { + sum := 0.0 + for i := range vec { + sum += vec[i] * vec[i] + } + return math.Sqrt(sum) +} + +// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two +// positions are the same then a random unit vector is returned. We also return +// the distance between the points for use in the later height calculation. +func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { + ret := diff(vec1, vec2) + + // If the coordinates aren't on top of each other we can normalize. + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), mag + } + + // Otherwise, just return a random unit vector. + for i := range ret { + ret[i] = rand.Float64() - 0.5 + } + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), 0.0 + } + + // And finally just give up and make a unit vector along the first + // dimension. This should be exceedingly rare. 
+ ret = make([]float64, len(ret)) + ret[0] = 1.0 + return ret, 0.0 +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go new file mode 100644 index 000000000..d404cb36a --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate_test.go @@ -0,0 +1,298 @@ +package coordinate + +import ( + "math" + "reflect" + "testing" + "time" +) + +// verifyDimensionPanic will run the supplied func and make sure it panics with +// the expected error type. +func verifyDimensionPanic(t *testing.T, f func()) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(DimensionalityConflictError); !ok { + t.Fatalf("panic isn't the right type") + } + } else { + t.Fatalf("didn't get expected panic") + } + }() + f() +} + +func TestCoordinate_NewCoordinate(t *testing.T) { + config := DefaultConfig() + c := NewCoordinate(config) + if uint(len(c.Vec)) != config.Dimensionality { + t.Fatalf("dimensionality not set correctly %d != %d", + len(c.Vec), config.Dimensionality) + } +} + +func TestCoordinate_Clone(t *testing.T) { + c := NewCoordinate(DefaultConfig()) + c.Vec[0], c.Vec[1], c.Vec[2] = 1.0, 2.0, 3.0 + c.Error = 5.0 + c.Adjustment = 10.0 + c.Height = 4.2 + + other := c.Clone() + if !reflect.DeepEqual(c, other) { + t.Fatalf("coordinate clone didn't make a proper copy") + } + + other.Vec[0] = c.Vec[0] + 0.5 + if reflect.DeepEqual(c, other) { + t.Fatalf("cloned coordinate is still pointing at its ancestor") + } +} + +func TestCoordinate_IsValid(t *testing.T) { + c := NewCoordinate(DefaultConfig()) + + var fields []*float64 + for i := range c.Vec { + fields = append(fields, &c.Vec[i]) + } + fields = append(fields, &c.Error) + fields = append(fields, &c.Adjustment) + fields = append(fields, &c.Height) + + for i, field := range fields { + if !c.IsValid() { + t.Fatalf("field %d should be valid", i) + } + + *field = math.NaN() + if c.IsValid() { + t.Fatalf("field %d should not be valid (NaN)", i) + } + + *field = 0.0 + if !c.IsValid() { + t.Fatalf("field %d should be valid", i) + } + + *field = math.Inf(0) + if c.IsValid() { + t.Fatalf("field %d should not be valid (Inf)", i) + } + + *field = 0.0 + if !c.IsValid() { + t.Fatalf("field %d should be valid", i) + } + } +} + +func TestCoordinate_IsCompatibleWith(t *testing.T) { + config := DefaultConfig() + + config.Dimensionality = 3 + c1 := NewCoordinate(config) + c2 := NewCoordinate(config) + + config.Dimensionality = 2 + alien := NewCoordinate(config) + + if !c1.IsCompatibleWith(c1) || !c2.IsCompatibleWith(c2) || + !alien.IsCompatibleWith(alien) { + t.Fatalf("coordinates should be compatible with themselves") + } + + if !c1.IsCompatibleWith(c2) || !c2.IsCompatibleWith(c1) { + t.Fatalf("coordinates should be compatible with each other") + } + + if c1.IsCompatibleWith(alien) || c2.IsCompatibleWith(alien) || + alien.IsCompatibleWith(c1) || alien.IsCompatibleWith(c2) { + t.Fatalf("alien should not be compatible with the other coordinates") + } +} + +func TestCoordinate_ApplyForce(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + origin := NewCoordinate(config) + + // This proves that we normalize, get the direction right, and apply the + // force multiplier correctly. 
+ above := NewCoordinate(config) + above.Vec = []float64{0.0, 0.0, 2.9} + c := origin.ApplyForce(config, 5.3, above) + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, -5.3}) + + // Scoot a point not starting at the origin to make sure there's nothing + // special there. + right := NewCoordinate(config) + right.Vec = []float64{3.4, 0.0, -5.3} + c = c.ApplyForce(config, 2.0, right) + verifyEqualVectors(t, c.Vec, []float64{-2.0, 0.0, -5.3}) + + // If the points are right on top of each other, then we should end up + // in a random direction, one unit away. This makes sure the unit vector + // build up doesn't divide by zero. + c = origin.ApplyForce(config, 1.0, origin) + verifyEqualFloats(t, origin.DistanceTo(c).Seconds(), 1.0) + + // Enable a minimum height and make sure that gets factored in properly. + config.HeightMin = 10.0e-6 + origin = NewCoordinate(config) + c = origin.ApplyForce(config, 5.3, above) + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, -5.3}) + verifyEqualFloats(t, c.Height, config.HeightMin+5.3*config.HeightMin/2.9) + + // Make sure the height minimum is enforced. + c = origin.ApplyForce(config, -5.3, above) + verifyEqualVectors(t, c.Vec, []float64{0.0, 0.0, 5.3}) + verifyEqualFloats(t, c.Height, config.HeightMin) + + // Shenanigans should get called if the dimensions don't match. + bad := c.Clone() + bad.Vec = make([]float64, len(bad.Vec)+1) + verifyDimensionPanic(t, func() { c.ApplyForce(config, 1.0, bad) }) +} + +func TestCoordinate_DistanceTo(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + c1, c2 := NewCoordinate(config), NewCoordinate(config) + c1.Vec = []float64{-0.5, 1.3, 2.4} + c2.Vec = []float64{1.2, -2.3, 3.4} + + verifyEqualFloats(t, c1.DistanceTo(c1).Seconds(), 0.0) + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), c2.DistanceTo(c1).Seconds()) + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758) + + // Make sure negative adjustment factors are ignored. + c1.Adjustment = -1.0e6 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758) + + // Make sure positive adjustment factors affect the distance. + c1.Adjustment = 0.1 + c2.Adjustment = 0.2 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758+0.3) + + // Make sure the heights affect the distance. + c1.Height = 0.7 + c2.Height = 0.1 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), 4.104875150354758+0.3+0.8) + + // Shenanigans should get called if the dimensions don't match. + bad := c1.Clone() + bad.Vec = make([]float64, len(bad.Vec)+1) + verifyDimensionPanic(t, func() { _ = c1.DistanceTo(bad) }) +} + +// dist is a self-contained example that appears in documentation. +func dist(a *Coordinate, b *Coordinate) time.Duration { + // Coordinates will always have the same dimensionality, so this is + // just a sanity check. + if len(a.Vec) != len(b.Vec) { + panic("dimensions aren't compatible") + } + + // Calculate the Euclidean distance plus the heights. + sumsq := 0.0 + for i := 0; i < len(a.Vec); i++ { + diff := a.Vec[i] - b.Vec[i] + sumsq += diff * diff + } + rtt := math.Sqrt(sumsq) + a.Height + b.Height + + // Apply the adjustment components, guarding against negatives. + adjusted := rtt + a.Adjustment + b.Adjustment + if adjusted > 0.0 { + rtt = adjusted + } + + // Go's times are natively nanoseconds, so we convert from seconds. 
+ const secondsToNanoseconds = 1.0e9 + return time.Duration(rtt * secondsToNanoseconds) +} + +func TestCoordinate_dist_Example(t *testing.T) { + config := DefaultConfig() + c1, c2 := NewCoordinate(config), NewCoordinate(config) + c1.Vec = []float64{-0.5, 1.3, 2.4} + c2.Vec = []float64{1.2, -2.3, 3.4} + c1.Adjustment = 0.1 + c2.Adjustment = 0.2 + c1.Height = 0.7 + c2.Height = 0.1 + verifyEqualFloats(t, c1.DistanceTo(c2).Seconds(), dist(c1, c2).Seconds()) +} + +func TestCoordinate_rawDistanceTo(t *testing.T) { + config := DefaultConfig() + config.Dimensionality = 3 + config.HeightMin = 0 + + c1, c2 := NewCoordinate(config), NewCoordinate(config) + c1.Vec = []float64{-0.5, 1.3, 2.4} + c2.Vec = []float64{1.2, -2.3, 3.4} + + verifyEqualFloats(t, c1.rawDistanceTo(c1), 0.0) + verifyEqualFloats(t, c1.rawDistanceTo(c2), c2.rawDistanceTo(c1)) + verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758) + + // Make sure that the adjustment doesn't factor into the raw + // distance. + c1.Adjustment = 1.0e6 + verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758) + + // Make sure the heights affect the distance. + c1.Height = 0.7 + c2.Height = 0.1 + verifyEqualFloats(t, c1.rawDistanceTo(c2), 4.104875150354758+0.8) +} + +func TestCoordinate_add(t *testing.T) { + vec1 := []float64{1.0, -3.0, 3.0} + vec2 := []float64{-4.0, 5.0, 6.0} + verifyEqualVectors(t, add(vec1, vec2), []float64{-3.0, 2.0, 9.0}) + + zero := []float64{0.0, 0.0, 0.0} + verifyEqualVectors(t, add(vec1, zero), vec1) +} + +func TestCoordinate_diff(t *testing.T) { + vec1 := []float64{1.0, -3.0, 3.0} + vec2 := []float64{-4.0, 5.0, 6.0} + verifyEqualVectors(t, diff(vec1, vec2), []float64{5.0, -8.0, -3.0}) + + zero := []float64{0.0, 0.0, 0.0} + verifyEqualVectors(t, diff(vec1, zero), vec1) +} + +func TestCoordinate_magnitude(t *testing.T) { + zero := []float64{0.0, 0.0, 0.0} + verifyEqualFloats(t, magnitude(zero), 0.0) + + vec := []float64{1.0, -2.0, 3.0} + verifyEqualFloats(t, magnitude(vec), 3.7416573867739413) +} + +func TestCoordinate_unitVectorAt(t *testing.T) { + vec1 := []float64{1.0, 2.0, 3.0} + vec2 := []float64{0.5, 0.6, 0.7} + u, mag := unitVectorAt(vec1, vec2) + verifyEqualVectors(t, u, []float64{0.18257418583505536, 0.511207720338155, 0.8398412548412546}) + verifyEqualFloats(t, magnitude(u), 1.0) + verifyEqualFloats(t, mag, magnitude(diff(vec1, vec2))) + + // If we give positions that are equal we should get a random unit vector + // returned to us, rather than a divide by zero. + u, mag = unitVectorAt(vec1, vec1) + verifyEqualFloats(t, magnitude(u), 1.0) + verifyEqualFloats(t, mag, 0.0) + + // We can't hit the final clause without heroics so I manually forced it + // there to verify it works. 
+} diff --git a/vendor/github.com/hashicorp/serf/coordinate/performance_test.go b/vendor/github.com/hashicorp/serf/coordinate/performance_test.go new file mode 100644 index 000000000..fc676e20f --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/performance_test.go @@ -0,0 +1,182 @@ +package coordinate + +import ( + "math" + "testing" + "time" +) + +func TestPerformance_Line(t *testing.T) { + const spacing = 10 * time.Millisecond + const nodes, cycles = 10, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateLine(nodes, spacing) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.0018 || stats.ErrorMax > 0.0092 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Grid(t *testing.T) { + const spacing = 10 * time.Millisecond + const nodes, cycles = 25, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateGrid(nodes, spacing) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.0015 || stats.ErrorMax > 0.022 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Split(t *testing.T) { + const lan, wan = 1 * time.Millisecond, 10 * time.Millisecond + const nodes, cycles = 25, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateSplit(nodes, lan, wan) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.000060 || stats.ErrorMax > 0.00048 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Height(t *testing.T) { + const radius = 100 * time.Millisecond + const nodes, cycles = 25, 1000 + + // Constrain us to two dimensions so that we can just exactly represent + // the circle. + config := DefaultConfig() + config.Dimensionality = 2 + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + + // Generate truth where the first coordinate is in the "middle" because + // it's equidistant from all the nodes, but it will have an extra radius + // added to the distance, so it should come out above all the others. + truth := GenerateCircle(nodes, radius) + Simulate(clients, truth, cycles) + + // Make sure the height looks reasonable with the regular nodes all in a + // plane, and the center node up above. + for i, _ := range clients { + coord := clients[i].GetCoordinate() + if i == 0 { + if coord.Height < 0.97*radius.Seconds() { + t.Fatalf("height is out of spec: %9.6f", coord.Height) + } + } else { + if coord.Height > 0.03*radius.Seconds() { + t.Fatalf("height is out of spec: %9.6f", coord.Height) + } + } + } + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.0025 || stats.ErrorMax > 0.064 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} + +func TestPerformance_Drift(t *testing.T) { + const dist = 500 * time.Millisecond + const nodes = 4 + config := DefaultConfig() + config.Dimensionality = 2 + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + + // Do some icky surgery on the clients to put them into a square, up in + // the first quadrant. 
+ clients[0].coord.Vec = []float64{0.0, 0.0} + clients[1].coord.Vec = []float64{0.0, dist.Seconds()} + clients[2].coord.Vec = []float64{dist.Seconds(), dist.Seconds()} + clients[3].coord.Vec = []float64{dist.Seconds(), dist.Seconds()} + + // Make a corresponding truth matrix. The nodes are laid out like this + // so the distances are all equal, except for the diagonal: + // + // (1) <- dist -> (2) + // + // | <- dist | + // | | + // | dist -> | + // + // (0) <- dist -> (3) + // + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rtt := dist + if (i%2 == 0) && (j%2 == 0) { + rtt = time.Duration(math.Sqrt2 * float64(rtt)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + + calcCenterError := func() float64 { + min, max := clients[0].GetCoordinate(), clients[0].GetCoordinate() + for i := 1; i < nodes; i++ { + coord := clients[i].GetCoordinate() + for j, v := range coord.Vec { + min.Vec[j] = math.Min(min.Vec[j], v) + max.Vec[j] = math.Max(max.Vec[j], v) + } + } + + mid := make([]float64, config.Dimensionality) + for i, _ := range mid { + mid[i] = min.Vec[i] + (max.Vec[i]-min.Vec[i])/2 + } + return magnitude(mid) + } + + // Let the simulation run for a while to stabilize, then snap a baseline + // for the center error. + Simulate(clients, truth, 1000) + baseline := calcCenterError() + + // Now run for a bunch more cycles and see if gravity pulls the center + // in the right direction. + Simulate(clients, truth, 10000) + if error := calcCenterError(); error > 0.8*baseline { + t.Fatalf("drift performance out of spec: %9.6f -> %9.6f", baseline, error) + } +} + +func TestPerformance_Random(t *testing.T) { + const mean, deviation = 100 * time.Millisecond, 10 * time.Millisecond + const nodes, cycles = 25, 1000 + config := DefaultConfig() + clients, err := GenerateClients(nodes, config) + if err != nil { + t.Fatal(err) + } + truth := GenerateRandom(nodes, mean, deviation) + Simulate(clients, truth, cycles) + stats := Evaluate(clients, truth) + if stats.ErrorAvg > 0.075 || stats.ErrorMax > 0.33 { + t.Fatalf("performance stats are out of spec: %v", stats) + } +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go new file mode 100644 index 000000000..6fb033c0c --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go @@ -0,0 +1,187 @@ +package coordinate + +import ( + "fmt" + "math" + "math/rand" + "time" +) + +// GenerateClients returns a slice with nodes number of clients, all with the +// given config. +func GenerateClients(nodes int, config *Config) ([]*Client, error) { + clients := make([]*Client, nodes) + for i, _ := range clients { + client, err := NewClient(config) + if err != nil { + return nil, err + } + + clients[i] = client + } + return clients, nil +} + +// GenerateLine returns a truth matrix as if all the nodes are in a straight linke +// with the given spacing between them. +func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rtt := time.Duration(j-i) * spacing + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional +// grid with the given spacing between them. 
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + n := int(math.Sqrt(float64(nodes))) + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + x1, y1 := float64(i%n), float64(i/n) + x2, y2 := float64(j%n), float64(j/n) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt := time.Duration(dist * float64(spacing)) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateSplit returns a truth matrix as if half the nodes are close together in +// one location and half the nodes are close together in another. The lan factor +// is used to separate the nodes locally and the wan factor represents the split +// between the two sides. +func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + split := nodes / 2 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rtt := lan + if (i <= split && j > split) || (i > split && j <= split) { + rtt += wan + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed +// around a circle with the given radius. The first node is at the "center" of the +// circle because it's equidistant from all the other nodes, but we place it at +// double the radius, so it should show up above all the other nodes in height. +func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). 
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i, _ := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. +func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { + nodes := len(clients) + count := 0 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() + actual := truth[i][j].Seconds() + error := math.Abs(est-actual) / actual + stats.ErrorMax = math.Max(stats.ErrorMax, error) + stats.ErrorAvg += error + count += 1 + } + } + + stats.ErrorAvg /= float64(count) + fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) + return +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/util_test.go b/vendor/github.com/hashicorp/serf/coordinate/util_test.go new file mode 100644 index 000000000..116e94933 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/util_test.go @@ -0,0 +1,27 @@ +package coordinate + +import ( + "math" + "testing" +) + +// verifyEqualFloats will compare f1 and f2 and fail if they are not +// "equal" within a threshold. +func verifyEqualFloats(t *testing.T, f1 float64, f2 float64) { + const zeroThreshold = 1.0e-6 + if math.Abs(f1-f2) > zeroThreshold { + t.Fatalf("equal assertion fail, %9.6f != %9.6f", f1, f2) + } +} + +// verifyEqualVectors will compare vec1 and vec2 and fail if they are not +// "equal" within a threshold. +func verifyEqualVectors(t *testing.T, vec1 []float64, vec2 []float64) { + if len(vec1) != len(vec2) { + t.Fatalf("vector length mismatch, %d != %d", len(vec1), len(vec2)) + } + + for i, _ := range vec1 { + verifyEqualFloats(t, vec1[i], vec2[i]) + } +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..f143ed6a1 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,156 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. 
The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. 
+ // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 000000000..62844131b --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,583 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
+ check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) + testDeadline(c, 2*timeUnit, t) +} + +func TestTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 1*timeUnit) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o = otherContext{c} + c, _ = WithTimeout(o, 3*timeUnit) + testDeadline(c, 2*timeUnit, t) +} + +func TestCanceledTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 2*timeUnit) + o := otherContext{c} + c, cancel := WithTimeout(o, 4*timeUnit) + cancel() + time.Sleep(1 * timeUnit) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 000000000..606cf1f97 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. 
+ if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go new file mode 100644 index 000000000..72411b1b6 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,go1.7 + +package ctxhttp + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "context" +) + +func TestGo17Context(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + })) + defer ts.Close() + ctx := context.Background() + resp, err := Get(ctx, http.DefaultClient, ts.URL) + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } + resp.Body.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 000000000..926870cc2 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. 
+ cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = &notifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go new file mode 100644 index 000000000..9159cf022 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !plan9,!go1.7 + +package ctxhttp + +import ( + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one. + <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go new file mode 100644 index 000000000..1e4155180 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !plan9 + +package ctxhttp + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func okHandler(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + io.WriteString(w, requestBody) +} + +func TestNoTimeout(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(okHandler)) + defer ts.Close() + + ctx := context.Background() + res, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != requestBody { + t.Errorf("body = %q; want %q", slurp, requestBody) + } +} + +func TestCancelBeforeHeaders(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + blockServer := make(chan struct{}) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cancel() + <-blockServer + io.WriteString(w, requestBody) + })) + defer ts.Close() + defer close(blockServer) + + res, err := Get(ctx, nil, ts.URL) + if err == nil { + res.Body.Close() + t.Fatal("Get returned unexpected nil error") + } + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestCancelAfterHangingRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + <-w.(http.CloseNotifier).CloseNotify() + })) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + resp, err := Get(ctx, nil, ts.URL) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + // Cancel before reading the body. + // Reading Request.Body should fail, since the request was + // canceled before anything was written. + cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..d20f52b7d --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..0f35592df --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. 
The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. 
If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
+type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 000000000..a6754dc36 --- /dev/null +++ b/vendor/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +}
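For reference, the docker/libkv and Consul packages vendored above are what back the new Consul datasource. Below is a minimal, hedged sketch (not part of this patch) of how libkv is typically used to read a single key from a local Consul agent; the agent address, key name, and the upstream github.com import paths are illustrative assumptions rather than code taken from this change.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
)

func main() {
	// Register the Consul backend with libkv before creating a store.
	consul.Register()

	// Agent address and timeout are illustrative assumptions.
	kv, err := libkv.NewStore(
		store.CONSUL,
		[]string{"127.0.0.1:8500"},
		&store.Config{ConnectionTimeout: 10 * time.Second},
	)
	if err != nil {
		log.Fatal(err)
	}

	// "app/config" is a hypothetical key used only for this sketch.
	pair, err := kv.Get("app/config")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s => %s\n", pair.Key, pair.Value)
}

The same NewStore call with store.BOLTDB and a local file path is the analogous entry point for the BoltDB backend.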