diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 0000000000..19fb961c2f --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,71 @@ +--- +name: Test CRDB +on: + push: + branches: + - master + pull_request: + workflow_dispatch: + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-go@v3 + with: + go-version: '1.19' + + - uses: golangci/golangci-lint-action@v3 + with: + args: ./storage/crdb + + unit-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-go@v3 + with: + go-version: '1.19' + check-latest: true + cache: true + + - name: Run tests + run: go test -v ./storage/crdb/... ./quota/crdbqm/... + + integration: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-go@v3 + with: + go-version: '1.19' + check-latest: true + cache: true + + - name: Build before tests + run: go mod download && go build ./... + + - name: Run CockroachDB + run: docker run --rm -d --name=roach -p 8080:8080 -p 26257:26257 -v "${PWD}/cockroach-data:/cockroach/cockroach-data" cockroachdb/cockroach:latest start-single-node --insecure + + - name: Wait for CockroachDB + uses: nick-fields/retry@v2 + with: + timeout_seconds: 15 + max_attempts: 3 + retry_on: error + command: docker exec roach ./cockroach sql --insecure -e "SELECT 1" + + - name: Get crdb logs + run: docker logs roach + + - name: Run tests + run: ./integration/integration_test.sh + env: + TEST_COCKROACHDB_URI: postgresql://root@localhost:26257/defaultdb?sslmode=disable + CRDB_IN_CONTAINER: true + CRDB_CONTAINER_NAME: roach diff --git a/.gitignore b/.gitignore index cff7f98b96..f7e55498c0 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ /trillian_log_server /trillian_log_signer /trillian_map_server default.etcd +cockroach-data/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a0f400ce3..a82a2574ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,11 @@ ## HEAD 
+### Storage + +A new storage driver for CockroachDB has been added. It's currently in alpha stage +with support provided by Equinix Metal. + ### Misc * Fix log server not exiting properly on SIGINT diff --git a/cmd/trillian_log_server/main.go b/cmd/trillian_log_server/main.go index e6b7c98b64..745881c9c2 100644 --- a/cmd/trillian_log_server/main.go +++ b/cmd/trillian_log_server/main.go @@ -47,9 +47,11 @@ import ( // Register supported storage providers. _ "github.com/google/trillian/storage/cloudspanner" + _ "github.com/google/trillian/storage/crdb" _ "github.com/google/trillian/storage/mysql" - // Load MySQL quota provider + // Load quota providers + _ "github.com/google/trillian/quota/crdbqm" _ "github.com/google/trillian/quota/mysqlqm" ) @@ -92,6 +94,7 @@ func main() { klog.Exitf("Failed to load flags from config file %q: %s", *configFile, err) } } + klog.Info("**** Log Server Starting ****") ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/cmd/trillian_log_signer/main.go b/cmd/trillian_log_signer/main.go index 6f479c159b..d9de2eb16e 100644 --- a/cmd/trillian_log_signer/main.go +++ b/cmd/trillian_log_signer/main.go @@ -53,9 +53,11 @@ import ( // Register supported storage providers. _ "github.com/google/trillian/storage/cloudspanner" + _ "github.com/google/trillian/storage/crdb" _ "github.com/google/trillian/storage/mysql" - // Load MySQL quota provider + // Load quota providers + _ "github.com/google/trillian/quota/crdbqm" _ "github.com/google/trillian/quota/mysqlqm" ) diff --git a/docs/Feature_Implementation_Matrix.md b/docs/Feature_Implementation_Matrix.md index 3dfbd02197..dfbfb29f99 100644 --- a/docs/Feature_Implementation_Matrix.md +++ b/docs/Feature_Implementation_Matrix.md @@ -60,6 +60,7 @@ The Log storage implementations supporting the original Trillian log. | Spanner | GA | ✓ | Google internal-only, see CloudSpanner for external use. 
| | CloudSpanner | Beta | | Google maintains continuous-integration environment based on CloudSpanner. | | MySQL | GA | ✓ | | +| CockroachDB | Alpha | | Supported by [Equinix Metal](https://deploy.equinix.com/). | ##### Spanner This is a Google-internal implementation, and is used by all of Google's current Trillian deployments. @@ -79,6 +80,12 @@ It's currently in production use by at least one CT log operator. Write throughput of 4-500 entries/s has been observed. +##### CockroachDB + +This implementation has been tested with CockroachDB 22.1.10. + +It's currently in alpha mode and is not yet in production use. + ### Monitoring Supported monitoring frameworks, allowing for production monitoring and alerting. @@ -107,6 +114,7 @@ Supported frameworks for quota management. | etcd | GA | ✓ | | | MySQL | Beta | ? | | | Redis | Alpha | ✓ | | +| CockroachDB | Alpha | | Supported by [Equinix Metal](https://deploy.equinix.com/). | ### Key management diff --git a/go.mod b/go.mod index a84430b7a4..c5da5178d5 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( cloud.google.com/go/spanner v1.40.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.12 github.com/apache/beam/sdks/v2 v2.0.0-20211012030016-ef4364519c94 + github.com/cockroachdb/cockroach-go/v2 v2.2.16 github.com/fullstorydev/grpcurl v1.8.7 github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 @@ -16,6 +17,7 @@ require ( github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/letsencrypt/pkcs11key/v4 v4.0.0 + github.com/lib/pq v1.10.7 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 github.com/pseudomuto/protoc-gen-doc v1.5.1 @@ -68,6 +70,7 @@ require ( github.com/fatih/color v1.9.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/go-logr/logr v1.2.0 // indirect + github.com/gofrs/flock 
v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -83,13 +86,20 @@ require ( github.com/huandu/xstrings v1.2.0 // indirect github.com/imdario/mergo v0.3.9 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.12.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.11.0 // indirect + github.com/jackc/pgx/v4 v4.16.1 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jhump/protoreflect v1.12.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jonboulle/clockwork v0.3.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect - github.com/mattn/go-colorable v0.1.4 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/miekg/pkcs11 v1.0.3 // indirect @@ -103,6 +113,7 @@ require ( github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect github.com/otiai10/copy v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/prometheus v2.5.0+incompatible // indirect diff --git a/go.sum b/go.sum index fcab1465e2..0a7857fa5f 100644 --- a/go.sum +++ b/go.sum @@ -299,6 +299,7 @@ github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy86 
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= @@ -362,6 +363,10 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go/v2 v2.2.16 h1:t9dmZuC9J2W8IDQDSIGXmP+fBuEJSsrGXxWQz4cYqBY= +github.com/cockroachdb/cockroach-go/v2 v2.2.16/go.mod h1:xZ2VHjUEb/cySv0scXBx7YsBnHtLHkR1+w/w73b5i3M= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= @@ -375,8 +380,9 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz github.com/coreos/go-semver 
v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -447,6 +453,10 @@ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfC github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -603,6 +613,58 @@ github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgconn v1.12.1 h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8= +github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= 
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y= +github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype 
v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.11.0 h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs= +github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI= +github.com/jackc/pgx/v4 v4.16.1 h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y= +github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.1 h1:gI8os0wpRXFd4FiAY2dWiqRK037tjj3t7rKFeO4X5iw= +github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= 
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -612,10 +674,13 @@ github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7 github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= github.com/jhump/protoreflect v1.12.0 h1:1NQ4FpWMgn3by/n1X0fbeKEUxP1wBt7+Oitpv01HR10= github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= @@ -638,6 +703,7 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -649,20 +715,33 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/letsencrypt/pkcs11key/v4 v4.0.0 h1:qLc/OznH7xMr5ARJgkZCCWk+EomQkiNTOoOF5LAgagc= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= 
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -770,18 +849,27 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= @@ -847,6 +935,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= @@ -899,17 +988,25 @@ go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= @@ -918,6 +1015,7 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto 
v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -925,8 +1023,12 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220517005047-85d78b3ac167/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -985,6 +1087,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1085,6 +1188,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1092,6 +1196,7 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1100,6 +1205,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1155,6 +1261,7 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1163,6 +1270,7 @@ 
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= @@ -1195,6 +1303,7 @@ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1202,8 +1311,11 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1213,6 +1325,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1248,6 +1361,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools 
v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1503,6 +1618,7 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/linkedin/goavro.v1 v1.0.5/go.mod h1:Aw5GdAbizjOEl0kAMHV9iHmA8reZzW/OKuJAl4Hb9F0= @@ -1533,6 +1649,9 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU= +gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.5/go.mod 
h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/integration/admin/admin_integration_test.go b/integration/admin/admin_integration_test.go index be3619ec8e..ff24b9b304 100644 --- a/integration/admin/admin_integration_test.go +++ b/integration/admin/admin_integration_test.go @@ -582,7 +582,8 @@ func setupAdminServer(ctx context.Context, t *testing.T) (*testServer, error) { return nil, err } - registry, done, err := integration.NewRegistryForTests(ctx) + // TODO(jaosorior): Make this configurable for Cockroach or MySQL + registry, done, err := integration.NewRegistryForTests(ctx, testdb.DriverMySQL) if err != nil { ts.closeAll() return nil, err diff --git a/integration/functions.sh b/integration/functions.sh index bc66730081..b359992cbe 100755 --- a/integration/functions.sh +++ b/integration/functions.sh @@ -117,12 +117,17 @@ kill_pid() { # - SOFTHSM_CONF : location of the SoftHSM configuration file # log_prep_test() { + set -x # Default to one of each. 
local rpc_server_count=${1:-1} local log_signer_count=${2:-1} # Wipe the test database - yes | bash "${TRILLIAN_PATH}/scripts/resetdb.sh" + if [[ "${TEST_MYSQL_URI}" != "" ]]; then + yes | bash "${TRILLIAN_PATH}/scripts/resetdb.sh" + elif [[ "${TEST_COCKROACHDB_URI}" != "" ]]; then + yes | bash "${TRILLIAN_PATH}/scripts/resetcrdb.sh" + fi local logserver_opts='' local logsigner_opts='' @@ -131,6 +136,9 @@ log_prep_test() { if [[ "${TEST_MYSQL_URI}" != "" ]]; then logserver_opts+=" --mysql_uri=${TEST_MYSQL_URI}" logsigner_opts+=" --mysql_uri=${TEST_MYSQL_URI}" + elif [[ "${TEST_COCKROACHDB_URI}" != "" ]]; then + logserver_opts+="--quota_system=crdb --storage_system=crdb --crdb_uri=${TEST_COCKROACHDB_URI}" + logsigner_opts+="--quota_system=crdb --storage_system=crdb --crdb_uri=${TEST_COCKROACHDB_URI}" fi # Start a local etcd instance (if configured). diff --git a/integration/quota/quota_test.go b/integration/quota/quota_test.go index a0835413f0..477be695a0 100644 --- a/integration/quota/quota_test.go +++ b/integration/quota/quota_test.go @@ -51,7 +51,8 @@ func TestEtcdRateLimiting(t *testing.T) { testdb.SkipIfNoMySQL(t) ctx := context.Background() - registry, done, err := integration.NewRegistryForTests(ctx) + // TODO(jaosorior): Make this configurable for Cockroach or MySQL + registry, done, err := integration.NewRegistryForTests(ctx, testdb.DriverMySQL) if err != nil { t.Fatalf("NewRegistryForTests() returned err = %v", err) } @@ -100,7 +101,7 @@ func TestEtcdRateLimiting(t *testing.T) { func TestMySQLRateLimiting(t *testing.T) { testdb.SkipIfNoMySQL(t) ctx := context.Background() - db, done, err := testdb.NewTrillianDB(ctx) + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverMySQL) if err != nil { t.Fatalf("GetTestDB() returned err = %v", err) } diff --git a/quota/crdbqm/common_test.go b/quota/crdbqm/common_test.go new file mode 100644 index 0000000000..50eb0234a0 --- /dev/null +++ b/quota/crdbqm/common_test.go @@ -0,0 +1,50 @@ +// Copyright 2022 Trillian 
Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdbqm + +import ( + "os" + "testing" + + "github.com/cockroachdb/cockroach-go/v2/testserver" + "github.com/google/trillian/storage/testdb" + "k8s.io/klog/v2" +) + +func TestMain(m *testing.M) { + ts, err := testserver.NewTestServer() + if err != nil { + klog.Errorf("Failed to start test server: %v", err) + os.Exit(1) + } + defer ts.Stop() + + // reset the test server URL path. By default cockroach sets it + // to point to a default database, we don't want that. + dburl := ts.PGURL() + dburl.Path = "/" + + // Set the environment variable for the test server + os.Setenv(testdb.CockroachDBURIEnv, dburl.String()) + + if !testdb.CockroachDBAvailable() { + klog.Errorf("CockroachDB not available, skipping all CockroachDB storage tests") + return + } + + status := m.Run() + + os.Exit(status) +} diff --git a/quota/crdbqm/crdb_quota.go b/quota/crdbqm/crdb_quota.go new file mode 100644 index 0000000000..488b9903de --- /dev/null +++ b/quota/crdbqm/crdb_quota.go @@ -0,0 +1,100 @@ +// Copyright 2022 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package crdbqm defines a CockroachDB-based quota.Manager implementation. +package crdbqm + +import ( + "context" + "database/sql" + "errors" + + "github.com/google/trillian/quota" +) + +const ( + // DefaultMaxUnsequenced is a suggested value for MaxUnsequencedRows. + // Note that this is a Global/Write quota suggestion, so it applies across trees. + DefaultMaxUnsequenced = 500000 // About 2h of non-stop signing at 70QPS. + + // TODO(jaosorior): Come up with a more optimal solution for CRDB, as this is + // linear and too costly. + countFromUnsequencedTable = "SELECT COUNT(*) FROM Unsequenced" +) + +// ErrTooManyUnsequencedRows is returned when tokens are requested but Unsequenced has grown +// beyond the configured limit. +var ErrTooManyUnsequencedRows = errors.New("too many unsequenced rows") + +// QuotaManager is a CockroachDB-based quota.Manager implementation. +// +// QuotaManager only implements Global/Write quotas, which is based on the number of Unsequenced +// rows (to be exact, tokens = MaxUnsequencedRows - actualUnsequencedRows). +// Other quotas are considered infinite. +type QuotaManager struct { + DB *sql.DB + MaxUnsequencedRows int +} + +// GetTokens implements quota.Manager.GetTokens. +// It doesn't actually reserve or retrieve tokens, instead it allows access based on the number of +// rows in the Unsequenced table. 
+func (m *QuotaManager) GetTokens(ctx context.Context, numTokens int, specs []quota.Spec) error { + for _, spec := range specs { + if spec.Group != quota.Global || spec.Kind != quota.Write { + continue + } + // Only allow global writes if Unsequenced is under the expected limit + count, err := m.countUnsequenced(ctx) + if err != nil { + return err + } + if count+numTokens > m.MaxUnsequencedRows { + return ErrTooManyUnsequencedRows + } + } + return nil +} + +// PutTokens implements quota.Manager.PutTokens. +// It's a noop for QuotaManager. +func (m *QuotaManager) PutTokens(ctx context.Context, numTokens int, specs []quota.Spec) error { + return nil +} + +// ResetQuota implements quota.Manager.ResetQuota. +// It's a noop for QuotaManager. +func (m *QuotaManager) ResetQuota(ctx context.Context, specs []quota.Spec) error { + return nil +} + +func (m *QuotaManager) countUnsequenced(ctx context.Context) (int, error) { + // table names are lowercase for some reason + rows, err := m.DB.QueryContext(ctx, countFromUnsequencedTable) + if err != nil { + return 0, err + } + defer rows.Close() + if !rows.Next() { + return 0, errors.New("cursor has no rows after quota limit determination query") + } + var count int + if err := rows.Scan(&count); err != nil { + return 0, err + } + if rows.Next() { + return 0, errors.New("too many rows returned from quota limit determination query") + } + return count, nil +} diff --git a/quota/crdbqm/crdb_quota_test.go b/quota/crdbqm/crdb_quota_test.go new file mode 100644 index 0000000000..feb5e44f96 --- /dev/null +++ b/quota/crdbqm/crdb_quota_test.go @@ -0,0 +1,260 @@ +// Copyright 2022 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdbqm + +import ( + "context" + "crypto" + "database/sql" + "fmt" + "testing" + "time" + + "github.com/google/trillian" + "github.com/google/trillian/quota" + "github.com/google/trillian/storage" + "github.com/google/trillian/storage/crdb" + "github.com/google/trillian/storage/testdb" + "github.com/google/trillian/types" + + stestonly "github.com/google/trillian/storage/testonly" +) + +func TestQuotaManager_GetTokens(t *testing.T) { + testdb.SkipIfNoCockroachDB(t) + ctx := context.Background() + + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverCockroachDB) + if err != nil { + t.Fatalf("GetTestDB() returned err = %v", err) + } + defer done(ctx) + + tree, err := createTree(ctx, db) + if err != nil { + t.Fatalf("createTree() returned err = %v", err) + } + + tests := []struct { + desc string + unsequencedRows, maxUnsequencedRows, numTokens int + specs []quota.Spec + wantErr bool + }{ + { + desc: "globalWriteSingleToken", + unsequencedRows: 10, + maxUnsequencedRows: 20, + numTokens: 1, + specs: []quota.Spec{{Group: quota.Global, Kind: quota.Write}}, + }, + { + desc: "globalWriteMultiToken", + unsequencedRows: 10, + maxUnsequencedRows: 20, + numTokens: 5, + specs: []quota.Spec{{Group: quota.Global, Kind: quota.Write}}, + }, + { + desc: "globalWriteOverQuota1", + unsequencedRows: 20, + maxUnsequencedRows: 20, + numTokens: 1, + specs: []quota.Spec{{Group: quota.Global, Kind: quota.Write}}, + wantErr: true, + }, + { + desc: "globalWriteOverQuota2", + unsequencedRows: 15, + maxUnsequencedRows: 20, + numTokens: 10, + 
specs: []quota.Spec{{Group: quota.Global, Kind: quota.Write}}, + wantErr: true, + }, + { + desc: "unlimitedQuotas", + numTokens: 10, + specs: []quota.Spec{ + {Group: quota.User, Kind: quota.Read, User: "dylan"}, + {Group: quota.Tree, Kind: quota.Read, TreeID: tree.TreeId}, + {Group: quota.Global, Kind: quota.Read}, + {Group: quota.User, Kind: quota.Write, User: "dylan"}, + {Group: quota.Tree, Kind: quota.Write, TreeID: tree.TreeId}, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if err := setUnsequencedRows(ctx, db, tree, test.unsequencedRows); err != nil { + t.Errorf("setUnsequencedRows() returned err = %v", err) + return + } + + qm := &QuotaManager{DB: db, MaxUnsequencedRows: test.maxUnsequencedRows} + + err = qm.GetTokens(ctx, test.numTokens, test.specs) + if hasErr := err == ErrTooManyUnsequencedRows; hasErr != test.wantErr { + t.Errorf("%v: GetTokens() returned err = %q, wantErr = %v", test.desc, err, test.wantErr) + } + }) + } +} + +func TestQuotaManager_Noops(t *testing.T) { + testdb.SkipIfNoCockroachDB(t) + ctx := context.Background() + + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverCockroachDB) + if err != nil { + t.Fatalf("GetTestDB() returned err = %v", err) + } + defer done(ctx) + + qm := &QuotaManager{DB: db, MaxUnsequencedRows: 1000} + specs := allSpecs(ctx, qm, 10 /* treeID */) + + tests := []struct { + desc string + fn func() error + }{ + { + desc: "PutTokens", + fn: func() error { + return qm.PutTokens(ctx, 10 /* numTokens */, specs) + }, + }, + { + desc: "ResetQuota", + fn: func() error { + return qm.ResetQuota(ctx, specs) + }, + }, + } + for _, test := range tests { + if err := test.fn(); err != nil { + t.Errorf("%v: got err = %v", test.desc, err) + } + } +} + +func allSpecs(_ context.Context, _ quota.Manager, treeID int64) []quota.Spec { + return []quota.Spec{ + {Group: quota.User, Kind: quota.Read, User: "florence"}, + {Group: quota.Tree, Kind: quota.Read, TreeID: treeID}, + {Group: 
quota.Global, Kind: quota.Read}, + {Group: quota.User, Kind: quota.Write, User: "florence"}, + {Group: quota.Tree, Kind: quota.Write, TreeID: treeID}, + {Group: quota.Global, Kind: quota.Write}, + } +} + +func countUnsequenced(ctx context.Context, db *sql.DB) (int, error) { + var count int + if err := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM Unsequenced").Scan(&count); err != nil { + return 0, err + } + return count, nil +} + +func createTree(ctx context.Context, db *sql.DB) (*trillian.Tree, error) { + var tree *trillian.Tree + + { + as := crdb.NewSQLAdminStorage(db) + err := as.ReadWriteTransaction(ctx, func(ctx context.Context, tx storage.AdminTX) error { + var err error + tree, err = tx.CreateTree(ctx, stestonly.LogTree) + return err + }) + if err != nil { + return nil, err + } + } + + { + ls := crdb.NewLogStorage(db, nil) + err := ls.ReadWriteTransaction(ctx, tree, func(ctx context.Context, tx storage.LogTreeTX) error { + logRoot, err := (&types.LogRootV1{RootHash: []byte{0}}).MarshalBinary() + if err != nil { + return err + } + slr := &trillian.SignedLogRoot{LogRoot: logRoot} + return tx.StoreSignedLogRoot(ctx, slr) + }) + if err != nil { + return nil, err + } + } + + return tree, nil +} + +func queueLeaves(ctx context.Context, db *sql.DB, tree *trillian.Tree, firstID, num int) error { + hasher := crypto.SHA256.New() + + leaves := []*trillian.LogLeaf{} + for i := 0; i < num; i++ { + value := []byte(fmt.Sprintf("leaf-%v", firstID+i)) + hasher.Reset() + if _, err := hasher.Write(value); err != nil { + return err + } + hash := hasher.Sum(nil) + leaves = append(leaves, &trillian.LogLeaf{ + MerkleLeafHash: hash, + LeafValue: value, + ExtraData: []byte("extra data"), + LeafIdentityHash: hash, + }) + } + + ls := crdb.NewLogStorage(db, nil) + _, err := ls.QueueLeaves(ctx, tree, leaves, time.Now()) + return err +} + +func setUnsequencedRows(ctx context.Context, db *sql.DB, tree *trillian.Tree, wantRows int) error { + count, err := countUnsequenced(ctx, db) + if 
err != nil { + return err + } + if count == wantRows { + return nil + } + + // Clear the tables and re-create leaves from scratch. It's easier than having to reason + // about duplicate entries. + if _, err := db.ExecContext(ctx, "DELETE FROM LeafData"); err != nil { + return err + } + if _, err := db.ExecContext(ctx, "DELETE FROM Unsequenced"); err != nil { + return err + } + if err := queueLeaves(ctx, db, tree, 0 /* firstID */, wantRows); err != nil { + return err + } + + // Sanity check the final count + count, err = countUnsequenced(ctx, db) + if err != nil { + return err + } + if count != wantRows { + return fmt.Errorf("got %v unsequenced rows, want = %v", count, wantRows) + } + + return nil +} diff --git a/quota/crdbqm/quota_provider.go b/quota/crdbqm/quota_provider.go new file mode 100644 index 0000000000..06b6fbbb30 --- /dev/null +++ b/quota/crdbqm/quota_provider.go @@ -0,0 +1,50 @@ +// Copyright 2022 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdbqm + +import ( + "flag" + + "k8s.io/klog/v2" + + "github.com/google/trillian/quota" + "github.com/google/trillian/storage/crdb" +) + +// QuotaManagerName identifies the CockroachDB quota implementation. +const QuotaManagerName = "crdb" + +var maxUnsequencedRows = flag.Int("crdb_max_unsequenced_rows", DefaultMaxUnsequenced, + "Max number of unsequenced rows before rate limiting kicks in. 
Only effective for quota_system=crdb.") + +func init() { + if err := quota.RegisterProvider(QuotaManagerName, newCockroachDBQuotaManager); err != nil { + klog.Fatalf("Failed to register quota manager %v: %v", QuotaManagerName, err) + } +} + +func newCockroachDBQuotaManager() (quota.Manager, error) { + db, err := crdb.GetDatabase() + if err != nil { + return nil, err + } + qm := &QuotaManager{ + DB: db, + MaxUnsequencedRows: *maxUnsequencedRows, + } + + klog.Info("Using CockroachDB QuotaManager") + return qm, nil +} diff --git a/quota/mysqlqm/mysql_quota_test.go b/quota/mysqlqm/mysql_quota_test.go index d418840f42..dabfd131dd 100644 --- a/quota/mysqlqm/mysql_quota_test.go +++ b/quota/mysqlqm/mysql_quota_test.go @@ -37,7 +37,7 @@ func TestQuotaManager_GetTokens(t *testing.T) { testdb.SkipIfNoMySQL(t) ctx := context.Background() - db, done, err := testdb.NewTrillianDB(ctx) + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverMySQL) if err != nil { t.Fatalf("GetTestDB() returned err = %v", err) } @@ -131,7 +131,7 @@ func TestQuotaManager_GetTokens_InformationSchema(t *testing.T) { for _, test := range tests { desc := fmt.Sprintf("useSelectCount = %v", test.useSelectCount) t.Run(desc, func(t *testing.T) { - db, done, err := testdb.NewTrillianDB(ctx) + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverMySQL) if err != nil { t.Fatalf("NewTrillianDB() returned err = %v", err) } @@ -182,7 +182,7 @@ func TestQuotaManager_Noops(t *testing.T) { testdb.SkipIfNoMySQL(t) ctx := context.Background() - db, done, err := testdb.NewTrillianDB(ctx) + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverMySQL) if err != nil { t.Fatalf("GetTestDB() returned err = %v", err) } diff --git a/scripts/resetcrdb.sh b/scripts/resetcrdb.sh new file mode 100755 index 0000000000..4ffd669548 --- /dev/null +++ b/scripts/resetcrdb.sh @@ -0,0 +1,118 @@ +#!/bin/bash + +set -e + +usage() { + cat < /dev/stderr + exit 1 +} + +collect_vars() { + # set unset environment variables to defaults + 
[ -z ${CRDB_ROOT_USER+x} ] && CRDB_ROOT_USER="root" + [ -z ${CRDB_HOST+x} ] && CRDB_HOST="localhost" + [ -z ${CRDB_PORT+x} ] && CRDB_PORT="26257" + [ -z ${CRDB_DATABASE+x} ] && CRDB_DATABASE="defaultdb" + [ -z ${CRDB_USER+x} ] && CRDB_USER="test" + [ -z ${CRDB_PASSWORD+x} ] && CRDB_PASSWORD="zaphod" + [ -z ${CRDB_USER_HOST+x} ] && CRDB_USER_HOST="localhost" + [ -z ${CRDB_INSECURE+x} ] && CRDB_INSECURE="true" + [ -z ${CRDB_IN_CONTAINER+x} ] && CRDB_IN_CONTAINER="false" + [ -z ${CRDB_CONTAINER_NAME+x} ] && CRDB_CONTAINER_NAME="roach" + FLAGS=() + + # handle flags + FORCE=false + VERBOSE=false + while [[ $# -gt 0 ]]; do + case "$1" in + --force) FORCE=true ;; + --verbose) VERBOSE=true ;; + --help) usage; exit ;; + *) FLAGS+=("$1") + esac + shift 1 + done + + FLAGS+=(-u "${CRDB_ROOT_USER}") + FLAGS+=(--host "${CRDB_HOST}") + FLAGS+=(--port "${CRDB_PORT}") + + # Useful for debugging + FLAGS+=(--echo-sql) + + if [[ ${CRDB_INSECURE} = 'true' ]]; then + FLAGS+=(--insecure) + fi + + # Optionally print flags (before appending password) + [[ ${VERBOSE} = 'true' ]] && echo "- Using CRDB Flags: ${FLAGS[@]}" + + # append password if supplied + [ -z ${CRDB_ROOT_PASSWORD+x} ] || FLAGS+=(-p"${CRDB_ROOT_PASSWORD}") + + if [[ ${CRDB_IN_CONTAINER} = 'true' ]]; then + CMD="docker exec -i ${CRDB_CONTAINER_NAME} cockroach" + else + CMD="cockroach" + fi +} + +main() { + collect_vars "$@" + + readonly TRILLIAN_PATH=$(go list -f '{{.Dir}}' github.com/google/trillian) + + echo "Warning: about to destroy and reset database '${CRDB_DATABASE}'" + + [[ ${FORCE} = true ]] || read -p "Are you sure? [Y/N]: " -n 1 -r + echo # Print newline following the above prompt + + if [ -z ${REPLY+x} ] || [[ $REPLY =~ ^[Yy]$ ]] + then + echo "Resetting DB..." + set -eux + $CMD sql "${FLAGS[@]}" -e "DROP DATABASE IF EXISTS ${CRDB_DATABASE};" || \ + die "Error: Failed to drop database '${CRDB_DATABASE}'." 
+ $CMD sql "${FLAGS[@]}" -e "CREATE DATABASE ${CRDB_DATABASE};" || \ + die "Error: Failed to create database '${CRDB_DATABASE}'." + if [[ ${CRDB_INSECURE} = 'true' ]]; then + $CMD sql "${FLAGS[@]}" -e "CREATE USER IF NOT EXISTS ${CRDB_USER};" || \ + die "Error: Failed to create user '${CRDB_USER}'." + else + $CMD sql "${FLAGS[@]}" -e "CREATE USER IF NOT EXISTS ${CRDB_USER} WITH PASSWORD '${CRDB_PASSWORD}';" || \ + die "Error: Failed to create user '${CRDB_USER}'." + fi + $CMD sql "${FLAGS[@]}" -e "GRANT ALL PRIVILEGES ON DATABASE ${CRDB_DATABASE} TO ${CRDB_USER} WITH GRANT OPTION" || \ + die "Error: Failed to grant '${CRDB_USER}' user all privileges on '${CRDB_DATABASE}'." + $CMD sql "${FLAGS[@]}" -d ${CRDB_DATABASE} < ${TRILLIAN_PATH}/storage/crdb/schema/storage.sql || \ + die "Error: Failed to create tables in '${CRDB_DATABASE}' database." + echo "Reset Complete" + fi +} + +main "$@" diff --git a/storage/crdb/common_test.go b/storage/crdb/common_test.go new file mode 100644 index 0000000000..d530ea52b4 --- /dev/null +++ b/storage/crdb/common_test.go @@ -0,0 +1,105 @@ +// Copyright 2022 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "context" + "database/sql" + "flag" + "os" + "sync" + "testing" + + "github.com/cockroachdb/cockroach-go/v2/testserver" + "k8s.io/klog/v2" + + "github.com/google/trillian/storage/testdb" +) + +// testDBs holds a set of test databases, one per test. 
+var testDBs sync.Map + +type testDBHandle struct { + db *sql.DB + done func(context.Context) +} + +func (db *testDBHandle) GetDB() *sql.DB { + return db.db +} + +func TestMain(m *testing.M) { + flag.Parse() + + ts, err := testserver.NewTestServer() + if err != nil { + klog.Exitf("Failed to start test server: %v", err) + } + defer ts.Stop() + + // reset the test server URL path. By default cockroach sets it + // to point to a default database, we don't want that. + dburl := ts.PGURL() + dburl.Path = "/" + + // Set the environment variable for the test server + os.Setenv(testdb.CockroachDBURIEnv, dburl.String()) + + if !testdb.CockroachDBAvailable() { + klog.Errorf("CockroachDB not available, skipping all CockroachDB storage tests") + return + } + + status := m.Run() + + // Clean up databases + testDBs.Range(func(key, value interface{}) bool { + testName := key.(string) + klog.Infof("Cleaning up database for test %s", testName) + + db := value.(*testDBHandle) + + // TODO(jaosorior): Set a timeout instead of using Background + db.done(context.Background()) + + return true + }) + + os.Exit(status) +} + +// This is used to identify a database from the map +func getDBID(t *testing.T) string { + t.Helper() + return t.Name() +} + +func openTestDBOrDie(t *testing.T) *testDBHandle { + t.Helper() + + db, done, err := testdb.NewTrillianDB(context.TODO(), testdb.DriverCockroachDB) + if err != nil { + panic(err) + } + + handle := &testDBHandle{ + db: db, + done: done, + } + + testDBs.Store(getDBID(t), handle) + + return handle +} diff --git a/storage/crdb/drop_storage.sql b/storage/crdb/drop_storage.sql new file mode 100644 index 0000000000..6e407b5d40 --- /dev/null +++ b/storage/crdb/drop_storage.sql @@ -0,0 +1,9 @@ +-- Caution - this removes all tables in our schema + +DROP TABLE IF EXISTS Unsequenced; +DROP TABLE IF EXISTS Subtree; +DROP TABLE IF EXISTS SequencedLeafData; +DROP TABLE IF EXISTS TreeHead; +DROP TABLE IF EXISTS LeafData; +DROP TABLE IF EXISTS TreeControl; +DROP 
TABLE IF EXISTS Trees; diff --git a/storage/crdb/errors.go b/storage/crdb/errors.go new file mode 100644 index 0000000000..1c7eefb60d --- /dev/null +++ b/storage/crdb/errors.go @@ -0,0 +1,42 @@ +// Copyright 2021 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "github.com/lib/pq" +) + +var uniqueViolationErrorCode = pq.ErrorCode("23505") + +// crdbToGRPC converts some types of CockroachDB errors to GRPC errors. This gives +// clients more signal when the operation can be retried. +func crdbToGRPC(err error) error { + _, ok := err.(*pq.Error) + if !ok { + return err + } + // TODO(jaosorior): Do we have a crdb equivalent for a deadlock + // error code? + return err +} + +func isDuplicateErr(err error) bool { + switch err := err.(type) { + case *pq.Error: + return err.Code == uniqueViolationErrorCode + default: + return false + } +} diff --git a/storage/crdb/log_storage.go b/storage/crdb/log_storage.go new file mode 100644 index 0000000000..499402c44e --- /dev/null +++ b/storage/crdb/log_storage.go @@ -0,0 +1,848 @@ +// Copyright 2016 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "sort" + "strconv" + "sync" + "time" + + "github.com/transparency-dev/merkle/compact" + "github.com/transparency-dev/merkle/rfc6962" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + "k8s.io/klog/v2" + + "github.com/google/trillian" + "github.com/google/trillian/monitoring" + "github.com/google/trillian/storage" + "github.com/google/trillian/storage/cache" + "github.com/google/trillian/storage/tree" + "github.com/google/trillian/types" +) + +const ( + valuesPlaceholder5 = "($1,$2,$3,$4,$5)" + + insertLeafDataSQL = "INSERT INTO LeafData(TreeId,LeafIdentityHash,LeafValue,ExtraData,QueueTimestampNanos) VALUES" + valuesPlaceholder5 + insertSequencedLeafSQL = "INSERT INTO SequencedLeafData(TreeId,LeafIdentityHash,MerkleLeafHash,SequenceNumber,IntegrateTimestampNanos) VALUES" + + selectNonDeletedTreeIDByTypeAndStateSQL = ` + SELECT TreeId FROM Trees + WHERE TreeType IN($1,$2) + AND TreeState IN($3,$4) + AND (Deleted IS NULL OR Deleted = 'false')` + + selectLatestSignedLogRootSQL = `SELECT TreeHeadTimestamp,TreeSize,RootHash,TreeRevision,RootSignature + FROM TreeHead WHERE TreeId=$1 + ORDER BY TreeHeadTimestamp DESC LIMIT 1` + + selectLeavesByRangeSQL = `SELECT s.MerkleLeafHash,l.LeafIdentityHash,l.LeafValue,s.SequenceNumber,l.ExtraData,l.QueueTimestampNanos,s.IntegrateTimestampNanos + FROM LeafData l,SequencedLeafData s + WHERE l.LeafIdentityHash = s.LeafIdentityHash + 
AND s.SequenceNumber >= $1 AND s.SequenceNumber < $2 AND l.TreeId = $3 AND s.TreeId = l.TreeId` + orderBySequenceNumberSQL + + // These statements need to be expanded to provide the correct number of parameter placeholders. + // Note that this uses the MySQL-specific marker syntax here, but is eventually replaced with + // the postgres syntax in getStmt. + selectLeavesByMerkleHashSQL = `SELECT s.MerkleLeafHash,l.LeafIdentityHash,l.LeafValue,s.SequenceNumber,l.ExtraData,l.QueueTimestampNanos,s.IntegrateTimestampNanos + FROM LeafData l,SequencedLeafData s + WHERE l.LeafIdentityHash = s.LeafIdentityHash + AND s.MerkleLeafHash IN (` + placeholderSQL + `) AND l.TreeId = ? AND s.TreeId = l.TreeId` + // TODO(#1548): rework the code so the dummy hash isn't needed (e.g. this assumes hash size is 32) + dummyMerkleLeafHash = "00000000000000000000000000000000" + // This statement returns a dummy Merkle leaf hash value (which must be + // of the right size) so that its signature matches that of the other + // leaf-selection statements. + // Note that this uses the MySQL-specific marker syntax here, but is eventually replaced with + // the postgres syntax in getStmt. 
+ selectLeavesByLeafIdentityHashSQL = `SELECT '` + dummyMerkleLeafHash + `',l.LeafIdentityHash,l.LeafValue,-1,l.ExtraData,l.QueueTimestampNanos,s.IntegrateTimestampNanos + FROM LeafData l LEFT JOIN SequencedLeafData s ON (l.LeafIdentityHash = s.LeafIdentityHash AND l.TreeID = s.TreeID) + WHERE l.LeafIdentityHash IN (` + placeholderSQL + `) AND l.TreeId = ?` + + // Same as above except with leaves ordered by sequence so we only incur this cost when necessary + orderBySequenceNumberSQL = " ORDER BY s.SequenceNumber" + selectLeavesByMerkleHashOrderedBySequenceSQL = selectLeavesByMerkleHashSQL + orderBySequenceNumberSQL + + logIDLabel = "logid" +) + +var ( + once sync.Once + queuedCounter monitoring.Counter + queuedDupCounter monitoring.Counter + dequeuedCounter monitoring.Counter + + queueLatency monitoring.Histogram + queueInsertLatency monitoring.Histogram + queueReadLatency monitoring.Histogram + queueInsertLeafLatency monitoring.Histogram + queueInsertEntryLatency monitoring.Histogram + dequeueLatency monitoring.Histogram + dequeueSelectLatency monitoring.Histogram + dequeueRemoveLatency monitoring.Histogram +) + +func createMetrics(mf monitoring.MetricFactory) { + queuedCounter = mf.NewCounter("crdb_queued_leaves", "Number of leaves queued", logIDLabel) + queuedDupCounter = mf.NewCounter("crdb_queued_dup_leaves", "Number of duplicate leaves queued", logIDLabel) + dequeuedCounter = mf.NewCounter("crdb_dequeued_leaves", "Number of leaves dequeued", logIDLabel) + + queueLatency = mf.NewHistogram("crdb_queue_leaves_latency", "Latency of queue leaves operation in seconds", logIDLabel) + queueInsertLatency = mf.NewHistogram("crdb_queue_leaves_latency_insert", "Latency of insertion part of queue leaves operation in seconds", logIDLabel) + queueReadLatency = mf.NewHistogram("crdb_queue_leaves_latency_read_dups", "Latency of read-duplicates part of queue leaves operation in seconds", logIDLabel) + queueInsertLeafLatency = mf.NewHistogram("crdb_queue_leaf_latency_leaf", 
"Latency of insert-leaf part of queue (single) leaf operation in seconds", logIDLabel) + queueInsertEntryLatency = mf.NewHistogram("crdb_queue_leaf_latency_entry", "Latency of insert-entry part of queue (single) leaf operation in seconds", logIDLabel) + + dequeueLatency = mf.NewHistogram("crdb_dequeue_leaves_latency", "Latency of dequeue leaves operation in seconds", logIDLabel) + dequeueSelectLatency = mf.NewHistogram("crdb_dequeue_leaves_latency_select", "Latency of selection part of dequeue leaves operation in seconds", logIDLabel) + dequeueRemoveLatency = mf.NewHistogram("crdb_dequeue_leaves_latency_remove", "Latency of removal part of dequeue leaves operation in seconds", logIDLabel) +} + +func labelForTX(t *logTreeTX) string { + return strconv.FormatInt(t.treeID, 10) +} + +func observe(hist monitoring.Histogram, duration time.Duration, label string) { + hist.Observe(duration.Seconds(), label) +} + +type crdbLogStorage struct { + *crdbTreeStorage + admin storage.AdminStorage + metricFactory monitoring.MetricFactory +} + +// NewLogStorage creates a storage.LogStorage instance for the specified CockroachDB URL. +// It assumes storage.AdminStorage is backed by the same CockroachDB database as well. 
+func NewLogStorage(db *sql.DB, mf monitoring.MetricFactory) storage.LogStorage { + if mf == nil { + mf = monitoring.InertMetricFactory{} + } + return &crdbLogStorage{ + admin: NewSQLAdminStorage(db), + crdbTreeStorage: newTreeStorage(db), + metricFactory: mf, + } +} + +func (m *crdbLogStorage) CheckDatabaseAccessible(ctx context.Context) error { + return m.db.PingContext(ctx) +} + +func (m *crdbLogStorage) getLeavesByMerkleHashStmt(ctx context.Context, num int, orderBySequence bool) (*sql.Stmt, error) { + if orderBySequence { + return m.getStmt(ctx, selectLeavesByMerkleHashOrderedBySequenceSQL, num, "?", "?") + } + + return m.getStmt(ctx, selectLeavesByMerkleHashSQL, num, "?", "?") +} + +func (m *crdbLogStorage) getLeavesByLeafIdentityHashStmt(ctx context.Context, num int) (*sql.Stmt, error) { + return m.getStmt(ctx, selectLeavesByLeafIdentityHashSQL, num, "?", "?") +} + +func (m *crdbLogStorage) GetActiveLogIDs(ctx context.Context) ([]int64, error) { + // Include logs that are DRAINING in the active list as we're still + // integrating leaves into them. 
+ rows, err := m.db.QueryContext( + ctx, selectNonDeletedTreeIDByTypeAndStateSQL, + trillian.TreeType_LOG.String(), trillian.TreeType_PREORDERED_LOG.String(), + trillian.TreeState_ACTIVE.String(), trillian.TreeState_DRAINING.String()) + if err != nil { + return nil, err + } + defer rows.Close() + ids := []int64{} + for rows.Next() { + var treeID int64 + if err := rows.Scan(&treeID); err != nil { + return nil, err + } + ids = append(ids, treeID) + } + return ids, rows.Err() +} + +func (m *crdbLogStorage) beginInternal(ctx context.Context, tree *trillian.Tree) (*logTreeTX, error) { + once.Do(func() { + createMetrics(m.metricFactory) + }) + + stCache := cache.NewLogSubtreeCache(rfc6962.DefaultHasher) + ttx, err := m.beginTreeTx(ctx, tree, rfc6962.DefaultHasher.Size(), stCache) + if err != nil && err != storage.ErrTreeNeedsInit { + return nil, err + } + + ltx := &logTreeTX{ + treeTX: ttx, + ls: m, + dequeued: make(map[string]dequeuedLeaf), + } + ltx.slr, ltx.readRev, err = ltx.fetchLatestRoot(ctx) + if err == storage.ErrTreeNeedsInit { + ltx.treeTX.writeRevision = 0 + return ltx, err + } else if err != nil { + ttx.Close() + return nil, err + } + + if err := ltx.root.UnmarshalBinary(ltx.slr.LogRoot); err != nil { + ttx.Close() + return nil, err + } + + ltx.treeTX.writeRevision = ltx.readRev + 1 + return ltx, nil +} + +// TODO(pavelkalinnikov): This and many other methods of this storage +// implementation can leak a specific sql.ErrTxDone all the way to the client, +// if the transaction is rolled back as a result of a canceled context. It must +// return "generic" errors, and only log the specific ones for debugging. 
+func (m *crdbLogStorage) ReadWriteTransaction(ctx context.Context, tree *trillian.Tree, f storage.LogTXFunc) error { + tx, err := m.beginInternal(ctx, tree) + if err != nil && err != storage.ErrTreeNeedsInit { + return err + } + defer tx.Close() + if err := f(ctx, tx); err != nil { + return err + } + return tx.Commit(ctx) +} + +func (m *crdbLogStorage) AddSequencedLeaves(ctx context.Context, tree *trillian.Tree, leaves []*trillian.LogLeaf, timestamp time.Time) ([]*trillian.QueuedLogLeaf, error) { + tx, err := m.beginInternal(ctx, tree) + if tx != nil { + // Ensure we don't leak the transaction. For example if we get an + // ErrTreeNeedsInit from beginInternal() or if AddSequencedLeaves fails + // below. + defer tx.Close() + } + if err != nil { + return nil, err + } + res, err := tx.AddSequencedLeaves(ctx, leaves, timestamp) + if err != nil { + return nil, err + } + if err := tx.Commit(ctx); err != nil { + return nil, err + } + return res, nil +} + +func (m *crdbLogStorage) SnapshotForTree(ctx context.Context, tree *trillian.Tree) (storage.ReadOnlyLogTreeTX, error) { + tx, err := m.beginInternal(ctx, tree) + if err != nil && err != storage.ErrTreeNeedsInit { + return nil, err + } + return tx, err +} + +func (m *crdbLogStorage) QueueLeaves(ctx context.Context, tree *trillian.Tree, leaves []*trillian.LogLeaf, queueTimestamp time.Time) ([]*trillian.QueuedLogLeaf, error) { + tx, err := m.beginInternal(ctx, tree) + if tx != nil { + // Ensure we don't leak the transaction. For example if we get an + // ErrTreeNeedsInit from beginInternal() or if QueueLeaves fails + // below. 
+ defer tx.Close() + } + if err != nil { + return nil, err + } + existing, err := tx.QueueLeaves(ctx, leaves, queueTimestamp) + if err != nil { + return nil, err + } + + if err := tx.Commit(ctx); err != nil { + return nil, err + } + + ret := make([]*trillian.QueuedLogLeaf, len(leaves)) + for i, e := range existing { + if e != nil { + ret[i] = &trillian.QueuedLogLeaf{ + Leaf: e, + Status: status.Newf(codes.AlreadyExists, "leaf already exists: %v", e.LeafIdentityHash).Proto(), + } + continue + } + ret[i] = &trillian.QueuedLogLeaf{Leaf: leaves[i]} + } + return ret, nil +} + +type logTreeTX struct { + treeTX + ls *crdbLogStorage + root types.LogRootV1 + readRev int64 + slr *trillian.SignedLogRoot + dequeued map[string]dequeuedLeaf +} + +// GetMerkleNodes returns the requested nodes at the read revision. +func (t *logTreeTX) GetMerkleNodes(ctx context.Context, ids []compact.NodeID) ([]tree.Node, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + return t.subtreeCache.GetNodes(ids, t.getSubtreesAtRev(ctx, t.readRev)) +} + +func (t *logTreeTX) DequeueLeaves(ctx context.Context, limit int, cutoffTime time.Time) ([]*trillian.LogLeaf, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + + if t.treeType == trillian.TreeType_PREORDERED_LOG { + // TODO(pavelkalinnikov): Optimize this by fetching only the required + // fields of LogLeaf. We can avoid joining with LeafData table here. 
+ return t.getLeavesByRangeInternal(ctx, int64(t.root.TreeSize), int64(limit)) + } + + start := time.Now() + stx, err := t.tx.PrepareContext(ctx, selectQueuedLeavesSQL) + if err != nil { + klog.Warningf("Failed to prepare dequeue select: %s", err) + return nil, err + } + defer stx.Close() + + leaves := make([]*trillian.LogLeaf, 0, limit) + rows, err := stx.QueryContext(ctx, t.treeID, cutoffTime.UnixNano(), limit) + if err != nil { + klog.Warningf("Failed to select rows for work: %s", err) + return nil, err + } + defer rows.Close() + + for rows.Next() { + leaf, dqInfo, err := t.dequeueLeaf(rows) + if err != nil { + klog.Warningf("Error dequeuing leaf: %v", err) + return nil, err + } + + if len(leaf.LeafIdentityHash) != t.hashSizeBytes { + return nil, errors.New("dequeued a leaf with incorrect hash size") + } + + k := string(leaf.LeafIdentityHash) + if _, ok := t.dequeued[k]; ok { + // dupe, user probably called DequeueLeaves more than once. + continue + } + t.dequeued[k] = dqInfo + leaves = append(leaves, leaf) + } + + if rows.Err() != nil { + return nil, rows.Err() + } + label := labelForTX(t) + observe(dequeueSelectLatency, time.Since(start), label) + observe(dequeueLatency, time.Since(start), label) + dequeuedCounter.Add(float64(len(leaves)), label) + + return leaves, nil +} + +// sortLeavesForInsert returns a slice containing the passed in leaves sorted +// by LeafIdentityHash, and paired with their original positions. +// QueueLeaves and AddSequencedLeaves use this to make the order that LeafData +// row locks are acquired deterministic and reduce the chance of deadlocks. 
+func sortLeavesForInsert(leaves []*trillian.LogLeaf) []leafAndPosition { + ordLeaves := make([]leafAndPosition, len(leaves)) + for i, leaf := range leaves { + ordLeaves[i] = leafAndPosition{leaf: leaf, idx: i} + } + sort.Sort(byLeafIdentityHashWithPosition(ordLeaves)) + return ordLeaves +} + +func (t *logTreeTX) QueueLeaves(ctx context.Context, leaves []*trillian.LogLeaf, queueTimestamp time.Time) ([]*trillian.LogLeaf, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + + // Don't accept batches if any of the leaves are invalid. + for _, leaf := range leaves { + if len(leaf.LeafIdentityHash) != t.hashSizeBytes { + return nil, fmt.Errorf("queued leaf must have a leaf ID hash of length %d", t.hashSizeBytes) + } + leaf.QueueTimestamp = timestamppb.New(queueTimestamp) + if err := leaf.QueueTimestamp.CheckValid(); err != nil { + return nil, fmt.Errorf("got invalid queue timestamp: %w", err) + } + } + start := time.Now() + label := labelForTX(t) + + ordLeaves := sortLeavesForInsert(leaves) + existingCount := 0 + existingLeaves := make([]*trillian.LogLeaf, len(leaves)) + + // CockroachDB/Postgres will cancel a transaction if an insert + // statement is run with a duplicate key. This is not ideal for + // QueueLeaves, as we want to detect these errors in-code + // and return them as AlreadyExists errors and add metrics. + // Thus, we use a SAVEPOINT and rollback on duplicates. 
+ const savepoint = "SAVEPOINT QueueLeaves" + if _, err := t.tx.ExecContext(ctx, savepoint); err != nil { + klog.Errorf("Error adding savepoint: %s", err) + return nil, crdbToGRPC(err) + } + + for _, ol := range ordLeaves { + i, leaf := ol.idx, ol.leaf + + leafStart := time.Now() + if err := leaf.QueueTimestamp.CheckValid(); err != nil { + return nil, fmt.Errorf("got invalid queue timestamp: %w", err) + } + qTimestamp := leaf.QueueTimestamp.AsTime() + + if _, err := t.tx.ExecContext(ctx, savepoint); err != nil { + klog.Errorf("Error updating savepoint: %s", err) + return nil, crdbToGRPC(err) + } + + _, err := t.tx.ExecContext(ctx, insertLeafDataSQL, t.treeID, leaf.LeafIdentityHash, leaf.LeafValue, leaf.ExtraData, qTimestamp.UnixNano()) + insertDuration := time.Since(leafStart) + observe(queueInsertLeafLatency, insertDuration, label) + if isDuplicateErr(err) { + // Remember the duplicate leaf, using the requested leaf for now. + existingLeaves[i] = leaf + existingCount++ + queuedDupCounter.Inc(label) + // Note: one must roll back since there are side-effects in the transaction + // in crdb/postgres + if _, err := t.tx.ExecContext(ctx, "ROLLBACK TO "+savepoint); err != nil { + klog.Errorf("Error rolling back to savepoint: %s", err) + return nil, crdbToGRPC(err) + } + continue + } + if err != nil { + klog.Warningf("Error inserting %d into LeafData: %s", i, err) + return nil, crdbToGRPC(err) + } + + // Create the work queue entry + args := []interface{}{ + t.treeID, + leaf.LeafIdentityHash, + leaf.MerkleLeafHash, + } + args = append(args, queueArgs(t.treeID, leaf.LeafIdentityHash, qTimestamp)...) 
+ _, err = t.tx.ExecContext( + ctx, + insertUnsequencedEntrySQL, + args..., + ) + if err != nil { + klog.Warningf("Error inserting into Unsequenced: %s", err) + return nil, crdbToGRPC(err) + } + leafDuration := time.Since(leafStart) + observe(queueInsertEntryLatency, (leafDuration - insertDuration), label) + } + insertDuration := time.Since(start) + observe(queueInsertLatency, insertDuration, label) + queuedCounter.Add(float64(len(leaves)), label) + + if _, err := t.tx.ExecContext(ctx, "RELEASE "+savepoint); err != nil { + klog.Errorf("Error releasing savepoint: %s", err) + return nil, crdbToGRPC(err) + } + + if existingCount == 0 { + return existingLeaves, nil + } + + // For existing leaves, we need to retrieve the contents. First collate the desired LeafIdentityHash values. + var toRetrieve [][]byte + for _, existing := range existingLeaves { + if existing != nil { + toRetrieve = append(toRetrieve, existing.LeafIdentityHash) + } + } + results, err := t.getLeafDataByIdentityHash(ctx, toRetrieve) + if err != nil { + return nil, fmt.Errorf("failed to retrieve existing leaves: %v", err) + } + if len(results) != len(toRetrieve) { + return nil, fmt.Errorf("failed to retrieve all existing leaves: got %d, want %d", len(results), len(toRetrieve)) + } + // Replace the requested leaves with the actual leaves. 
+ for i, requested := range existingLeaves { + if requested == nil { + continue + } + found := false + for _, result := range results { + if bytes.Equal(result.LeafIdentityHash, requested.LeafIdentityHash) { + existingLeaves[i] = result + found = true + break + } + } + if !found { + return nil, fmt.Errorf("failed to find existing leaf for hash %x", requested.LeafIdentityHash) + } + } + totalDuration := time.Since(start) + readDuration := totalDuration - insertDuration + observe(queueReadLatency, readDuration, label) + observe(queueLatency, totalDuration, label) + + return existingLeaves, nil +} + +func (t *logTreeTX) AddSequencedLeaves(ctx context.Context, leaves []*trillian.LogLeaf, timestamp time.Time) ([]*trillian.QueuedLogLeaf, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + + res := make([]*trillian.QueuedLogLeaf, len(leaves)) + ok := status.New(codes.OK, "OK").Proto() + + // Leaves in this transaction are inserted in two tables. For each leaf, if + // one of the two inserts fails, we remove the side effect by rolling back to + // a savepoint installed before the first insert of the two. + const savepoint = "SAVEPOINT AddSequencedLeaves" + if _, err := t.tx.ExecContext(ctx, savepoint); err != nil { + klog.Errorf("Error adding savepoint: %s", err) + return nil, crdbToGRPC(err) + } + // TODO(pavelkalinnikov): Consider performance implication of executing this + // extra SAVEPOINT, especially for 1-entry batches. Optimize if necessary. + + // Note: LeafData inserts are presumably protected from deadlocks due to + // sorting, but the order of the corresponding SequencedLeafData inserts + // becomes indeterministic. However, in a typical case when leaves are + // supplied in contiguous non-intersecting batches, the chance of having + // circular dependencies between transactions is significantly lower. 
+ ordLeaves := sortLeavesForInsert(leaves) + for _, ol := range ordLeaves { + i, leaf := ol.idx, ol.leaf + + // This should fail on insert, but catch it early. + if got, want := len(leaf.LeafIdentityHash), t.hashSizeBytes; got != want { + return nil, status.Errorf(codes.FailedPrecondition, "leaves[%d] has incorrect hash size %d, want %d", i, got, want) + } + + if _, err := t.tx.ExecContext(ctx, savepoint); err != nil { + klog.Errorf("Error updating savepoint: %s", err) + return nil, crdbToGRPC(err) + } + + res[i] = &trillian.QueuedLogLeaf{Status: ok} + + // TODO(pavelkalinnikov): Measure latencies. + _, err := t.tx.ExecContext(ctx, insertLeafDataSQL, + t.treeID, leaf.LeafIdentityHash, leaf.LeafValue, leaf.ExtraData, timestamp.UnixNano()) + // TODO(pavelkalinnikov): Detach PREORDERED_LOG integration latency metric. + + // TODO(pavelkalinnikov): Support opting out from duplicates detection. + if isDuplicateErr(err) { + res[i].Status = status.New(codes.FailedPrecondition, "conflicting LeafIdentityHash").Proto() + // Note: one must roll back since there are side-effects in the transaction + // in crdb/postgres + if _, err := t.tx.ExecContext(ctx, "ROLLBACK TO "+savepoint); err != nil { + klog.Errorf("Error rolling back to savepoint: %s", err) + return nil, crdbToGRPC(err) + } + continue + } else if err != nil { + klog.Errorf("Error inserting leaves[%d] into LeafData: %s", i, err) + return nil, crdbToGRPC(err) + } + + _, err = t.tx.ExecContext(ctx, insertSequencedLeafSQL+valuesPlaceholder5, + t.treeID, leaf.LeafIdentityHash, leaf.MerkleLeafHash, leaf.LeafIndex, 0) + // TODO(pavelkalinnikov): Update IntegrateTimestamp on integrating the leaf. 
+ + if isDuplicateErr(err) { + res[i].Status = status.New(codes.FailedPrecondition, "conflicting LeafIndex").Proto() + if _, err := t.tx.ExecContext(ctx, "ROLLBACK TO "+savepoint); err != nil { + klog.Errorf("Error rolling back to savepoint: %s", err) + return nil, crdbToGRPC(err) + } + } else if err != nil { + klog.Errorf("Error inserting leaves[%d] into SequencedLeafData: %s", i, err) + return nil, crdbToGRPC(err) + } + + // TODO(pavelkalinnikov): Load LeafData for conflicting entries. + } + + if _, err := t.tx.ExecContext(ctx, "RELEASE "+savepoint); err != nil { + klog.Errorf("Error releasing savepoint: %s", err) + return nil, crdbToGRPC(err) + } + + return res, nil +} + +func (t *logTreeTX) GetLeavesByRange(ctx context.Context, start, count int64) ([]*trillian.LogLeaf, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + return t.getLeavesByRangeInternal(ctx, start, count) +} + +func (t *logTreeTX) getLeavesByRangeInternal(ctx context.Context, start, count int64) ([]*trillian.LogLeaf, error) { + if count <= 0 { + return nil, status.Errorf(codes.InvalidArgument, "invalid count %d, want > 0", count) + } + if start < 0 { + return nil, status.Errorf(codes.InvalidArgument, "invalid start %d, want >= 0", start) + } + + if t.treeType == trillian.TreeType_LOG { + treeSize := int64(t.root.TreeSize) + if treeSize <= 0 { + return nil, status.Errorf(codes.OutOfRange, "empty tree") + } else if start >= treeSize { + return nil, status.Errorf(codes.OutOfRange, "invalid start %d, want < TreeSize(%d)", start, treeSize) + } + // Ensure no entries queried/returned beyond the tree. + if maxCount := treeSize - start; count > maxCount { + count = maxCount + } + } + // TODO(pavelkalinnikov): Further clip `count` to a safe upper bound like 64k. + + args := []interface{}{start, start + count, t.treeID} + rows, err := t.tx.QueryContext(ctx, selectLeavesByRangeSQL, args...) 
+ if err != nil { + klog.Warningf("Failed to get leaves by range: %s", err) + return nil, err + } + defer rows.Close() + + ret := make([]*trillian.LogLeaf, 0, count) + for wantIndex := start; rows.Next(); wantIndex++ { + leaf := &trillian.LogLeaf{} + var qTimestamp, iTimestamp int64 + if err := rows.Scan( + &leaf.MerkleLeafHash, + &leaf.LeafIdentityHash, + &leaf.LeafValue, + &leaf.LeafIndex, + &leaf.ExtraData, + &qTimestamp, + &iTimestamp); err != nil { + klog.Warningf("Failed to scan merkle leaves: %s", err) + return nil, err + } + if leaf.LeafIndex != wantIndex { + if wantIndex < int64(t.root.TreeSize) { + return nil, fmt.Errorf("got unexpected index %d, want %d", leaf.LeafIndex, wantIndex) + } + break + } + leaf.QueueTimestamp = timestamppb.New(time.Unix(0, qTimestamp)) + if err := leaf.QueueTimestamp.CheckValid(); err != nil { + return nil, fmt.Errorf("got invalid queue timestamp: %w", err) + } + leaf.IntegrateTimestamp = timestamppb.New(time.Unix(0, iTimestamp)) + if err := leaf.IntegrateTimestamp.CheckValid(); err != nil { + return nil, fmt.Errorf("got invalid integrate timestamp: %w", err) + } + ret = append(ret, leaf) + } + if err := rows.Err(); err != nil { + klog.Warningf("Failed to read returned leaves: %s", err) + return nil, err + } + + return ret, nil +} + +func (t *logTreeTX) GetLeavesByHash(ctx context.Context, leafHashes [][]byte, orderBySequence bool) ([]*trillian.LogLeaf, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + + tmpl, err := t.ls.getLeavesByMerkleHashStmt(ctx, len(leafHashes), orderBySequence) + if err != nil { + return nil, err + } + + return t.getLeavesByHashInternal(ctx, leafHashes, tmpl, "merkle") +} + +// getLeafDataByIdentityHash retrieves leaf data by LeafIdentityHash, returned +// as a slice of LogLeaf objects for convenience. However, note that the +// returned LogLeaf objects will not have a valid MerkleLeafHash, LeafIndex, or IntegrateTimestamp. 
+func (t *logTreeTX) getLeafDataByIdentityHash(ctx context.Context, leafHashes [][]byte) ([]*trillian.LogLeaf, error) { + tmpl, err := t.ls.getLeavesByLeafIdentityHashStmt(ctx, len(leafHashes)) + if err != nil { + return nil, err + } + return t.getLeavesByHashInternal(ctx, leafHashes, tmpl, "leaf-identity") +} + +func (t *logTreeTX) LatestSignedLogRoot(ctx context.Context) (*trillian.SignedLogRoot, error) { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + + if t.slr == nil { + return nil, storage.ErrTreeNeedsInit + } + + return t.slr, nil +} + +// fetchLatestRoot reads the latest root and the revision from the DB. +func (t *logTreeTX) fetchLatestRoot(ctx context.Context) (*trillian.SignedLogRoot, int64, error) { + var timestamp, treeSize, treeRevision int64 + var rootHash, rootSignatureBytes []byte + if err := t.tx.QueryRowContext( + ctx, selectLatestSignedLogRootSQL, t.treeID).Scan( + ×tamp, &treeSize, &rootHash, &treeRevision, &rootSignatureBytes, + ); err == sql.ErrNoRows { + // It's possible there are no roots for this tree yet + return nil, 0, storage.ErrTreeNeedsInit + } + + // Put logRoot back together. Fortunately LogRoot has a deterministic serialization. 
+ logRoot, err := (&types.LogRootV1{ + RootHash: rootHash, + TimestampNanos: uint64(timestamp), + TreeSize: uint64(treeSize), + }).MarshalBinary() + if err != nil { + return nil, 0, err + } + + return &trillian.SignedLogRoot{LogRoot: logRoot}, treeRevision, nil +} + +func (t *logTreeTX) StoreSignedLogRoot(ctx context.Context, root *trillian.SignedLogRoot) error { + t.treeTX.mu.Lock() + defer t.treeTX.mu.Unlock() + + var logRoot types.LogRootV1 + if err := logRoot.UnmarshalBinary(root.LogRoot); err != nil { + klog.Warningf("Failed to parse log root: %x %v", root.LogRoot, err) + return err + } + if len(logRoot.Metadata) != 0 { + return fmt.Errorf("unimplemented: crdb storage does not support log root metadata") + } + + res, err := t.tx.ExecContext( + ctx, + insertTreeHeadSQL, + t.treeID, + logRoot.TimestampNanos, + logRoot.TreeSize, + logRoot.RootHash, + t.treeTX.writeRevision, + []byte{}) + if err != nil { + klog.Warningf("Failed to store signed root: %s", err) + } + + return checkResultOkAndRowCountIs(res, err, 1) +} + +func (t *logTreeTX) getLeavesByHashInternal(ctx context.Context, leafHashes [][]byte, tmpl *sql.Stmt, desc string) ([]*trillian.LogLeaf, error) { + stx := t.tx.StmtContext(ctx, tmpl) + defer stx.Close() + + var args []interface{} + for _, hash := range leafHashes { + args = append(args, []byte(hash)) + } + args = append(args, t.treeID) + rows, err := stx.QueryContext(ctx, args...) 
+ if err != nil { + klog.Warningf("Query() %s hash = %v", desc, err) + return nil, err + } + defer rows.Close() + + // The tree could include duplicates so we don't know how many results will be returned + var ret []*trillian.LogLeaf + for rows.Next() { + leaf := &trillian.LogLeaf{} + // We might be using a LEFT JOIN in our statement, so leaves which are + // queued but not yet integrated will have a NULL IntegrateTimestamp + // when there's no corresponding entry in SequencedLeafData, even though + // the table definition forbids that, so we use a nullable type here and + // check its validity below. + var integrateTS sql.NullInt64 + var queueTS int64 + + if err := rows.Scan(&leaf.MerkleLeafHash, &leaf.LeafIdentityHash, &leaf.LeafValue, &leaf.LeafIndex, &leaf.ExtraData, &queueTS, &integrateTS); err != nil { + klog.Warningf("LogID: %d Scan() %s = %s", t.treeID, desc, err) + return nil, err + } + leaf.QueueTimestamp = timestamppb.New(time.Unix(0, queueTS)) + if err := leaf.QueueTimestamp.CheckValid(); err != nil { + return nil, fmt.Errorf("got invalid queue timestamp: %w", err) + } + if integrateTS.Valid { + leaf.IntegrateTimestamp = timestamppb.New(time.Unix(0, integrateTS.Int64)) + if err := leaf.IntegrateTimestamp.CheckValid(); err != nil { + return nil, fmt.Errorf("got invalid integrate timestamp: %w", err) + } + } + + if got, want := len(leaf.MerkleLeafHash), t.hashSizeBytes; got != want { + return nil, fmt.Errorf("LogID: %d Scanned leaf %s does not have hash length %d, got %d", t.treeID, desc, want, got) + } + + ret = append(ret, leaf) + } + if err := rows.Err(); err != nil { + klog.Warningf("Failed to read returned leaves: %s", err) + return nil, err + } + + return ret, nil +} + +// leafAndPosition records original position before sort. 
+type leafAndPosition struct { + leaf *trillian.LogLeaf + idx int +} + +// byLeafIdentityHashWithPosition allows sorting (as above), but where we need +// to remember the original position +type byLeafIdentityHashWithPosition []leafAndPosition + +func (l byLeafIdentityHashWithPosition) Len() int { + return len(l) +} + +func (l byLeafIdentityHashWithPosition) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l byLeafIdentityHashWithPosition) Less(i, j int) bool { + return bytes.Compare(l[i].leaf.LeafIdentityHash, l[j].leaf.LeafIdentityHash) == -1 +} diff --git a/storage/crdb/log_storage_test.go b/storage/crdb/log_storage_test.go new file mode 100644 index 0000000000..e00ac458da --- /dev/null +++ b/storage/crdb/log_storage_test.go @@ -0,0 +1,869 @@ +// Copyright 2016 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "bytes" + "context" + "crypto/sha256" + "database/sql" + "fmt" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/trillian" + "github.com/google/trillian/integration/storagetest" + "github.com/google/trillian/storage" + "github.com/google/trillian/storage/testonly" + "github.com/google/trillian/types" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + + _ "github.com/lib/pq" // Register the Postgres driver. 
+) + +// Must be 32 bytes to match sha256 length if it was a real hash +var ( + dummyHash = []byte("hashxxxxhashxxxxhashxxxxhashxxxx") + dummyRawHash = []byte("xxxxhashxxxxhashxxxxhashxxxxhash") + dummyHash2 = []byte("HASHxxxxhashxxxxhashxxxxhashxxxx") +) + +// Time we will queue all leaves at +var fakeQueueTime = time.Date(2016, 11, 10, 15, 16, 27, 0, time.UTC) + +// Time we will integrate all leaves at +var fakeIntegrateTime = time.Date(2016, 11, 10, 15, 16, 30, 0, time.UTC) + +// Time we'll request for guard cutoff in tests that don't test this (should include all above) +var fakeDequeueCutoffTime = time.Date(2016, 11, 10, 15, 16, 30, 0, time.UTC) + +// Used for tests involving extra data +var someExtraData = []byte("Some extra data") + +const ( + leavesToInsert = 5 + sequenceNumber int64 = 237 +) + +// Tests that access the db should each use a distinct log ID to prevent lock contention when +// run in parallel or race conditions / unexpected interactions. Tests that pass should hold +// no locks afterwards. 
+
+// createFakeLeaf writes a leaf directly into the LeafData and
+// SequencedLeafData tables, bypassing the storage API, and returns the
+// LogLeaf proto the storage layer should reconstruct for it.
+func createFakeLeaf(ctx context.Context, db *sql.DB, logID int64, rawHash, hash, data, extraData []byte, seq int64, t *testing.T) *trillian.LogLeaf {
+	t.Helper()
+	queuedAtNanos := fakeQueueTime.UnixNano()
+	integratedAtNanos := fakeIntegrateTime.UnixNano()
+	// Check each insert as it executes so a failure in the first statement
+	// doesn't also run the second one against a known-bad state.
+	if _, err := db.ExecContext(ctx, "INSERT INTO LeafData(TreeId, LeafIdentityHash, LeafValue, ExtraData, QueueTimestampNanos) VALUES($1,$2,$3,$4,$5)",
+		logID, rawHash, data, extraData, queuedAtNanos); err != nil {
+		t.Fatalf("Failed to create test leaves: %v", err)
+	}
+	if _, err := db.ExecContext(ctx, "INSERT INTO SequencedLeafData(TreeId, SequenceNumber, LeafIdentityHash, MerkleLeafHash, IntegrateTimestampNanos) VALUES($1,$2,$3,$4,$5)",
+		logID, seq, rawHash, hash, integratedAtNanos); err != nil {
+		t.Fatalf("Failed to create test leaves: %v", err)
+	}
+	queueTimestamp := timestamppb.New(fakeQueueTime)
+	integrateTimestamp := timestamppb.New(fakeIntegrateTime)
+	return &trillian.LogLeaf{
+		MerkleLeafHash:     hash,
+		LeafValue:          data,
+		ExtraData:          extraData,
+		LeafIndex:          seq,
+		LeafIdentityHash:   rawHash,
+		QueueTimestamp:     queueTimestamp,
+		IntegrateTimestamp: integrateTimestamp,
+	}
+}
+
+// checkLeafContents fails the test unless the supplied leaf carries exactly
+// the hashes, sequence number, data and extra data it was created with.
+func checkLeafContents(leaf *trillian.LogLeaf, seq int64, rawHash, hash, data, extraData []byte, t *testing.T) {
+	t.Helper()
+	if got, want := leaf.MerkleLeafHash, hash; !bytes.Equal(got, want) {
+		t.Fatalf("Wrong leaf hash in returned leaf got\n%v\nwant:\n%v", got, want)
+	}
+
+	if got, want := leaf.LeafIdentityHash, rawHash; !bytes.Equal(got, want) {
+		t.Fatalf("Wrong raw leaf hash in returned leaf got\n%v\nwant:\n%v", got, want)
+	}
+
+	if got, want := seq, leaf.LeafIndex; got != want {
+		t.Fatalf("Bad sequence number in returned leaf got: %d, want:%d", got, want)
+	}
+
+	if got, want := leaf.LeafValue, data; !bytes.Equal(got, want) {
+		t.Fatalf("Unexpected data in returned leaf. got:\n%v\nwant:\n%v", got, want)
+	}
+
+	if got, want := leaf.ExtraData, extraData; !bytes.Equal(got, want) {
+		t.Fatalf("Unexpected data in returned leaf. 
got:\n%v\nwant:\n%v", got, want) + } + + iTime := leaf.IntegrateTimestamp.AsTime() + if got, want := iTime.UnixNano(), fakeIntegrateTime.UnixNano(); got != want { + t.Errorf("Wrong IntegrateTimestamp: got %v, want %v", got, want) + } +} + +func TestLogSuite(t *testing.T) { + t.Parallel() + + storageFactory := func(context.Context, *testing.T) (storage.LogStorage, storage.AdminStorage) { + handle := openTestDBOrDie(t) + return NewLogStorage(handle.db, nil), NewSQLAdminStorage(handle.db) + } + + storagetest.RunLogStorageTests(t, storageFactory) +} + +func TestQueueDuplicateLeaf(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + mustSignAndStoreLogRoot(ctx, t, s, tree, 0) + + count := 15 + leaves := createTestLeaves(int64(count), 10) + leaves2 := createTestLeaves(int64(count), 12) + leaves3 := createTestLeaves(3, 100) + + // Note that tests accumulate queued leaves on top of each other. + tests := []struct { + desc string + leaves []*trillian.LogLeaf + want []*trillian.LogLeaf + }{ + { + desc: "[10, 11, 12, ...]", + leaves: leaves, + want: make([]*trillian.LogLeaf, count), + }, + { + desc: "[12, 13, 14, ...] so first (count-2) are duplicates", + leaves: leaves2, + want: append(leaves[2:], nil, nil), + }, + { + desc: "[10, 100, 11, 101, 102] so [dup, new, dup, new, dup]", + leaves: []*trillian.LogLeaf{leaves[0], leaves3[0], leaves[1], leaves3[1], leaves[2]}, + want: []*trillian.LogLeaf{leaves[0], nil, leaves[1], nil, leaves[2]}, + }, + } + + for _, test := range tests { + // NOTE(jaosorior): These tests can't be parallelized as they + // depend on each others' leaves. 
+		t.Run(test.desc, func(t *testing.T) {
+			existing, err := s.QueueLeaves(ctx, tree, test.leaves, fakeQueueTime)
+			if err != nil {
+				t.Fatalf("Failed to queue leaves: %v", err)
+			}
+
+			if len(existing) != len(test.want) {
+				t.Fatalf("|QueueLeaves()|=%d; want %d", len(existing), len(test.want))
+			}
+			for i, want := range test.want {
+				got := existing[i]
+				if want == nil {
+					if got.Status != nil {
+						t.Errorf("QueueLeaves()[%d].Code: %v; want %v", i, got, want)
+					}
+					// This entry was expected to be newly queued (nil want means
+					// "no duplicate returned"); move on to the next leaf. The
+					// previous `return` here ended the subtest at the first such
+					// entry and silently skipped checking the rest of the batch.
+					continue
+				}
+				if got == nil {
+					t.Fatalf("QueueLeaves()[%d]=nil; want non-nil", i)
+				} else if !bytes.Equal(got.Leaf.LeafIdentityHash, want.LeafIdentityHash) {
+					t.Fatalf("QueueLeaves()[%d].LeafIdentityHash=%x; want %x", i, got.Leaf.LeafIdentityHash, want.LeafIdentityHash)
+				}
+			}
+		})
+	}
+}
+
+func TestQueueLeaves(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+	handle := openTestDBOrDie(t)
+	as := NewSQLAdminStorage(handle.db)
+	tree := mustCreateTree(ctx, t, as, testonly.LogTree)
+	s := NewLogStorage(handle.db, nil)
+	mustSignAndStoreLogRoot(ctx, t, s, tree, 0)
+
+	leaves := createTestLeaves(leavesToInsert, 20)
+	if _, err := s.QueueLeaves(ctx, tree, leaves, fakeQueueTime); err != nil {
+		t.Fatalf("Failed to queue leaves: %v", err)
+	}
+
+	// Should see the leaves in the database. There is no API to read from the unsequenced data.
+ var count int + if err := handle.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM Unsequenced WHERE TreeID=$1", tree.TreeId).Scan(&count); err != nil { + t.Fatalf("Could not query row count: %v", err) + } + if leavesToInsert != count { + t.Fatalf("Expected %d unsequenced rows but got: %d", leavesToInsert, count) + } + + // Additional check on timestamp being set correctly in the database + var queueTimestamp int64 + if err := handle.db.QueryRowContext(ctx, "SELECT DISTINCT QueueTimestampNanos FROM Unsequenced WHERE TreeID=$1", tree.TreeId).Scan(&queueTimestamp); err != nil { + t.Fatalf("Could not query timestamp: %v", err) + } + if got, want := queueTimestamp, fakeQueueTime.UnixNano(); got != want { + t.Fatalf("Incorrect queue timestamp got: %d want: %d", got, want) + } +} + +func TestQueueLeavesDuplicateBigBatch(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + mustSignAndStoreLogRoot(ctx, t, s, tree, 0) + + const leafCount = 999 + 1 + leaves := createTestLeaves(leafCount, 20) + + if _, err := s.QueueLeaves(ctx, tree, leaves, fakeQueueTime); err != nil { + t.Fatalf("Failed to queue leaves: %v", err) + } + + if _, err := s.QueueLeaves(ctx, tree, leaves, fakeQueueTime); err != nil { + t.Fatalf("Failed to queue leaves: %v", err) + } + + // Should see the leaves in the database. There is no API to read from the unsequenced data. 
+ var count int + if err := handle.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM Unsequenced WHERE TreeID=$1", tree.TreeId).Scan(&count); err != nil { + t.Fatalf("Could not query row count: %v", err) + } + if leafCount != count { + t.Fatalf("Expected %d unsequenced rows but got: %d", leafCount, count) + } +} + +// ----------------------------------------------------------------------------- + +func TestDequeueLeavesHaveQueueTimestamp(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + mustSignAndStoreLogRoot(ctx, t, s, tree, 0) + + leaves := createTestLeaves(leavesToInsert, 20) + if _, err := s.QueueLeaves(ctx, tree, leaves, fakeDequeueCutoffTime); err != nil { + t.Fatalf("Failed to queue leaves: %v", err) + } + + { + // Now try to dequeue them + runLogTX(s, tree, t, func(ctx context.Context, tx2 storage.LogTreeTX) error { + leaves2, err := tx2.DequeueLeaves(ctx, 99, fakeDequeueCutoffTime) + if err != nil { + t.Fatalf("Failed to dequeue leaves: %v", err) + } + if len(leaves2) != leavesToInsert { + t.Fatalf("Dequeued %d leaves but expected to get %d", len(leaves2), leavesToInsert) + } + ensureLeavesHaveQueueTimestamp(t, leaves2, fakeDequeueCutoffTime) + return nil + }) + } +} + +// Queues leaves and attempts to dequeue before the guard cutoff allows it. This should +// return nothing. Then retry with an inclusive guard cutoff and ensure the leaves +// are returned. 
+func TestDequeueLeavesGuardInterval(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + mustSignAndStoreLogRoot(ctx, t, s, tree, 0) + + leaves := createTestLeaves(leavesToInsert, 20) + if _, err := s.QueueLeaves(ctx, tree, leaves, fakeQueueTime); err != nil { + t.Fatalf("Failed to queue leaves: %v", err) + } + + { + // Now try to dequeue them using a cutoff that means we should get none + runLogTX(s, tree, t, func(ctx context.Context, tx2 storage.LogTreeTX) error { + leaves2, err := tx2.DequeueLeaves(ctx, 99, fakeQueueTime.Add(-time.Second)) + if err != nil { + t.Fatalf("Failed to dequeue leaves: %v", err) + } + if len(leaves2) != 0 { + t.Fatalf("Dequeued %d leaves when they all should be in guard interval", len(leaves2)) + } + + // Try to dequeue again using a cutoff that should include them + leaves2, err = tx2.DequeueLeaves(ctx, 99, fakeQueueTime.Add(time.Second)) + if err != nil { + t.Fatalf("Failed to dequeue leaves: %v", err) + } + if len(leaves2) != leavesToInsert { + t.Fatalf("Dequeued %d leaves but expected to get %d", len(leaves2), leavesToInsert) + } + ensureAllLeavesDistinct(leaves2, t) + return nil + }) + } +} + +func TestDequeueLeavesTimeOrdering(t *testing.T) { + t.Parallel() + + // Queue two small batches of leaves at different timestamps. Do two separate dequeue + // transactions and make sure the returned leaves are respecting the time ordering of the + // queue. 
+ ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + mustSignAndStoreLogRoot(ctx, t, s, tree, 0) + + batchSize := 2 + leaves := createTestLeaves(int64(batchSize), 0) + leaves2 := createTestLeaves(int64(batchSize), int64(batchSize)) + + if _, err := s.QueueLeaves(ctx, tree, leaves, fakeQueueTime); err != nil { + t.Fatalf("QueueLeaves(1st batch) = %v", err) + } + // These are one second earlier so should be dequeued first + if _, err := s.QueueLeaves(ctx, tree, leaves2, fakeQueueTime.Add(-time.Second)); err != nil { + t.Fatalf("QueueLeaves(2nd batch) = %v", err) + } + + { + // Now try to dequeue two leaves and we should get the second batch + runLogTX(s, tree, t, func(ctx context.Context, tx2 storage.LogTreeTX) error { + dequeue1, err := tx2.DequeueLeaves(ctx, batchSize, fakeQueueTime) + if err != nil { + t.Fatalf("DequeueLeaves(1st) = %v", err) + } + if got, want := len(dequeue1), batchSize; got != want { + t.Fatalf("Dequeue count mismatch (1st) got: %d, want: %d", got, want) + } + ensureAllLeavesDistinct(dequeue1, t) + + // Ensure this is the second batch queued by comparing leaf hashes (must be distinct as + // the leaf data was). 
+ if !leafInBatch(dequeue1[0], leaves2) || !leafInBatch(dequeue1[1], leaves2) { + t.Fatalf("Got leaf from wrong batch (1st dequeue): %v", dequeue1) + } + iTimestamp := timestamppb.Now() + for i, l := range dequeue1 { + l.IntegrateTimestamp = iTimestamp + l.LeafIndex = int64(i) + } + if err := tx2.UpdateSequencedLeaves(ctx, dequeue1); err != nil { + t.Fatalf("UpdateSequencedLeaves(): %v", err) + } + + return nil + }) + + // Try to dequeue again and we should get the batch that was queued first, though at a later time + runLogTX(s, tree, t, func(ctx context.Context, tx3 storage.LogTreeTX) error { + dequeue2, err := tx3.DequeueLeaves(ctx, batchSize, fakeQueueTime) + if err != nil { + t.Fatalf("DequeueLeaves(2nd) = %v", err) + } + if got, want := len(dequeue2), batchSize; got != want { + t.Fatalf("Dequeue count mismatch (2nd) got: %d, want: %d", got, want) + } + ensureAllLeavesDistinct(dequeue2, t) + + // Ensure this is the first batch by comparing leaf hashes. + if !leafInBatch(dequeue2[0], leaves) || !leafInBatch(dequeue2[1], leaves) { + t.Fatalf("Got leaf from wrong batch (2nd dequeue): %v", dequeue2) + } + return nil + }) + } +} + +func TestGetLeavesByHashNotPresent(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + hashes := [][]byte{[]byte("thisdoesn'texist")} + leaves, err := tx.GetLeavesByHash(ctx, hashes, false) + if err != nil { + t.Fatalf("Error getting leaves by hash: %v", err) + } + if len(leaves) != 0 { + t.Fatalf("Expected no leaves returned but got %d", len(leaves)) + } + return nil + }) +} + +func TestGetLeavesByHash(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Create fake leaf as if it had been sequenced + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + 
tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + + data := []byte("some data") + createFakeLeaf(ctx, handle.db, tree.TreeId, dummyRawHash, dummyHash, data, someExtraData, sequenceNumber, t) + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + hashes := [][]byte{dummyHash} + leaves, err := tx.GetLeavesByHash(ctx, hashes, false) + if err != nil { + t.Fatalf("Unexpected error getting leaf by hash: %v", err) + } + if len(leaves) != 1 { + t.Fatalf("Got %d leaves but expected one", len(leaves)) + } + checkLeafContents(leaves[0], sequenceNumber, dummyRawHash, dummyHash, data, someExtraData, t) + return nil + }) +} + +func TestGetLeavesByHashBigBatch(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Create fake leaf as if it had been sequenced + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + + const leafCount = 999 + 1 + hashes := make([][]byte, leafCount) + for i := 0; i < leafCount; i++ { + data := []byte(fmt.Sprintf("data %d", i)) + hash := sha256.Sum256(data) + hashes[i] = hash[:] + createFakeLeaf(ctx, handle.db, tree.TreeId, hash[:], hash[:], data, someExtraData, sequenceNumber+int64(i), t) + } + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + leaves, err := tx.GetLeavesByHash(ctx, hashes, false) + if err != nil { + t.Fatalf("Unexpected error getting leaf by hash: %v", err) + } + if got, want := len(leaves), leafCount; got != want { + t.Fatalf("Got %d leaves, expected %d", got, want) + } + return nil + }) +} + +func TestGetLeafDataByIdentityHash(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Create fake leaf as if it had been sequenced + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + data := 
[]byte("some data") + leaf := createFakeLeaf(ctx, handle.db, tree.TreeId, dummyRawHash, dummyHash, data, someExtraData, sequenceNumber, t) + leaf.LeafIndex = -1 + leaf.MerkleLeafHash = []byte(dummyMerkleLeafHash) + leaf2 := createFakeLeaf(ctx, handle.db, tree.TreeId, dummyHash2, dummyHash2, data, someExtraData, sequenceNumber+1, t) + leaf2.LeafIndex = -1 + leaf2.MerkleLeafHash = []byte(dummyMerkleLeafHash) + + tests := []struct { + hashes [][]byte + want []*trillian.LogLeaf + }{ + { + hashes: [][]byte{dummyRawHash}, + want: []*trillian.LogLeaf{leaf}, + }, + { + hashes: [][]byte{{0x01, 0x02}}, + }, + { + hashes: [][]byte{ + dummyRawHash, + {0x01, 0x02}, + dummyHash2, + {0x01, 0x02}, + }, + // Note: leaves not necessarily returned in order requested. + want: []*trillian.LogLeaf{leaf2, leaf}, + }, + } + for i, test := range tests { + test := test + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Parallel() + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + leaves, err := tx.(*logTreeTX).getLeafDataByIdentityHash(ctx, test.hashes) + if err != nil { + t.Fatalf("getLeavesByIdentityHash(_) = (_,%v); want (_,nil)", err) + } + + if len(leaves) != len(test.want) { + t.Fatalf("getLeavesByIdentityHash(_) = (|%d|,nil); want (|%d|,nil)", len(leaves), len(test.want)) + } + leavesEquivalent(t, leaves, test.want) + return nil + }) + }) + } +} + +func leavesEquivalent(t *testing.T, gotLeaves, wantLeaves []*trillian.LogLeaf) { + t.Helper() + want := make(map[string]*trillian.LogLeaf) + for _, w := range wantLeaves { + k := sha256.Sum256([]byte(w.String())) + want[string(k[:])] = w + } + got := make(map[string]*trillian.LogLeaf) + for _, g := range gotLeaves { + k := sha256.Sum256([]byte(g.String())) + got[string(k[:])] = g + } + if diff := cmp.Diff(want, got, cmp.Comparer(proto.Equal)); diff != "" { + t.Errorf("leaves not equivalent: diff -want,+got:\n%v", diff) + } +} + +// 
----------------------------------------------------------------------------- + +func TestLatestSignedRootNoneWritten(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + + tx, err := s.SnapshotForTree(ctx, tree) + if err != storage.ErrTreeNeedsInit { + t.Fatalf("SnapshotForTree gave %v, want %v", err, storage.ErrTreeNeedsInit) + } + commit(ctx, tx, t) +} + +func SignLogRoot(root *types.LogRootV1) (*trillian.SignedLogRoot, error) { + logRoot, err := root.MarshalBinary() + if err != nil { + return nil, err + } + return &trillian.SignedLogRoot{LogRoot: logRoot}, nil +} + +func TestLatestSignedLogRoot(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + + root, err := SignLogRoot(&types.LogRootV1{ + TimestampNanos: 98765, + TreeSize: 16, + RootHash: []byte(dummyHash), + }) + if err != nil { + t.Fatalf("SignLogRoot(): %v", err) + } + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + if err := tx.StoreSignedLogRoot(ctx, root); err != nil { + t.Fatalf("Failed to store signed root: %v", err) + } + return nil + }) + + { + runLogTX(s, tree, t, func(ctx context.Context, tx2 storage.LogTreeTX) error { + root2, err := tx2.LatestSignedLogRoot(ctx) + if err != nil { + t.Fatalf("Failed to read back new log root: %v", err) + } + if !proto.Equal(root, root2) { + t.Fatalf("Root round trip failed: <%v> and: <%v>", root, root2) + } + return nil + }) + } +} + +func TestDuplicateSignedLogRoot(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + 
+ root, err := SignLogRoot(&types.LogRootV1{ + TimestampNanos: 98765, + TreeSize: 16, + RootHash: []byte(dummyHash), + }) + if err != nil { + t.Fatalf("SignLogRoot(): %v", err) + } + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + if err := tx.StoreSignedLogRoot(ctx, root); err != nil { + t.Fatalf("Failed to store signed root: %v", err) + } + // Shouldn't be able to do it again + if err := tx.StoreSignedLogRoot(ctx, root); err == nil { + t.Fatal("Allowed duplicate signed root") + } + return nil + }) +} + +func TestLogRootUpdate(t *testing.T) { + t.Parallel() + + ctx := context.Background() + // Write two roots for a log and make sure the one with the newest timestamp supersedes + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, testonly.LogTree) + s := NewLogStorage(handle.db, nil) + + root, err := SignLogRoot(&types.LogRootV1{ + TimestampNanos: 98765, + TreeSize: 16, + RootHash: []byte(dummyHash), + }) + if err != nil { + t.Fatalf("SignLogRoot(): %v", err) + } + root2, err := SignLogRoot(&types.LogRootV1{ + TimestampNanos: 98766, + TreeSize: 16, + RootHash: []byte(dummyHash), + }) + if err != nil { + t.Fatalf("SignLogRoot(): %v", err) + } + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + return tx.StoreSignedLogRoot(ctx, root) + }) + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + return tx.StoreSignedLogRoot(ctx, root2) + }) + + runLogTX(s, tree, t, func(ctx context.Context, tx2 storage.LogTreeTX) error { + root3, err := tx2.LatestSignedLogRoot(ctx) + if err != nil { + t.Fatalf("Failed to read back new log root: %v", err) + } + if !proto.Equal(root2, root3) { + t.Fatalf("Root round trip failed: <%v> and: <%v>", root, root2) + } + return nil + }) +} + +func TestGetActiveLogIDs(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + handle := openTestDBOrDie(t) + admin := NewSQLAdminStorage(handle.db) + + 
// Create a few test trees + log1 := proto.Clone(testonly.LogTree).(*trillian.Tree) + log2 := proto.Clone(testonly.LogTree).(*trillian.Tree) + log3 := proto.Clone(testonly.PreorderedLogTree).(*trillian.Tree) + drainingLog := proto.Clone(testonly.LogTree).(*trillian.Tree) + frozenLog := proto.Clone(testonly.LogTree).(*trillian.Tree) + deletedLog := proto.Clone(testonly.LogTree).(*trillian.Tree) + for _, tree := range []**trillian.Tree{&log1, &log2, &log3, &drainingLog, &frozenLog, &deletedLog} { + newTree, err := storage.CreateTree(ctx, admin, *tree) + if err != nil { + t.Fatalf("CreateTree(%+v) returned err = %v", tree, err) + } + *tree = newTree + } + + // FROZEN is not a valid initial state, so we have to update it separately. + if _, err := storage.UpdateTree(ctx, admin, frozenLog.TreeId, func(t *trillian.Tree) { + t.TreeState = trillian.TreeState_FROZEN + }); err != nil { + t.Fatalf("UpdateTree() returned err = %v", err) + } + // DRAINING is not a valid initial state, so we have to update it separately. 
+ if _, err := storage.UpdateTree(ctx, admin, drainingLog.TreeId, func(t *trillian.Tree) { + t.TreeState = trillian.TreeState_DRAINING + }); err != nil { + t.Fatalf("UpdateTree() returned err = %v", err) + } + + // Update deleted trees accordingly + updateDeletedStmt, err := handle.db.PrepareContext(ctx, "UPDATE Trees SET Deleted = $1 WHERE TreeId = $2") + if err != nil { + t.Fatalf("PrepareContext() returned err = %v", err) + } + defer updateDeletedStmt.Close() + for _, treeID := range []int64{deletedLog.TreeId} { + if _, err := updateDeletedStmt.ExecContext(ctx, true, treeID); err != nil { + t.Fatalf("ExecContext(%v) returned err = %v", treeID, err) + } + } + + s := NewLogStorage(handle.db, nil) + got, err := s.GetActiveLogIDs(ctx) + if err != nil { + t.Fatalf("GetActiveLogIDs() returns err = %v", err) + } + + want := []int64{log1.TreeId, log2.TreeId, log3.TreeId, drainingLog.TreeId} + sort.Slice(got, func(i, j int) bool { return got[i] < got[j] }) + sort.Slice(want, func(i, j int) bool { return want[i] < want[j] }) + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("post-GetActiveLogIDs diff (-got +want):\n%v", diff) + } +} + +func TestGetActiveLogIDsEmpty(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + handle := openTestDBOrDie(t) + s := NewLogStorage(handle.db, nil) + + ids, err := s.GetActiveLogIDs(ctx) + if err != nil { + t.Fatalf("GetActiveLogIDs() = (_, %v), want = (_, nil)", err) + } + + if got, want := len(ids), 0; got != want { + t.Errorf("GetActiveLogIDs(): got %v IDs, want = %v", got, want) + } +} + +func ensureAllLeavesDistinct(leaves []*trillian.LogLeaf, t *testing.T) { + t.Helper() + // All the leaf value hashes should be distinct because the leaves were created with distinct + // leaf data. If only we had maps with slices as keys or sets or pretty much any kind of usable + // data structures we could do this properly. 
+ for i := range leaves { + for j := range leaves { + if i != j && bytes.Equal(leaves[i].LeafIdentityHash, leaves[j].LeafIdentityHash) { + t.Fatalf("Unexpectedly got a duplicate leaf hash: %v %v", + leaves[i].LeafIdentityHash, leaves[j].LeafIdentityHash) + } + } + } +} + +func ensureLeavesHaveQueueTimestamp(t *testing.T, leaves []*trillian.LogLeaf, want time.Time) { + t.Helper() + for _, leaf := range leaves { + gotQTimestamp := leaf.QueueTimestamp.AsTime() + if got, want := gotQTimestamp.UnixNano(), want.UnixNano(); got != want { + t.Errorf("Got leaf with QueueTimestampNanos = %v, want %v: %v", got, want, leaf) + } + } +} + +// Creates some test leaves with predictable data +func createTestLeaves(n, startSeq int64) []*trillian.LogLeaf { + var leaves []*trillian.LogLeaf + for l := int64(0); l < n; l++ { + lv := fmt.Sprintf("Leaf %d", l+startSeq) + h := sha256.New() + h.Write([]byte(lv)) + leafHash := h.Sum(nil) + leaf := &trillian.LogLeaf{ + LeafIdentityHash: leafHash, + MerkleLeafHash: leafHash, + LeafValue: []byte(lv), + ExtraData: []byte(fmt.Sprintf("Extra %d", l)), + LeafIndex: int64(startSeq + l), + } + leaves = append(leaves, leaf) + } + + return leaves +} + +// Convenience methods to avoid copying out "if err != nil { blah }" all over the place +func runLogTX(s storage.LogStorage, tree *trillian.Tree, t *testing.T, f storage.LogTXFunc) { + t.Helper() + if err := s.ReadWriteTransaction(context.Background(), tree, f); err != nil { + t.Fatalf("Failed to run log tx: %v", err) + } +} + +type committableTX interface { + Commit(ctx context.Context) error +} + +func commit(ctx context.Context, tx committableTX, t *testing.T) { + t.Helper() + if err := tx.Commit(ctx); err != nil { + t.Errorf("Failed to commit tx: %v", err) + } +} + +func leafInBatch(leaf *trillian.LogLeaf, batch []*trillian.LogLeaf) bool { + for _, bl := range batch { + if bytes.Equal(bl.LeafIdentityHash, leaf.LeafIdentityHash) { + return true + } + } + + return false +} diff --git 
// fromMySQLToPGPreparedStatement rewrites MySQL-style "?" placeholders as
// PostgreSQL positional markers ("$1", "$2", ...), numbered left to right.
//
// The rewrite is a single O(n) pass over the statement; the previous
// implementation re-scanned the whole string with strings.Contains and
// strings.Replace for every placeholder, which is quadratic in the
// statement length.
//
// NOTE(review): like the original, this assumes no literal '?' appears
// inside a quoted SQL string — confirm callers only pass trusted template
// SQL (the package's own statement constants satisfy this).
func fromMySQLToPGPreparedStatement(sql string) string {
	var b strings.Builder
	b.Grow(len(sql) + 8) // placeholders expand by at least one byte each
	counter := 1
	for i := 0; i < len(sql); i++ {
		if sql[i] != '?' {
			b.WriteByte(sql[i])
			continue
		}
		fmt.Fprintf(&b, "$%d", counter)
		counter++
	}
	return b.String()
}
+ +package crdb + +import ( + "database/sql" + "flag" + "sync" + + "github.com/google/trillian/monitoring" + "github.com/google/trillian/storage" + "k8s.io/klog/v2" + + _ "github.com/cockroachdb/cockroach-go/v2/crdb/crdbpgx" // crdb retries and postgres interface + _ "github.com/lib/pq" // Register the Postgres driver. +) + +const ( + // StorageProviderName is the name of the storage provider. + StorageProviderName = "crdb" +) + +var ( + crdbURI = flag.String("crdb_uri", "postgresql://root@localhost:26257?sslmode=disable", "Connection URI for CockroachDB database") + maxConns = flag.Int("crdb_max_conns", 0, "Maximum connections to the database") + maxIdle = flag.Int("crdb_max_idle_conns", -1, "Maximum idle database connections in the connection pool") + + crdbErr error + crdbHandle *sql.DB + crdbStorageInstance *crdbProvider + dbConnMu sync.Mutex +) + +// GetDatabase returns the database handle for the provider. +func GetDatabase() (*sql.DB, error) { + dbConnMu.Lock() + defer dbConnMu.Unlock() + return getCRDBDatabaseLocked() +} + +func init() { + if err := storage.RegisterProvider(StorageProviderName, newCRDBStorageProvider); err != nil { + klog.Fatalf("Failed to register storage provider crdb: %v", err) + } +} + +type crdbProvider struct { + db *sql.DB + mf monitoring.MetricFactory +} + +func newCRDBStorageProvider(mf monitoring.MetricFactory) (storage.Provider, error) { + dbConnMu.Lock() + defer dbConnMu.Unlock() + if crdbStorageInstance == nil { + db, err := getCRDBDatabaseLocked() + if err != nil { + return nil, err + } + crdbStorageInstance = &crdbProvider{ + db: db, + mf: mf, + } + } + + return crdbStorageInstance, nil +} + +// Lazy initializes the database connection handle and returns the instance. +// Requires lock to be held. 
+func getCRDBDatabaseLocked() (*sql.DB, error) { + if crdbHandle != nil || crdbErr != nil { + return crdbHandle, crdbErr + } + db, err := OpenDB(*crdbURI) + if err != nil { + crdbErr = err + return nil, err + } + if *maxConns > 0 { + db.SetMaxOpenConns(*maxConns) + } + if *maxIdle >= 0 { + db.SetMaxIdleConns(*maxIdle) + } + crdbHandle, crdbErr = db, nil + return db, nil +} + +func (p *crdbProvider) Close() error { + return p.db.Close() +} + +func (p *crdbProvider) LogStorage() storage.LogStorage { + return NewLogStorage(p.db, p.mf) +} + +func (p *crdbProvider) AdminStorage() storage.AdminStorage { + return NewSQLAdminStorage(p.db) +} diff --git a/storage/crdb/provider_test.go b/storage/crdb/provider_test.go new file mode 100644 index 0000000000..3b42cac93d --- /dev/null +++ b/storage/crdb/provider_test.go @@ -0,0 +1,48 @@ +// Copyright 2018 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "flag" + "testing" + + "github.com/google/trillian/storage" + "github.com/google/trillian/testonly/flagsaver" +) + +func TestCockroachDBStorageProviderErrorPersistence(t *testing.T) { + t.Parallel() + + defer flagsaver.Save().MustRestore() + if err := flag.Set("crdb_uri", "&bogus*:::?"); err != nil { + t.Errorf("Failed to set flag: %v", err) + } + + // First call: This should fail due to the Database URL being garbage. 
+ _, err1 := storage.NewProvider(StorageProviderName, nil) + if err1 == nil { + t.Fatalf("Expected 'storage.NewProvider' to fail") + } + + // Second call: This should fail with the same error. + _, err2 := storage.NewProvider(StorageProviderName, nil) + if err2 == nil { + t.Fatalf("Expected second call to 'storage.NewProvider' to fail") + } + + if err2 != err1 { + t.Fatalf("Expected second call to 'storage.NewProvider' to fail with %q, instead got: %q", err1, err2) + } +} diff --git a/storage/crdb/queue.go b/storage/crdb/queue.go new file mode 100644 index 0000000000..c65de60154 --- /dev/null +++ b/storage/crdb/queue.go @@ -0,0 +1,139 @@ +// Copyright 2017 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package crdb + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/google/trillian" + "google.golang.org/protobuf/types/known/timestamppb" + "k8s.io/klog/v2" +) + +const ( + // If this statement ORDER BY clause is changed refer to the comment in removeSequencedLeaves + selectQueuedLeavesSQL = `SELECT LeafIdentityHash,MerkleLeafHash,QueueTimestampNanos + FROM Unsequenced + WHERE TreeID=$1 + AND Bucket=0 + AND QueueTimestampNanos<=$2 + ORDER BY QueueTimestampNanos,LeafIdentityHash ASC LIMIT $3` + insertUnsequencedEntrySQL = `INSERT INTO Unsequenced(TreeId,Bucket,LeafIdentityHash,MerkleLeafHash,QueueTimestampNanos) + VALUES($1,0,$2,$3,$4)` + deleteUnsequencedSQL = "DELETE FROM Unsequenced WHERE TreeId=$1 AND Bucket=0 AND QueueTimestampNanos=$2 AND LeafIdentityHash=$3" +) + +type dequeuedLeaf struct { + queueTimestampNanos int64 + leafIdentityHash []byte +} + +func dequeueInfo(leafIDHash []byte, queueTimestamp int64) dequeuedLeaf { + return dequeuedLeaf{queueTimestampNanos: queueTimestamp, leafIdentityHash: leafIDHash} +} + +func (t *logTreeTX) dequeueLeaf(rows *sql.Rows) (*trillian.LogLeaf, dequeuedLeaf, error) { + var leafIDHash []byte + var merkleHash []byte + var queueTimestamp int64 + + err := rows.Scan(&leafIDHash, &merkleHash, &queueTimestamp) + if err != nil { + klog.Warningf("Error scanning work rows: %s", err) + return nil, dequeuedLeaf{}, err + } + + // Note: the LeafData and ExtraData being nil here is OK as this is only used by the + // sequencer. The sequencer only writes to the SequencedLeafData table and the client + // supplied data was already written to LeafData as part of queueing the leaf. 
+ queueTimestampProto := timestamppb.New(time.Unix(0, queueTimestamp)) + if err := queueTimestampProto.CheckValid(); err != nil { + return nil, dequeuedLeaf{}, fmt.Errorf("got invalid queue timestamp: %w", err) + } + leaf := &trillian.LogLeaf{ + LeafIdentityHash: leafIDHash, + MerkleLeafHash: merkleHash, + QueueTimestamp: queueTimestampProto, + } + return leaf, dequeueInfo(leafIDHash, queueTimestamp), nil +} + +func queueArgs(_ int64, _ []byte, queueTimestamp time.Time) []interface{} { + return []interface{}{queueTimestamp.UnixNano()} +} + +func (t *logTreeTX) UpdateSequencedLeaves(ctx context.Context, leaves []*trillian.LogLeaf) error { + dequeuedLeaves := make([]dequeuedLeaf, 0, len(leaves)) + for _, leaf := range leaves { + // This should fail on insert but catch it early + if len(leaf.LeafIdentityHash) != t.hashSizeBytes { + return errors.New("sequenced leaf has incorrect hash size") + } + + if err := leaf.IntegrateTimestamp.CheckValid(); err != nil { + return fmt.Errorf("got invalid integrate timestamp: %w", err) + } + iTimestamp := leaf.IntegrateTimestamp.AsTime() + _, err := t.tx.ExecContext( + ctx, + insertSequencedLeafSQL+valuesPlaceholder5, + t.treeID, + leaf.LeafIdentityHash, + leaf.MerkleLeafHash, + leaf.LeafIndex, + iTimestamp.UnixNano()) + if err != nil { + klog.Warningf("Failed to update sequenced leaves: %s", err) + return err + } + + qe, ok := t.dequeued[string(leaf.LeafIdentityHash)] + if !ok { + return fmt.Errorf("attempting to update leaf that wasn't dequeued. IdentityHash: %x", leaf.LeafIdentityHash) + } + dequeuedLeaves = append(dequeuedLeaves, qe) + } + + return t.removeSequencedLeaves(ctx, dequeuedLeaves) +} + +// removeSequencedLeaves removes the passed in leaves slice (which may be +// modified as part of the operation). +func (t *logTreeTX) removeSequencedLeaves(ctx context.Context, leaves []dequeuedLeaf) error { + start := time.Now() + // Don't need to re-sort because the query ordered by leaf hash. 
If that changes because + // the query is expensive then the sort will need to be done here. See comment in + // QueueLeaves. + stx, err := t.tx.PrepareContext(ctx, deleteUnsequencedSQL) + if err != nil { + klog.Warningf("Failed to prep delete statement for sequenced work: %v", err) + return err + } + defer stx.Close() + for _, dql := range leaves { + result, err := stx.ExecContext(ctx, t.treeID, dql.queueTimestampNanos, dql.leafIdentityHash) + err = checkResultOkAndRowCountIs(result, err, int64(1)) + if err != nil { + return err + } + } + + observe(dequeueRemoveLatency, time.Since(start), labelForTX(t)) + return nil +} diff --git a/storage/crdb/schema/storage.sql b/storage/crdb/schema/storage.sql new file mode 100644 index 0000000000..7f2c165a85 --- /dev/null +++ b/storage/crdb/schema/storage.sql @@ -0,0 +1,141 @@ +-- CockroachDB version of the tree schema + +-- --------------------------------------------- +-- Tree stuff here +-- --------------------------------------------- + +CREATE TYPE tree_state AS ENUM ('ACTIVE', 'FROZEN', 'DRAINING'); +CREATE TYPE tree_type AS ENUM ('LOG', 'PREORDERED_LOG'); +CREATE TYPE tree_hash_strategy AS ENUM ('RFC6962_SHA256'); +CREATE TYPE tree_hash_algorithm AS ENUM ('SHA256'); +CREATE TYPE tree_signature_algorithm AS ENUM ('ECDSA', 'RSA', 'ED25519'); + +-- Tree parameters should not be changed after creation. Doing so can +-- render the data in the tree unusable or inconsistent. 
+CREATE TABLE IF NOT EXISTS Trees( + TreeId BIGINT NOT NULL, + TreeState tree_state NOT NULL, + TreeType tree_type NOT NULL, + HashStrategy tree_hash_strategy NOT NULL, + HashAlgorithm tree_hash_algorithm NOT NULL, + SignatureAlgorithm tree_signature_algorithm NOT NULL, + DisplayName VARCHAR(20), + Description VARCHAR(200), + CreateTimeMillis BIGINT NOT NULL, + UpdateTimeMillis BIGINT NOT NULL, + MaxRootDurationMillis BIGINT NOT NULL, + PrivateKey BYTES NOT NULL, + PublicKey BYTES NOT NULL, + Deleted BOOLEAN, + DeleteTimeMillis BIGINT, + PRIMARY KEY(TreeId) +); + +-- This table contains tree parameters that can be changed at runtime such as for +-- administrative purposes. +CREATE TABLE IF NOT EXISTS TreeControl( + TreeId BIGINT NOT NULL, + SigningEnabled BOOLEAN NOT NULL, + SequencingEnabled BOOLEAN NOT NULL, + SequenceIntervalSeconds INTEGER NOT NULL, + PRIMARY KEY(TreeId), + FOREIGN KEY(TreeId) REFERENCES Trees(TreeId) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS Subtree( + TreeId BIGINT NOT NULL, + SubtreeId BYTES NOT NULL, + Nodes BYTES NOT NULL, + SubtreeRevision INTEGER NOT NULL, + PRIMARY KEY(TreeId, SubtreeId, SubtreeRevision), + FOREIGN KEY(TreeId) REFERENCES Trees(TreeId) ON DELETE CASCADE +); + +-- The TreeRevisionIdx is used to enforce that there is only one STH at any +-- tree revision +CREATE TABLE IF NOT EXISTS TreeHead( + TreeId BIGINT NOT NULL, + TreeHeadTimestamp BIGINT, + TreeSize BIGINT, + RootHash BYTES NOT NULL, + RootSignature BYTES NOT NULL, + TreeRevision BIGINT, + PRIMARY KEY(TreeId, TreeHeadTimestamp), + FOREIGN KEY(TreeId) REFERENCES Trees(TreeId) ON DELETE CASCADE +); + +CREATE UNIQUE INDEX TreeHeadRevisionIdx + ON TreeHead(TreeId, TreeRevision); + +-- --------------------------------------------- +-- Log specific stuff here +-- --------------------------------------------- + +-- Creating index at same time as table allows some storage engines to better +-- optimize physical storage layout. 
Most engines allow multiple nulls in a +-- unique index but some may not. + +-- A leaf that has not been sequenced has a row in this table. If duplicate leaves +-- are allowed they will all reference this row. +CREATE TABLE IF NOT EXISTS LeafData( + TreeId BIGINT NOT NULL, + -- This is a personality specific has of some subset of the leaf data. + -- It's only purpose is to allow Trillian to identify duplicate entries in + -- the context of the personality. + LeafIdentityHash BYTES NOT NULL, + -- This is the data stored in the leaf for example in CT it contains a DER encoded + -- X.509 certificate but is application dependent + LeafValue BYTES NOT NULL, + -- This is extra data that the application can associate with the leaf should it wish to. + -- This data is not included in signing and hashing. + ExtraData BYTES, + -- The timestamp from when this leaf data was first queued for inclusion. + QueueTimestampNanos BIGINT NOT NULL, + PRIMARY KEY(TreeId, LeafIdentityHash), + FOREIGN KEY(TreeId) REFERENCES Trees(TreeId) ON DELETE CASCADE +); + +-- When a leaf is sequenced a row is added to this table. If logs allow duplicates then +-- multiple rows will exist with different sequence numbers. The signed timestamp +-- will be communicated via the unsequenced table as this might need to be unique, depending +-- on the log parameters and we can't insert into this table until we have the sequence number +-- which is not available at the time we queue the entry. We need both hashes because the +-- LeafData table is keyed by the raw data hash. +CREATE TABLE IF NOT EXISTS SequencedLeafData( + TreeId BIGINT NOT NULL, + SequenceNumber BIGINT NOT NULL, + -- This is a personality specific has of some subset of the leaf data. + -- It's only purpose is to allow Trillian to identify duplicate entries in + -- the context of the personality. + LeafIdentityHash BYTES NOT NULL, + -- This is a MerkleLeafHash as defined by the treehasher that the log uses. 
For example for + -- CT this hash will include the leaf prefix byte as well as the leaf data. + MerkleLeafHash BYTES NOT NULL, + IntegrateTimestampNanos BIGINT NOT NULL, + PRIMARY KEY(TreeId, SequenceNumber), + FOREIGN KEY(TreeId) REFERENCES Trees(TreeId) ON DELETE CASCADE, + FOREIGN KEY(TreeId, LeafIdentityHash) REFERENCES LeafData(TreeId, LeafIdentityHash) ON DELETE CASCADE +); + +CREATE INDEX SequencedLeafMerkleIdx + ON SequencedLeafData(TreeId, MerkleLeafHash); + +CREATE TABLE IF NOT EXISTS Unsequenced( + TreeId BIGINT NOT NULL, + -- The bucket field is to allow the use of time based ring bucketed schemes if desired. If + -- unused this should be set to zero for all entries. + Bucket INTEGER NOT NULL, + -- This is a personality specific hash of some subset of the leaf data. + -- It's only purpose is to allow Trillian to identify duplicate entries in + -- the context of the personality. + LeafIdentityHash BYTES NOT NULL, + -- This is a MerkleLeafHash as defined by the treehasher that the log uses. For example for + -- CT this hash will include the leaf prefix byte as well as the leaf data. + MerkleLeafHash BYTES NOT NULL, + QueueTimestampNanos BIGINT NOT NULL, + -- This is a SHA256 hash of the TreeID, LeafIdentityHash and QueueTimestampNanos. It is used + -- for batched deletes from the table when trillian_log_server and trillian_log_signer are + -- built with the batched_queue tag. + QueueID BYTES DEFAULT NULL UNIQUE, + PRIMARY KEY (TreeId, Bucket, QueueTimestampNanos, LeafIdentityHash) +); diff --git a/storage/crdb/sqladminstorage.go b/storage/crdb/sqladminstorage.go new file mode 100644 index 0000000000..007de9add1 --- /dev/null +++ b/storage/crdb/sqladminstorage.go @@ -0,0 +1,398 @@ +// Copyright 2017 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "context" + "database/sql" + "fmt" + "sync" + "time" + + "github.com/google/trillian" + "github.com/google/trillian/storage" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + defaultSequenceIntervalSeconds = 60 + + nonDeletedWhere = " WHERE (Deleted IS NULL OR Deleted = 'false')" + + selectTrees = ` + SELECT + TreeId, + TreeState, + TreeType, + HashStrategy, + HashAlgorithm, + SignatureAlgorithm, + DisplayName, + Description, + CreateTimeMillis, + UpdateTimeMillis, + PrivateKey, + PublicKey, + MaxRootDurationMillis, + Deleted, + DeleteTimeMillis + FROM Trees` + selectNonDeletedTrees = selectTrees + nonDeletedWhere + selectTreeByID = selectTrees + " WHERE TreeId = $1" + + updateTreeSQL = `UPDATE Trees + SET TreeState = $1, TreeType = $2, DisplayName = $3, Description = $4, UpdateTimeMillis = $5, MaxRootDurationMillis = $6, PrivateKey = $7 + WHERE TreeId = $8` +) + +// NewSQLAdminStorage returns a SQL storage.AdminStorage implementation backed by DB. 
+// Should work for MySQL and CockroachDB +func NewSQLAdminStorage(db *sql.DB) storage.AdminStorage { + return &sqlAdminStorage{db} +} + +// sqlAdminStorage implements storage.AdminStorage +type sqlAdminStorage struct { + db *sql.DB +} + +func (s *sqlAdminStorage) Snapshot(ctx context.Context) (storage.ReadOnlyAdminTX, error) { + return s.beginInternal(ctx) +} + +func (s *sqlAdminStorage) beginInternal(ctx context.Context) (storage.AdminTX, error) { + tx, err := s.db.BeginTx(ctx, nil /* opts */) + if err != nil { + return nil, err + } + return &adminTX{tx: tx}, nil +} + +func (s *sqlAdminStorage) ReadWriteTransaction(ctx context.Context, f storage.AdminTXFunc) error { + tx, err := s.beginInternal(ctx) + if err != nil { + return err + } + defer tx.Close() + if err := f(ctx, tx); err != nil { + return err + } + return tx.Commit() +} + +func (s *sqlAdminStorage) CheckDatabaseAccessible(ctx context.Context) error { + return s.db.PingContext(ctx) +} + +type adminTX struct { + tx *sql.Tx + + // mu guards reads/writes on closed, which happen on Commit/Close methods. + // + // We don't check closed on methods apart from the ones above, as we trust tx + // to keep tabs on its state, and hence fail to do queries after closed. + mu sync.RWMutex + closed bool +} + +func (t *adminTX) Commit() error { + t.mu.Lock() + defer t.mu.Unlock() + t.closed = true + return t.tx.Commit() +} + +func (t *adminTX) Close() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.closed { + return nil + } + t.closed = true + return t.tx.Rollback() +} + +func (t *adminTX) GetTree(ctx context.Context, treeID int64) (*trillian.Tree, error) { + stmt, err := t.tx.PrepareContext(ctx, selectTreeByID) + if err != nil { + return nil, err + } + defer stmt.Close() + + // GetTree is an entry point for most RPCs, let's provide somewhat nicer error messages. 
+ tree, err := storage.ReadTree(stmt.QueryRowContext(ctx, treeID)) + switch { + case err == sql.ErrNoRows: + // ErrNoRows doesn't provide useful information, so we don't forward it. + return nil, status.Errorf(codes.NotFound, "tree %v not found", treeID) + case err != nil: + return nil, fmt.Errorf("error reading tree %v: %v", treeID, err) + } + return tree, nil +} + +func (t *adminTX) ListTrees(ctx context.Context, includeDeleted bool) ([]*trillian.Tree, error) { + var query string + if includeDeleted { + query = selectTrees + } else { + query = selectNonDeletedTrees + } + + stmt, err := t.tx.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + defer stmt.Close() + rows, err := stmt.QueryContext(ctx) + if err != nil { + return nil, err + } + defer rows.Close() + trees := []*trillian.Tree{} + for rows.Next() { + tree, err := storage.ReadTree(rows) + if err != nil { + return nil, err + } + trees = append(trees, tree) + } + return trees, nil +} + +func (t *adminTX) CreateTree(ctx context.Context, tree *trillian.Tree) (*trillian.Tree, error) { + if err := storage.ValidateTreeForCreation(ctx, tree); err != nil { + return nil, err + } + if err := validateStorageSettings(tree); err != nil { + return nil, err + } + + id, err := storage.NewTreeID() + if err != nil { + return nil, err + } + + // Use the time truncated-to-millis throughout, as that's what's stored. 
+ nowMillis := storage.ToMillisSinceEpoch(time.Now()) + now := storage.FromMillisSinceEpoch(nowMillis) + + newTree := proto.Clone(tree).(*trillian.Tree) + newTree.TreeId = id + newTree.CreateTime = timestamppb.New(now) + if err := newTree.CreateTime.CheckValid(); err != nil { + return nil, fmt.Errorf("failed to build create time: %w", err) + } + newTree.UpdateTime = timestamppb.New(now) + if err := newTree.UpdateTime.CheckValid(); err != nil { + return nil, fmt.Errorf("failed to build update time: %w", err) + } + if err := newTree.MaxRootDuration.CheckValid(); err != nil { + return nil, fmt.Errorf("could not parse MaxRootDuration: %w", err) + } + rootDuration := newTree.MaxRootDuration.AsDuration() + + insertTreeStmt, err := t.tx.PrepareContext( + ctx, + `INSERT INTO Trees( + TreeId, + TreeState, + TreeType, + HashStrategy, + HashAlgorithm, + SignatureAlgorithm, + DisplayName, + Description, + CreateTimeMillis, + UpdateTimeMillis, + PrivateKey, + PublicKey, + MaxRootDurationMillis) + VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`) + if err != nil { + return nil, err + } + defer insertTreeStmt.Close() + + _, err = insertTreeStmt.ExecContext( + ctx, + newTree.TreeId, + newTree.TreeState.String(), + newTree.TreeType.String(), + "RFC6962_SHA256", // Unused, filling in for backward compatibility. + "SHA256", // Unused, filling in for backward compatibility. + "ECDSA", // Unused, filling in for backward compatibility. + newTree.DisplayName, + newTree.Description, + nowMillis, + nowMillis, + []byte{}, // Unused, filling in for backward compatibility. + []byte{}, // Unused, filling in for backward compatibility. + rootDuration/time.Millisecond, + ) + if err != nil { + return nil, err + } + + // MySQL silently truncates data when running in non-strict mode. + // We shouldn't be using non-strict modes, but let's guard against it + // anyway. 
+ if _, err := t.GetTree(ctx, newTree.TreeId); err != nil { + // GetTree will fail for truncated enums (they get recorded as + // empty strings, which will not match any known value). + return nil, fmt.Errorf("enum truncated: %v", err) + } + + insertControlStmt, err := t.tx.PrepareContext( + ctx, + `INSERT INTO TreeControl( + TreeId, + SigningEnabled, + SequencingEnabled, + SequenceIntervalSeconds) + VALUES($1, $2, $3, $4)`) + if err != nil { + return nil, err + } + defer insertControlStmt.Close() + _, err = insertControlStmt.ExecContext( + ctx, + newTree.TreeId, + true, /* SigningEnabled */ + true, /* SequencingEnabled */ + defaultSequenceIntervalSeconds, + ) + if err != nil { + return nil, err + } + + return newTree, nil +} + +func (t *adminTX) UpdateTree(ctx context.Context, treeID int64, updateFunc func(*trillian.Tree)) (*trillian.Tree, error) { + tree, err := t.GetTree(ctx, treeID) + if err != nil { + return nil, err + } + + beforeUpdate := proto.Clone(tree).(*trillian.Tree) + updateFunc(tree) + if err := storage.ValidateTreeForUpdate(ctx, beforeUpdate, tree); err != nil { + return nil, err + } + if err := validateStorageSettings(tree); err != nil { + return nil, err + } + + // TODO(pavelkalinnikov): When switching TreeType from PREORDERED_LOG to LOG, + // ensure all entries in SequencedLeafData are integrated. + + // Use the time truncated-to-millis throughout, as that's what's stored. 
+	nowMillis := storage.ToMillisSinceEpoch(time.Now())
+	now := storage.FromMillisSinceEpoch(nowMillis)
+	tree.UpdateTime = timestamppb.New(now)
+	// Bug fix: the original checked a stale `err` here (last assigned by
+	// GetTree above, already known to be nil), leaving the new UpdateTime
+	// unvalidated and the error branch dead. Validate the freshly-built
+	// timestamp instead, mirroring the CheckValid pattern in CreateTree.
+	if err := tree.UpdateTime.CheckValid(); err != nil {
+		return nil, fmt.Errorf("failed to build update time: %w", err)
+	}
+	if err := tree.MaxRootDuration.CheckValid(); err != nil {
+		return nil, fmt.Errorf("could not parse MaxRootDuration: %w", err)
+	}
+	rootDuration := tree.MaxRootDuration.AsDuration()
+
+	stmt, err := t.tx.PrepareContext(ctx, updateTreeSQL)
+	if err != nil {
+		return nil, err
+	}
+	defer stmt.Close()
+
+	if _, err = stmt.ExecContext(
+		ctx,
+		tree.TreeState.String(),
+		tree.TreeType.String(),
+		tree.DisplayName,
+		tree.Description,
+		nowMillis,
+		rootDuration/time.Millisecond,
+		[]byte{}, // Unused, filling in for backward compatibility.
+		tree.TreeId); err != nil {
+		return nil, err
+	}
+
+	return tree, nil
+}
+
+func (t *adminTX) SoftDeleteTree(ctx context.Context, treeID int64) (*trillian.Tree, error) {
+	return t.updateDeleted(ctx, treeID, true /* deleted */, storage.ToMillisSinceEpoch(time.Now()) /* deleteTimeMillis */)
+}
+
+func (t *adminTX) UndeleteTree(ctx context.Context, treeID int64) (*trillian.Tree, error) {
+	return t.updateDeleted(ctx, treeID, false /* deleted */, nil /* deleteTimeMillis */)
+}
+
+// updateDeleted updates the Deleted and DeleteTimeMillis fields of the specified tree.
+// deleteTimeMillis must be either an int64 (in millis since epoch) or nil.
+func (t *adminTX) updateDeleted(ctx context.Context, treeID int64, deleted bool, deleteTimeMillis interface{}) (*trillian.Tree, error) { + if err := validateDeleted(ctx, t.tx, treeID, !deleted); err != nil { + return nil, err + } + if _, err := t.tx.ExecContext( + ctx, + "UPDATE Trees SET Deleted = $1, DeleteTimeMillis = $2 WHERE TreeId = $3", + deleted, deleteTimeMillis, treeID); err != nil { + return nil, err + } + return t.GetTree(ctx, treeID) +} + +func (t *adminTX) HardDeleteTree(ctx context.Context, treeID int64) error { + if err := validateDeleted(ctx, t.tx, treeID, true /* wantDeleted */); err != nil { + return err + } + + // TreeControl didn't have "ON DELETE CASCADE" on previous versions, so let's hit it explicitly + if _, err := t.tx.ExecContext(ctx, "DELETE FROM TreeControl WHERE TreeId = $1", treeID); err != nil { + return err + } + _, err := t.tx.ExecContext(ctx, "DELETE FROM Trees WHERE TreeId = $1", treeID) + return err +} + +func validateDeleted(ctx context.Context, tx *sql.Tx, treeID int64, wantDeleted bool) error { + var nullDeleted sql.NullBool + switch err := tx.QueryRowContext(ctx, "SELECT Deleted FROM Trees WHERE TreeId = $1", treeID).Scan(&nullDeleted); { + case err == sql.ErrNoRows: + return status.Errorf(codes.NotFound, "tree %v not found", treeID) + case err != nil: + return err + } + + switch deleted := nullDeleted.Valid && nullDeleted.Bool; { + case wantDeleted && !deleted: + return status.Errorf(codes.FailedPrecondition, "tree %v is not soft deleted", treeID) + case !wantDeleted && deleted: + return status.Errorf(codes.FailedPrecondition, "tree %v already soft deleted", treeID) + } + return nil +} + +func validateStorageSettings(tree *trillian.Tree) error { + if tree.StorageSettings != nil { + return fmt.Errorf("storage_settings not supported, but got %v", tree.StorageSettings) + } + return nil +} diff --git a/storage/crdb/sqladminstorage_test.go b/storage/crdb/sqladminstorage_test.go new file mode 100644 index 0000000000..1a93918c73 
--- /dev/null +++ b/storage/crdb/sqladminstorage_test.go @@ -0,0 +1,258 @@ +// Copyright 2017 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "context" + "database/sql" + "fmt" + "testing" + + "github.com/google/trillian" + "github.com/google/trillian/storage" + "github.com/google/trillian/storage/testonly" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const selectTreeControlByID = "SELECT SigningEnabled, SequencingEnabled, SequenceIntervalSeconds FROM TreeControl WHERE TreeId = $1" + +func TestCRDBAdminStorage(t *testing.T) { + t.Parallel() + + tester := &testonly.AdminStorageTester{NewAdminStorage: func() storage.AdminStorage { + handle := openTestDBOrDie(t) + return NewSQLAdminStorage(handle.db) + }} + tester.RunAllTests(t) +} + +func TestAdminTX_CreateTree_InitializesStorageStructures(t *testing.T) { + t.Parallel() + + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + ctx := context.Background() + + tree, err := storage.CreateTree(ctx, s, testonly.LogTree) + if err != nil { + t.Fatalf("CreateTree() failed: %v", err) + } + + // Check if TreeControl is correctly written. 
+ var signingEnabled, sequencingEnabled bool + var sequenceIntervalSeconds int + if err := handle.db.QueryRowContext(ctx, selectTreeControlByID, tree.TreeId).Scan(&signingEnabled, &sequencingEnabled, &sequenceIntervalSeconds); err != nil { + t.Fatalf("Failed to read TreeControl: %v", err) + } + // We don't mind about specific values, defaults change, but let's check + // that important numbers are not zeroed. + if sequenceIntervalSeconds <= 0 { + t.Errorf("sequenceIntervalSeconds = %v, want > 0", sequenceIntervalSeconds) + } +} + +func TestCreateTreeInvalidStates(t *testing.T) { + t.Parallel() + + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + ctx := context.Background() + + states := []trillian.TreeState{trillian.TreeState_DRAINING, trillian.TreeState_FROZEN} + + for _, state := range states { + inTree := proto.Clone(testonly.LogTree).(*trillian.Tree) + inTree.TreeState = state + if _, err := storage.CreateTree(ctx, s, inTree); err == nil { + t.Errorf("CreateTree() state: %v got: nil want: err", state) + } + } +} + +func TestAdminTX_TreeWithNulls(t *testing.T) { + t.Parallel() + + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + ctx := context.Background() + + // Setup: create a tree and set all nullable columns to null. + // Some columns have to be manually updated, as it's not possible to set + // some proto fields to nil. 
+ tree, err := storage.CreateTree(ctx, s, testonly.LogTree) + if err != nil { + t.Fatalf("CreateTree() failed: %v", err) + } + treeID := tree.TreeId + + if err := setNulls(ctx, handle.db, treeID); err != nil { + t.Fatalf("setNulls() = %v, want = nil", err) + } + + tests := []struct { + desc string + fn storage.AdminTXFunc + }{ + { + desc: "GetTree", + fn: func(ctx context.Context, tx storage.AdminTX) error { + _, err := tx.GetTree(ctx, treeID) + return err + }, + }, + { + desc: "ListTrees", + fn: func(ctx context.Context, tx storage.AdminTX) error { + trees, err := tx.ListTrees(ctx, false /* includeDeleted */) + if err != nil { + return err + } + for _, tree := range trees { + if tree.TreeId == treeID { + return nil + } + } + return fmt.Errorf("ID not found: %v", treeID) + }, + }, + } + for _, test := range tests { + if err := s.ReadWriteTransaction(ctx, test.fn); err != nil { + t.Errorf("%v: err = %v, want = nil", test.desc, err) + } + } +} + +func TestAdminTX_StorageSettingsNotSupported(t *testing.T) { + t.Parallel() + + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + ctx := context.Background() + + settings, err := anypb.New(&trillian.Tree{}) + if err != nil { + t.Fatalf("Error marshaling proto: %v", err) + } + + tests := []struct { + desc string + // fn attempts to either create or update a tree with a non-nil, valid Any proto + // on Tree.StorageSettings. It's expected to return an error. 
+ fn func(storage.AdminStorage) error + }{ + { + desc: "CreateTree", + fn: func(s storage.AdminStorage) error { + tree := proto.Clone(testonly.LogTree).(*trillian.Tree) + tree.StorageSettings = settings + _, err := storage.CreateTree(ctx, s, tree) + return err + }, + }, + { + desc: "UpdateTree", + fn: func(s storage.AdminStorage) error { + tree, err := storage.CreateTree(ctx, s, testonly.LogTree) + if err != nil { + t.Fatalf("CreateTree() failed with err = %v", err) + } + _, err = storage.UpdateTree(ctx, s, tree.TreeId, func(tree *trillian.Tree) { tree.StorageSettings = settings }) + return err + }, + }, + } + for _, test := range tests { + if err := test.fn(s); err == nil { + t.Errorf("%v: err = nil, want non-nil", test.desc) + } + } +} + +func TestAdminTX_HardDeleteTree(t *testing.T) { + t.Parallel() + + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + ctx := context.Background() + + tree, err := storage.CreateTree(ctx, s, testonly.LogTree) + if err != nil { + t.Fatalf("CreateTree() returned err = %v", err) + } + + if err := s.ReadWriteTransaction(ctx, func(ctx context.Context, tx storage.AdminTX) error { + if _, err := tx.SoftDeleteTree(ctx, tree.TreeId); err != nil { + return err + } + return tx.HardDeleteTree(ctx, tree.TreeId) + }); err != nil { + t.Fatalf("ReadWriteTransaction() returned err = %v", err) + } + + // Unlike the HardDelete tests on AdminStorageTester, here we have the chance to poke inside the + // database and check that the rows are gone, so let's do just that. + // If there's no record on Trees, then there can be no record in any of the dependent tables. 
+ var name string + if err := handle.db.QueryRowContext(ctx, "SELECT DisplayName FROM Trees WHERE TreeId = $1", tree.TreeId).Scan(&name); err != sql.ErrNoRows { + t.Errorf("QueryRowContext() returned err = %v, want = %v", err, sql.ErrNoRows) + } +} + +func TestCheckDatabaseAccessible_Fails(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + // Pass in a closed database to provoke a failure. + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + id := getDBID(t) + rawdbhandle, loaded := testDBs.LoadAndDelete(id) + if !loaded { + t.Fatalf("Failed to load test DB handle for id %v", id) + } + dbhandle, ok := rawdbhandle.(*testDBHandle) + if !ok { + t.Fatalf("Failed to cast test DB handle for id %v", id) + } + dbhandle.done(context.Background()) + + if err := s.CheckDatabaseAccessible(ctx); err == nil { + t.Error("TestCheckDatabaseAccessible_Fails got: nil, want: err") + } +} + +func TestCheckDatabaseAccessible_OK(t *testing.T) { + t.Parallel() + + handle := openTestDBOrDie(t) + s := NewSQLAdminStorage(handle.db) + ctx := context.Background() + if err := s.CheckDatabaseAccessible(ctx); err != nil { + t.Errorf("TestCheckDatabaseAccessible_OK got: %v, want: nil", err) + } +} + +func setNulls(ctx context.Context, db *sql.DB, treeID int64) error { + stmt, err := db.PrepareContext(ctx, "UPDATE Trees SET DisplayName = NULL, Description = NULL WHERE TreeId = $1") + if err != nil { + return err + } + defer stmt.Close() + _, err = stmt.ExecContext(ctx, treeID) + return err +} diff --git a/storage/crdb/storage_test.go b/storage/crdb/storage_test.go new file mode 100644 index 0000000000..efd0cea80e --- /dev/null +++ b/storage/crdb/storage_test.go @@ -0,0 +1,255 @@ +// Copyright 2016 Trillian Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crdb + +import ( + "bytes" + "context" + "crypto" + "crypto/sha256" + "fmt" + "testing" + + "github.com/google/trillian" + "github.com/google/trillian/storage" + storageto "github.com/google/trillian/storage/testonly" + stree "github.com/google/trillian/storage/tree" + "github.com/google/trillian/types" + "github.com/transparency-dev/merkle/compact" + "github.com/transparency-dev/merkle/rfc6962" + "k8s.io/klog/v2" +) + +func TestNodeRoundTrip(t *testing.T) { + t.Parallel() + + nodes := createSomeNodes(256) + nodeIDs := make([]compact.NodeID, len(nodes)) + for i := range nodes { + nodeIDs[i] = nodes[i].ID + } + + for _, tc := range []struct { + desc string + store []stree.Node + read []compact.NodeID + want []stree.Node + wantErr bool + }{ + {desc: "store-4-read-4", store: nodes[:4], read: nodeIDs[:4], want: nodes[:4]}, + {desc: "store-4-read-1", store: nodes[:4], read: nodeIDs[:1], want: nodes[:1]}, + {desc: "store-2-read-4", store: nodes[:2], read: nodeIDs[:4], want: nodes[:2]}, + {desc: "store-none-read-all", store: nil, read: nodeIDs, wantErr: true}, + {desc: "store-all-read-all", store: nodes, read: nodeIDs, want: nodes}, + {desc: "store-all-read-none", store: nodes, read: nil, want: nil}, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, storageto.LogTree) + s := NewLogStorage(handle.db, nil) + + const writeRev = int64(100) + runLogTX(s, tree, t, func(ctx 
context.Context, tx storage.LogTreeTX) error { + forceWriteRevision(writeRev, tx) + if err := tx.SetMerkleNodes(ctx, tc.store); err != nil { + t.Fatalf("Failed to store nodes: %s", err) + } + return storeLogRoot(ctx, tx, uint64(len(tc.store)), uint64(writeRev), []byte{1, 2, 3}) + }) + + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + readNodes, err := tx.GetMerkleNodes(ctx, tc.read) + if err != nil && !tc.wantErr { + t.Fatalf("Failed to retrieve nodes: %s", err) + } else if err == nil && tc.wantErr { + t.Fatal("Retrieving nodes succeeded unexpectedly") + } + if err := nodesAreEqual(readNodes, tc.want); err != nil { + t.Fatalf("Read back different nodes from the ones stored: %s", err) + } + return nil + }) + }) + } +} + +// This test ensures that node writes cross subtree boundaries so this edge case in the subtree +// cache gets exercised. Any tree size > 256 will do this. +func TestLogNodeRoundTripMultiSubtree(t *testing.T) { + t.Parallel() + + ctx := context.Background() + handle := openTestDBOrDie(t) + as := NewSQLAdminStorage(handle.db) + tree := mustCreateTree(ctx, t, as, storageto.LogTree) + s := NewLogStorage(handle.db, nil) + + const writeRev = int64(100) + const size = 871 + nodesToStore, err := createLogNodesForTreeAtSize(t, size, writeRev) + if err != nil { + t.Fatalf("failed to create test tree: %v", err) + } + nodeIDsToRead := make([]compact.NodeID, len(nodesToStore)) + for i := range nodesToStore { + nodeIDsToRead[i] = nodesToStore[i].ID + } + + { + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + forceWriteRevision(writeRev, tx) + if err := tx.SetMerkleNodes(ctx, nodesToStore); err != nil { + t.Fatalf("Failed to store nodes: %s", err) + } + return storeLogRoot(ctx, tx, uint64(size), uint64(writeRev), []byte{1, 2, 3}) + }) + } + + { + runLogTX(s, tree, t, func(ctx context.Context, tx storage.LogTreeTX) error { + readNodes, err := tx.GetMerkleNodes(ctx, nodeIDsToRead) + if err != nil { + 
t.Fatalf("Failed to retrieve nodes: %s", err) + } + if err := nodesAreEqual(readNodes, nodesToStore); err != nil { + missing, extra := diffNodes(readNodes, nodesToStore) + for _, n := range missing { + t.Errorf("Missing: %v", n.ID) + } + for _, n := range extra { + t.Errorf("Extra : %v", n.ID) + } + t.Fatalf("Read back different nodes from the ones stored: %s", err) + } + return nil + }) + } +} + +func forceWriteRevision(rev int64, tx storage.LogTreeTX) { + mtx, ok := tx.(*logTreeTX) + if !ok { + panic(nil) + } + mtx.treeTX.writeRevision = rev +} + +func createSomeNodes(count int) []stree.Node { + r := make([]stree.Node, count) + for i := range r { + r[i].ID = compact.NewNodeID(0, uint64(i)) + h := sha256.Sum256([]byte{byte(i)}) + r[i].Hash = h[:] + klog.V(3).Infof("Node to store: %v", r[i].ID) + } + return r +} + +func createLogNodesForTreeAtSize(t *testing.T, ts, rev int64) ([]stree.Node, error) { + hasher := rfc6962.New(crypto.SHA256) + fact := compact.RangeFactory{Hash: hasher.HashChildren} + cr := fact.NewEmptyRange(0) + + nodeMap := make(map[compact.NodeID][]byte) + store := func(id compact.NodeID, hash []byte) { nodeMap[id] = hash } + + for l := 0; l < int(ts); l++ { + hash := hasher.HashLeaf([]byte(fmt.Sprintf("Leaf %d", l))) + // Store the new leaf node, and all new perfect nodes. + if err := cr.Append(hash, store); err != nil { + return nil, err + } + } + + // Unroll the map, which has deduped the updates for us and retained the latest + nodes := make([]stree.Node, 0, len(nodeMap)) + for id, hash := range nodeMap { + nodes = append(nodes, stree.Node{ID: id, Hash: hash}) + } + return nodes, nil +} + +// TODO(pavelkalinnikov): Allow nodes to be out of order. 
+func nodesAreEqual(lhs, rhs []stree.Node) error { + if ls, rs := len(lhs), len(rhs); ls != rs { + return fmt.Errorf("different number of nodes, %d vs %d", ls, rs) + } + for i := range lhs { + if l, r := lhs[i].ID, rhs[i].ID; l != r { + return fmt.Errorf("NodeIDs are not the same,\nlhs = %v,\nrhs = %v", l, r) + } + if l, r := lhs[i].Hash, rhs[i].Hash; !bytes.Equal(l, r) { + return fmt.Errorf("Hashes are not the same for %v,\nlhs = %v,\nrhs = %v", lhs[i].ID, l, r) + } + } + return nil +} + +func diffNodes(got, want []stree.Node) ([]stree.Node, []stree.Node) { + var missing []stree.Node + gotMap := make(map[compact.NodeID]stree.Node) + for _, n := range got { + gotMap[n.ID] = n + } + for _, n := range want { + _, ok := gotMap[n.ID] + if !ok { + missing = append(missing, n) + } + delete(gotMap, n.ID) + } + // Unpack the extra nodes to return both as slices + extra := make([]stree.Node, 0, len(gotMap)) + for _, v := range gotMap { + extra = append(extra, v) + } + return missing, extra +} + +func mustSignAndStoreLogRoot(ctx context.Context, t *testing.T, l storage.LogStorage, tree *trillian.Tree, treeSize uint64) { + t.Helper() + if err := l.ReadWriteTransaction(ctx, tree, func(ctx context.Context, tx storage.LogTreeTX) error { + return storeLogRoot(ctx, tx, treeSize, 0, []byte{0}) + }); err != nil { + t.Fatalf("ReadWriteTransaction: %v", err) + } +} + +func storeLogRoot(ctx context.Context, tx storage.LogTreeTX, size, rev uint64, hash []byte) error { + logRoot, err := (&types.LogRootV1{TreeSize: size, RootHash: hash}).MarshalBinary() + if err != nil { + return fmt.Errorf("error marshaling new LogRoot: %v", err) + } + root := &trillian.SignedLogRoot{LogRoot: logRoot} + if err := tx.StoreSignedLogRoot(ctx, root); err != nil { + return fmt.Errorf("error storing new SignedLogRoot: %v", err) + } + return nil +} + +// mustCreateTree creates the specified tree using AdminStorage. 
+func mustCreateTree(ctx context.Context, t *testing.T, s storage.AdminStorage, tree *trillian.Tree) *trillian.Tree {
+	t.Helper()
+	tree, err := storage.CreateTree(ctx, s, tree)
+	if err != nil {
+		t.Fatalf("storage.CreateTree(): %v", err)
+	}
+	return tree
+}
diff --git a/storage/crdb/tree_storage.go b/storage/crdb/tree_storage.go
new file mode 100644
index 0000000000..8eb547ba8c
--- /dev/null
+++ b/storage/crdb/tree_storage.go
@@ -0,0 +1,389 @@
+// Copyright 2016 Trillian Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package crdb provides a CockroachDB-based storage layer implementation.
+package crdb
+
+import (
+	"context"
+	"database/sql"
+	"encoding/base64"
+	"fmt"
+	"runtime/debug"
+	"strings"
+	"sync"
+
+	"github.com/google/trillian"
+	"github.com/google/trillian/storage/cache"
+	"github.com/google/trillian/storage/storagepb"
+	"github.com/google/trillian/storage/tree"
+	"google.golang.org/protobuf/proto"
+	"k8s.io/klog/v2"
+)
+
+// These statements are fixed
+const (
+	insertSubtreeMultiSQL = `INSERT INTO Subtree(TreeId, SubtreeId, Nodes, SubtreeRevision) ` + placeholderSQL
+	// NOTE(jaosorior): While using the `ON CONFLICT DO NOTHING` clause
+	// simplifies the StoreSignedLogRoot logic, it may lead to an
+	// unintuitive error message when trying to insert a duplicate.
+ insertTreeHeadSQL = `INSERT INTO TreeHead(TreeId,TreeHeadTimestamp,TreeSize,RootHash,TreeRevision,RootSignature) + VALUES($1,$2,$3,$4,$5,$6) + ON CONFLICT DO NOTHING` + + selectSubtreeSQL = ` + SELECT x.SubtreeId, x.MaxRevision, Subtree.Nodes + FROM ( + SELECT n.TreeId, n.SubtreeId, max(n.SubtreeRevision) AS MaxRevision + FROM Subtree n + WHERE n.SubtreeId IN (` + placeholderSQL + `) AND + n.TreeId = ? AND n.SubtreeRevision <= ? + GROUP BY n.TreeId, n.SubtreeId + ) AS x + INNER JOIN Subtree + ON Subtree.SubtreeId = x.SubtreeId + AND Subtree.SubtreeRevision = x.MaxRevision + AND Subtree.TreeId = x.TreeId + AND Subtree.TreeId = ?` + placeholderSQL = "" +) + +// crdbTreeStorage contains common functionality for log/map storage +type crdbTreeStorage struct { + db *sql.DB + + // Must hold the mutex before manipulating the statement map. Sharing a lock because + // it only needs to be held while the statements are built, not while they execute and + // this will be a short time. These maps are from the number of placeholder '?' + // in the query to the statement that should be used. + statementMutex sync.Mutex + statements map[string]map[int]*sql.Stmt +} + +// OpenDB opens a database connection to the specified database. +func OpenDB(dbURL string) (*sql.DB, error) { + db, err := sql.Open("postgres", dbURL) + if err != nil { + klog.Warningf("Failed to open CRDB database: %v", err) + return nil, err + } + + // TODO(jaosorior): Set up retry logic so we don't immediately fail + // if the database hasn't started yet. This is useful when deployed + // in Kubernetes + if err := db.Ping(); err != nil { + klog.Warningf("failed verifying database connection: %v", err) + return nil, err + } + + return db, nil +} + +func newTreeStorage(db *sql.DB) *crdbTreeStorage { + return &crdbTreeStorage{ + db: db, + statements: make(map[string]map[int]*sql.Stmt), + } +} + +// expandPlaceholderSQL expands an sql statement by adding a specified number of '?' +// placeholder slots. 
At most one placeholder will be expanded. +func expandPlaceholderSQL(sql string, num int, first, rest string) string { + if num <= 0 { + panic(fmt.Errorf("trying to expand SQL placeholder with <= 0 parameters: %s", sql)) + } + + parameters := first + strings.Repeat(","+rest, num-1) + + return fromMySQLToPGPreparedStatement(strings.Replace(sql, placeholderSQL, parameters, 1)) +} + +// getStmt creates and caches sql.Stmt structs based on the passed in statement +// and number of bound arguments. +// TODO(al,martin): consider pulling this all out as a separate unit for reuse +// elsewhere. +func (m *crdbTreeStorage) getStmt(ctx context.Context, statement string, num int, first, rest string) (*sql.Stmt, error) { + m.statementMutex.Lock() + defer m.statementMutex.Unlock() + + if m.statements[statement] != nil { + if m.statements[statement][num] != nil { + // TODO(al,martin): we'll possibly need to expire Stmts from the cache, + // e.g. when DB connections break etc. + return m.statements[statement][num], nil + } + } else { + m.statements[statement] = make(map[int]*sql.Stmt) + } + + s, err := m.db.PrepareContext(ctx, expandPlaceholderSQL(statement, num, first, rest)) + if err != nil { + klog.Warningf("Failed to prepare statement %d: %s", num, err) + return nil, err + } + + m.statements[statement][num] = s + + return s, nil +} + +func (m *crdbTreeStorage) getSubtreeStmt(ctx context.Context, num int) (*sql.Stmt, error) { + return m.getStmt(ctx, selectSubtreeSQL, num, "?", "?") +} + +func (m *crdbTreeStorage) setSubtreeStmt(ctx context.Context, num int) (*sql.Stmt, error) { + return m.getStmt(ctx, insertSubtreeMultiSQL, num, "VALUES(?, ?, ?, ?)", "(?, ?, ?, ?)") +} + +func (m *crdbTreeStorage) beginTreeTx(ctx context.Context, tree *trillian.Tree, hashSizeBytes int, subtreeCache *cache.SubtreeCache) (treeTX, error) { + t, err := m.db.BeginTx(ctx, nil /* opts */) + if err != nil { + klog.Warningf("Could not start tree TX: %s", err) + return treeTX{}, err + } + return treeTX{ 
+ tx: t, + mu: &sync.Mutex{}, + ts: m, + treeID: tree.TreeId, + treeType: tree.TreeType, + hashSizeBytes: hashSizeBytes, + subtreeCache: subtreeCache, + writeRevision: -1, + }, nil +} + +type treeTX struct { + // mu ensures that tx can only be used for one query/exec at a time. + mu *sync.Mutex + closed bool + tx *sql.Tx + ts *crdbTreeStorage + treeID int64 + treeType trillian.TreeType + hashSizeBytes int + subtreeCache *cache.SubtreeCache + writeRevision int64 +} + +func (t *treeTX) getSubtrees(ctx context.Context, treeRevision int64, ids [][]byte) ([]*storagepb.SubtreeProto, error) { + klog.V(2).Infof("getSubtrees(len(ids)=%d)", len(ids)) + klog.V(4).Infof("getSubtrees(") + if len(ids) == 0 { + return nil, nil + } + + tmpl, err := t.ts.getSubtreeStmt(ctx, len(ids)) + if err != nil { + return nil, err + } + stx := t.tx.StmtContext(ctx, tmpl) + defer stx.Close() + + args := make([]interface{}, 0, len(ids)+3) + + // populate args with ids. + for _, id := range ids { + klog.V(4).Infof(" id: %x", id) + args = append(args, id) + } + + args = append(args, t.treeID) + args = append(args, treeRevision) + args = append(args, t.treeID) + + rows, err := stx.QueryContext(ctx, args...) 
+ if err != nil { + klog.Warningf("Failed to get merkle subtrees: %s", err) + return nil, err + } + defer rows.Close() + + if rows.Err() != nil { + // Nothing from the DB + klog.Warningf("Nothing from DB: %s", rows.Err()) + return nil, rows.Err() + } + + ret := make([]*storagepb.SubtreeProto, 0, len(ids)) + + for rows.Next() { + var subtreeIDBytes []byte + var subtreeRev int64 + var nodesRaw []byte + if err := rows.Scan(&subtreeIDBytes, &subtreeRev, &nodesRaw); err != nil { + klog.Warningf("Failed to scan merkle subtree: %s", err) + return nil, err + } + var subtree storagepb.SubtreeProto + if err := proto.Unmarshal(nodesRaw, &subtree); err != nil { + klog.Warningf("Failed to unmarshal SubtreeProto: %s", err) + return nil, err + } + if subtree.Prefix == nil { + subtree.Prefix = []byte{} + } + ret = append(ret, &subtree) + + if klog.V(4).Enabled() { + klog.Infof(" subtree: NID: %x, prefix: %x, depth: %d", + subtreeIDBytes, subtree.Prefix, subtree.Depth) + for k, v := range subtree.Leaves { + b, err := base64.StdEncoding.DecodeString(k) + if err != nil { + klog.Errorf("base64.DecodeString(%v): %v", k, err) + } + klog.Infof(" %x: %x", b, v) + } + } + } + + // The InternalNodes cache is possibly nil here, but the SubtreeCache (which called + // this method) will re-populate it. + return ret, nil +} + +func (t *treeTX) storeSubtrees(ctx context.Context, subtrees []*storagepb.SubtreeProto) error { + klog.V(2).Infof("storeSubtrees(len(subtrees)=%d)", len(subtrees)) + if klog.V(4).Enabled() { + klog.Infof("storeSubtrees(") + for _, s := range subtrees { + klog.Infof(" prefix: %x, depth: %d", s.Prefix, s.Depth) + for k, v := range s.Leaves { + b, err := base64.StdEncoding.DecodeString(k) + if err != nil { + klog.Errorf("base64.DecodeString(%v): %v", k, err) + } + klog.Infof(" %x: %x", b, v) + } + } + } + if len(subtrees) == 0 { + return nil + } + + // TODO(al): probably need to be able to batch this in the case where we have + // a really large number of subtrees to store. 
+ args := make([]interface{}, 0, len(subtrees)) + + for _, s := range subtrees { + s := s + if s.Prefix == nil { + panic(fmt.Errorf("nil prefix on %v", s)) + } + subtreeBytes, err := proto.Marshal(s) + if err != nil { + return err + } + args = append(args, t.treeID) + args = append(args, s.Prefix) + args = append(args, subtreeBytes) + args = append(args, t.writeRevision) + } + + tmpl, err := t.ts.setSubtreeStmt(ctx, len(subtrees)) + if err != nil { + return err + } + stx := t.tx.StmtContext(ctx, tmpl) + defer stx.Close() + + r, err := stx.ExecContext(ctx, args...) + if err != nil { + klog.Warningf("Failed to set merkle subtrees: %s", err) + return err + } + _, _ = r.RowsAffected() + return nil +} + +func checkResultOkAndRowCountIs(res sql.Result, err error, count int64) error { + // The Exec() might have just failed + if err != nil { + return crdbToGRPC(err) + } + + // Otherwise we have to look at the result of the operation + rowsAffected, rowsError := res.RowsAffected() + + if rowsError != nil { + return crdbToGRPC(rowsError) + } + + if rowsAffected != count { + return fmt.Errorf("expected %d row(s) to be affected but saw: %d", count, + rowsAffected) + } + + return nil +} + +// getSubtreesAtRev returns a GetSubtreesFunc which reads at the passed in rev. 
+func (t *treeTX) getSubtreesAtRev(ctx context.Context, rev int64) cache.GetSubtreesFunc { + return func(ids [][]byte) ([]*storagepb.SubtreeProto, error) { + return t.getSubtrees(ctx, rev, ids) + } +} + +func (t *treeTX) SetMerkleNodes(ctx context.Context, nodes []tree.Node) error { + t.mu.Lock() + defer t.mu.Unlock() + rev := t.writeRevision - 1 + return t.subtreeCache.SetNodes(nodes, t.getSubtreesAtRev(ctx, rev)) +} + +func (t *treeTX) Commit(ctx context.Context) error { + t.mu.Lock() + defer t.mu.Unlock() + + if t.writeRevision > -1 { + tiles, err := t.subtreeCache.UpdatedTiles() + if err != nil { + klog.Warningf("SubtreeCache updated tiles error: %v", err) + return err + } + if err := t.storeSubtrees(ctx, tiles); err != nil { + klog.Warningf("TX commit flush error: %v", err) + return err + } + } + t.closed = true + if err := t.tx.Commit(); err != nil { + klog.Warningf("TX commit error: %s, stack:\n%s", err, string(debug.Stack())) + return err + } + return nil +} + +func (t *treeTX) rollbackInternal() error { + t.closed = true + if err := t.tx.Rollback(); err != nil { + klog.Warningf("TX rollback error: %s, stack:\n%s", err, string(debug.Stack())) + return err + } + return nil +} + +func (t *treeTX) Close() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.closed { + return nil + } + err := t.rollbackInternal() + if err != nil { + klog.Warningf("Rollback error on Close(): %v", err) + } + return err +} diff --git a/storage/mysql/storage_test.go b/storage/mysql/storage_test.go index 998c4cc619..750dd1223f 100644 --- a/storage/mysql/storage_test.go +++ b/storage/mysql/storage_test.go @@ -222,7 +222,7 @@ func diffNodes(got, want []stree.Node) ([]stree.Node, []stree.Node) { } func openTestDBOrDie() (*sql.DB, func(context.Context)) { - db, done, err := testdb.NewTrillianDB(context.TODO()) + db, done, err := testdb.NewTrillianDB(context.TODO(), testdb.DriverMySQL) if err != nil { panic(err) } diff --git a/storage/testdb/testdb.go b/storage/testdb/testdb.go index 
c1970ff88a..84b2fa322e 100644 --- a/storage/testdb/testdb.go +++ b/storage/testdb/testdb.go @@ -22,6 +22,7 @@ import ( "fmt" "io/ioutil" "log" + "net/url" "os" "strings" "testing" @@ -32,6 +33,7 @@ import ( "k8s.io/klog/v2" _ "github.com/go-sql-driver/mysql" // mysql driver + _ "github.com/lib/pq" // postgres driver ) const ( @@ -41,9 +43,47 @@ const ( // Note: sql.Open requires the URI to end with a slash. defaultTestMySQLURI = "root@tcp(127.0.0.1)/" + + // CockroachDBURIEnv is the name of the ENV variable checked for the test CockroachDB + // instance URI to use. The value must have a trailing slash. + CockroachDBURIEnv = "TEST_COCKROACHDB_URI" + + defaultTestCockroachDBURI = "postgres://root@localhost:26257/?sslmode=disable" +) + +type storageDriverInfo struct { + sqlDriverName string + schema string + uriFunc func(paths ...string) string +} + +var ( + trillianMySQLSchema = testonly.RelativeToPackage("../mysql/schema/storage.sql") + trillianCRDBSchema = testonly.RelativeToPackage("../crdb/schema/storage.sql") ) -var trillianSQL = testonly.RelativeToPackage("../mysql/schema/storage.sql") +// DriverName is the name of a database driver. +type DriverName string + +const ( + // DriverMySQL is the identifier for the MySQL storage driver. + DriverMySQL DriverName = "mysql" + // DriverCockroachDB is the identifier for the CockroachDB storage driver. + DriverCockroachDB DriverName = "cockroachdb" +) + +var driverMapping = map[DriverName]storageDriverInfo{ + DriverMySQL: { + sqlDriverName: "mysql", + schema: trillianMySQLSchema, + uriFunc: mysqlURI, + }, + DriverCockroachDB: { + sqlDriverName: "postgres", + schema: trillianCRDBSchema, + uriFunc: crdbURI, + }, +} // mysqlURI returns the MySQL connection URI to use for tests. It returns the // value in the ENV variable defined by MySQLURIEnv. 
If the value is empty, @@ -53,16 +93,73 @@ var trillianSQL = testonly.RelativeToPackage("../mysql/schema/storage.sql") // of the tests in this repo require a database and import this package. With a // flag, it would be necessary to distinguish "go test" invocations that need a // database, and those that don't. ENV allows to "blanket apply" this setting. -func mysqlURI() string { +func mysqlURI(dbRef ...string) string { + var stringurl string if e := os.Getenv(MySQLURIEnv); len(e) > 0 { - return e + stringurl = e + } else { + stringurl = defaultTestMySQLURI + } + + for _, ref := range dbRef { + separator := "/" + if strings.HasSuffix(stringurl, "/") { + separator = "" + } + stringurl = strings.Join([]string{stringurl, ref}, separator) + } + + return stringurl +} + +// crdbURI returns the CockroachDB connection URI to use for tests. It returns the +// value in the ENV variable defined by CockroachDBURIEnv. If the value is empty, +// returns defaultTestCockroachDBURI. +func crdbURI(dbRef ...string) string { + var uri *url.URL + if e := os.Getenv(CockroachDBURIEnv); len(e) > 0 { + uri = getURL(e) + } else { + uri = getURL(defaultTestCockroachDBURI) } - return defaultTestMySQLURI + + return addPathToURI(uri, dbRef...) +} + +func addPathToURI(uri *url.URL, paths ...string) string { + if len(paths) > 0 { + for _, ref := range paths { + currentPaths := uri.Path + // If the path is the root path, we don't want to append a slash. + if currentPaths == "/" { + currentPaths = "" + } + uri.Path = strings.Join([]string{currentPaths, ref}, "/") + } + } + return uri.String() +} + +func getURL(unparsedurl string) *url.URL { + //nolint:errcheck // We're not expecting an error here. + u, _ := url.Parse(unparsedurl) + return u } // MySQLAvailable indicates whether the configured MySQL database is available. 
func MySQLAvailable() bool { - db, err := sql.Open("mysql", mysqlURI()) + return dbAvailable(DriverMySQL) +} + +// CockroachDBAvailable indicates whether the configured CockroachDB database is available. +func CockroachDBAvailable() bool { + return dbAvailable(DriverCockroachDB) +} + +func dbAvailable(driver DriverName) bool { + driverName := driverMapping[driver].sqlDriverName + uri := driverMapping[driver].uriFunc() + db, err := sql.Open(driverName, uri) if err != nil { log.Printf("sql.Open(): %v", err) return false @@ -95,11 +192,17 @@ func SetFDLimit(uLimit uint64) error { // using the DB, the caller should not continue to use the returned DB after // calling this function as it may, for example, delete the underlying // instance. -func newEmptyDB(ctx context.Context) (*sql.DB, func(context.Context), error) { +func newEmptyDB(ctx context.Context, driver DriverName) (*sql.DB, func(context.Context), error) { if err := SetFDLimit(2048); err != nil { return nil, nil, err } - db, err := sql.Open("mysql", mysqlURI()) + + inf, gotinf := driverMapping[driver] + if !gotinf { + return nil, nil, fmt.Errorf("unknown driver %q", driver) + } + + db, err := sql.Open(inf.sqlDriverName, inf.uriFunc()) if err != nil { return nil, nil, err } @@ -113,7 +216,8 @@ func newEmptyDB(ctx context.Context) (*sql.DB, func(context.Context), error) { } db.Close() - db, err = sql.Open("mysql", mysqlURI()+name) + uri := inf.uriFunc(name) + db, err = sql.Open(inf.sqlDriverName, uri) if err != nil { return nil, nil, err } @@ -131,13 +235,15 @@ func newEmptyDB(ctx context.Context) (*sql.DB, func(context.Context), error) { // NewTrillianDB creates an empty database with the Trillian schema. The database name is randomly // generated. // NewTrillianDB is equivalent to Default().NewTrillianDB(ctx). 
-func NewTrillianDB(ctx context.Context) (*sql.DB, func(context.Context), error) { - db, done, err := newEmptyDB(ctx) +func NewTrillianDB(ctx context.Context, driver DriverName) (*sql.DB, func(context.Context), error) { + db, done, err := newEmptyDB(ctx, driver) if err != nil { return nil, nil, err } - sqlBytes, err := ioutil.ReadFile(trillianSQL) + schema := driverMapping[driver].schema + + sqlBytes, err := ioutil.ReadFile(schema) if err != nil { return nil, nil, err } @@ -175,3 +281,12 @@ func SkipIfNoMySQL(t *testing.T) { } t.Logf("Test MySQL available at %q", mysqlURI()) } + +// SkipIfNoCockroachDB is a test helper that skips tests that require a local CockroachDB. +func SkipIfNoCockroachDB(t *testing.T) { + t.Helper() + if !CockroachDBAvailable() { + t.Skip("Skipping test as CockroachDB not available") + } + t.Logf("Test CockroachDB available at %q", crdbURI()) +} diff --git a/testonly/integration/logenv.go b/testonly/integration/logenv.go index f15b938b94..6806a9ecfe 100644 --- a/testonly/integration/logenv.go +++ b/testonly/integration/logenv.go @@ -85,7 +85,8 @@ func NewLogEnv(ctx context.Context, numSequencers int, _ string) (*LogEnv, error // if numSequencers is zero a manually-controlled test sequencer is used. // Additional grpc.ServerOption and grpc.DialOption values can be provided. func NewLogEnvWithGRPCOptions(ctx context.Context, numSequencers int, serverOpts []grpc.ServerOption, clientOpts []grpc.DialOption) (*LogEnv, error) { - db, done, err := testdb.NewTrillianDB(ctx) + // TODO(jaosorior): Make this configurable for Cockroach or MySQL + db, done, err := testdb.NewTrillianDB(ctx, testdb.DriverMySQL) if err != nil { return nil, err } diff --git a/testonly/integration/registry.go b/testonly/integration/registry.go index 64cce2715e..dc9a0fb9a9 100644 --- a/testonly/integration/registry.go +++ b/testonly/integration/registry.go @@ -26,8 +26,8 @@ import ( // NewRegistryForTests returns an extension.Registry for integration tests. 
// Callers should call the returned cleanup function when they're finished // with the registry and its contents. -func NewRegistryForTests(ctx context.Context) (extension.Registry, func(context.Context), error) { - db, done, err := testdb.NewTrillianDB(ctx) +func NewRegistryForTests(ctx context.Context, driver testdb.DriverName) (extension.Registry, func(context.Context), error) { + db, done, err := testdb.NewTrillianDB(ctx, driver) if err != nil { return extension.Registry{}, nil, err }