diff --git a/go.mod b/go.mod index 9366f9c4da633..6ab023268a763 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d + github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 github.com/davecgh/go-spew v1.1.1 github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect @@ -38,24 +38,23 @@ require ( github.com/klauspost/compress v1.9.5 github.com/mitchellh/mapstructure v1.2.2 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f - github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 + github.com/opentracing/opentracing-go v1.2.0 github.com/pierrec/lz4 v2.5.3-0.20200429092203-e876bbd321b3+incompatible github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.7.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.10.0 - github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c + github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 github.com/segmentio/fasthash v1.0.2 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 - github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd + github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c github.com/stretchr/testify v1.5.1 github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 - github.com/uber/jaeger-client-go v2.23.1+incompatible + github.com/uber/jaeger-client-go v2.25.0+incompatible github.com/ugorji/go v1.1.7 // indirect - github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5 + github.com/weaveworks/common 
v0.0.0-20200625145055-4b1847531bc9 go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 - golang.org/x/net v0.0.0-20200602114024-627f9648deb9 - golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect + golang.org/x/net v0.0.0-20200707034311-ab3426394381 google.golang.org/grpc v1.29.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 @@ -67,8 +66,6 @@ replace github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20191024143944 replace github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible -replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.0+incompatible - replace k8s.io/client-go => k8s.io/client-go v0.18.3 // >v1.2.0 has some conflict with prometheus/alertmanager. Hence prevent the upgrade till it's fixed. diff --git a/go.sum b/go.sum index eb0a06caa2330..c81fb4d72144c 100644 --- a/go.sum +++ b/go.sum @@ -54,26 +54,35 @@ github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFE github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v13.3.0+incompatible h1:8Ix0VdeOllBx9jEcZ2Wb1uqWUpE1awmJiaHztwaJCPk= -github.com/Azure/go-autorest v13.3.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod 
h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503 h1:uUhdsDMg2GbFLF5GfQPtLMWd5vdDZSfqvqQp3waafxQ= github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.10.2 h1:NuSF3gXetiHyUbVdneJMEVyPUYAe5wh+aN08JYAf1tI= github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.2 h1:BR5GoSGobeiMwGOOIxXuvNKNPy+HMGdteKB8kJUDnBE= +github.com/Azure/go-autorest/autorest v0.11.2/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 
h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 h1:2McfZNaDqGPjv2pddK547PENIk4HV+NT7gvqRq4L0us= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= @@ -82,8 +91,12 @@ github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2a github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod 
h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -159,6 +172,10 @@ github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.9 h1:n+b34ydVfgC30j0Qm69yaapmjejQPW2BoDBX7Uy/tLI= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U= +github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4= +github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -190,6 +207,8 @@ github.com/cenkalti/backoff v1.0.0 h1:2XeuDgvPv/6QDyzIuxb6n36ADVocyqTLlOSpYBGYtv github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod 
h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -232,8 +251,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= -github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d h1:L20hHQCwVNj8eQDCvTmYA3Q8Xai8ZH5LcpgTiMuOWK8= -github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d/go.mod h1:9Iy6tOOITdQb5Q8Boj9ke/q7AyztcJlnmgpSUW/k1eM= +github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 h1:KseIJ2j4OJ8Vt9B2dpUyAgqgoeoRtFxLydxabmTToDg= +github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2/go.mod h1:zBfkUqePbDsIbPaClWi31N3wC93h76vu0ONPNYQitCs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= @@ -260,6 +279,10 @@ github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod 
h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/digitalocean/godo v1.37.0 h1:NEj5ne2cvLBHo1GJY1DNN/iEt9ipa72CMwwAjKEA530= github.com/digitalocean/godo v1.37.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.38.0 h1:to+pLe5RJqflJiyxhaLJfJgT3YzwHRSg19mOWkKt6A0= +github.com/digitalocean/godo v1.38.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.42.0 h1:xQlEFLhQ1zZUryJAfiWb8meLPPCWnLO901U5Imhh0Mc= +github.com/digitalocean/godo v1.42.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -408,6 +431,8 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8 github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.7 h1:0xWSeMd35y5avQAThZR2PkEuqSosoS5t6gDH4L8n11M= github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -553,6 +578,7 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -578,6 +604,8 @@ github.com/gophercloud/gophercloud v0.6.0 h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gophercloud/gophercloud v0.11.0 h1:pYMP9UZBdQa3lsfIZ1tZor4EbtxiuB6BHhocenkiH/E= github.com/gophercloud/gophercloud v0.11.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.12.0 h1:mZrie07npp6ODiwHZolTicr5jV8Ogn43AvAsSMm6Ork= +github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -622,12 +650,16 @@ github.com/hashicorp/consul/api v1.3.0 h1:HXNYlRkkM/t+Y/Yhxtwcy02dlYwIaoxzvxPnS+ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3Kp9EY= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= 
+github.com/hashicorp/consul/api v1.5.0 h1:Yo2bneoGy68A7aNwmuETFnPhjyBEm7n3vzRacEVMjvI= +github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0 h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0 h1:zBtCfKJZcJDBvSCkQJch4ulp59m1rATFLKwNo/LYY30= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u0zDAs= +github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -680,6 +712,8 @@ github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8 github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.0 h1:WeeNspppWi5s1OFefTviPQueC/Bq8dONfvNjPhiEQKE= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf 
v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= @@ -699,6 +733,7 @@ github.com/influxdata/go-syslog/v3 v3.0.1-0.20200510134747-836dce2cf6da h1:yEutt github.com/influxdata/go-syslog/v3 v3.0.1-0.20200510134747-836dce2cf6da/go.mod h1:aXdIdfn2OcGnMhOTojXmwZqXKgC3MU5riiNvzwwG9OY= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= +github.com/influxdata/influxdb v1.8.1/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -781,6 +816,8 @@ github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLg github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 h1:yZJImkCmVI6d1uJ9KRRf/96YbFLDQ/hhs6Xt9Z3OBXI= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -852,6 +889,8 @@ github.com/miekg/dns v1.1.22/go.mod 
h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.29 h1:xHBEhR+t5RzcFJjBLJlax2daXOrTYtr9z4WdKEfWFzg= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo= +github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= github.com/minio/minio-go/v6 v6.0.56 h1:H4+v6UFV1V7VkEf1HjL15W9OvTL1Gy8EbMmjQZHqEbg= github.com/minio/minio-go/v6 v6.0.56/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX37i2AX0jfI= @@ -944,6 +983,8 @@ github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -951,6 +992,8 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 
github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 h1:Oi9nYnU9jbiUVyoRTQfMpSdGzNVmEI+/9fija3lcnjU= github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785/go.mod h1:C+iumr2ni468+1jvcHXLCdqP9uQnoQbdX93F3aWahWU= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= @@ -986,6 +1029,8 @@ github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOV github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= github.com/prometheus/alertmanager v0.20.0 h1:PBMNY7oyIvYMBBIag35/C0hO7xn8+35p4V5rNAph5N8= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/alertmanager v0.21.0 h1:qK51JcUR9l/unhawGA9F9B64OCYfcGewhPNprem/Acc= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -1049,8 +1094,10 @@ github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1: github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod 
h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= github.com/prometheus/prometheus v1.8.2-0.20200619100132-74207c04655e/go.mod h1:QV6T0PPQi5UFmqcLBJw3JiyIR8r1O7KEv9qlVw4VV40= -github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c h1:Iz2q3wgo4xiURb7Ku0MCrM7osAVHX03lF1vHNht1fb8= -github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c/go.mod h1:/kMSPIRsxr/apyHxlzYMdFnaPXUXXqILU5uzIoNhOvc= +github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32 h1:GcJMaFu1uu6rSueToTRZuVS3AiORbFtLEDMUfp4GA9Q= +github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32/go.mod h1:+/y4DzJ62qmhy0o/H4PtXegRXw+80E8RVRHhLbv+bkM= +github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 h1:aRBuOcI/bN5f/UqmIGn8CajY6W0mPBEajK8q+SFgNZY= +github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852/go.mod h1:yzkxU+U4d5ZgVH/ywg/zONKN91UPLKsKCYkcyGOBH18= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1062,6 +1109,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1069,6 +1118,8 @@ github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVI github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -1093,6 +1144,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c h1:XLPw6rny9Vrrvrzhw8pNLrC2+x/kH0a/3gOx5xWDa6Y= +github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod 
h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1159,6 +1212,10 @@ github.com/uber/jaeger-client-go v2.20.1+incompatible h1:HgqpYBng0n7tLJIlyT4kPCI github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible h1:uArBYHQR0HqLFFAypI7RsWTzPSj/bDpmZZuQjMLSg1A= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngByMrXoGHh3n8oCqAKx0uMo= +github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= @@ -1172,8 +1229,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= -github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5 h1:EYxr08r8x6r/5fLEAMMkida1BVgxVXE4LfZv/XV+znU= -github.com/weaveworks/common 
v0.0.0-20200512154658-384f10054ec5/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= +github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9 h1:dNVIG9aKQHR9T4uYAC4YxmkHHryOsfTwsL54WrS7u28= +github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1184,8 +1241,10 @@ github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= @@ -1225,6 +1284,8 @@ go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= 
go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1264,6 +1325,8 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825 h1:dSChiwOTvzwbHFTMq2l6uRardHH7/E6SqEkqccinS/o= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1302,6 +1365,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net 
v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1344,8 +1409,12 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1365,6 +1434,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= 
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1435,6 +1506,8 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c h1:UIcGWL6/wpCfyGuJnRFJRurA+yj8RrW7Q6x2YMCXt6c= +golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1449,6 +1522,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqG golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1485,6 +1560,7 @@ golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= @@ -1507,8 +1583,13 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200603131246-cc40288be839 
h1:SxYgZ5FbVts/fm9UsuLycOG8MRWJPm7krdhgPQSayUs= golang.org/x/tools v0.0.0-20200603131246-cc40288be839/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1 h1:rD1FcWVsRaMY+l8biE9jbWP5MS/CJJ/90a9TMkMgNrM= +golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200725200936-102e7d357031 h1:VtIxiVHWPhnny2ZTi4f9/2diZKqyLaq3FUTuud5+khA= +golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -1535,6 +1616,8 @@ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08= google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1575,6 +1658,10 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200603110839-e855014d5736 
h1:+IE3xTD+6Eb7QWG5JFp+dQr/XjKpjmrNkh4pdjTdHEs= google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpdxSeUrm7rTvoFckBKaf7gTzgmHyDA= +google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 h1:AWgNCmk2V5HZp9AiCDRBExX/b9I0Ey9F8STHDZlhCC4= +google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1648,6 +1735,8 @@ gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8 h1:jL/vaozO53FMfZLySWM+4nulF3gQEC6q5jH90LPomDo= gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1666,12 +1755,20 @@ k8s.io/api v0.0.0-20191115095533-47f6de673b26 h1:6L7CEQVcduEr9eUPN3r3RliLvDrvcan k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= 
k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM= +k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk= +k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= +k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2 h1:TSH6UZ+y3etc/aDbVqow1NT8o7SJXkxhLKbp3Ywhyvg= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.18.5 h1:Lh6tgsM9FMkC12K5T5QjRm7rDs6aQN5JHkA0JomULDM= +k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 08946de71d5a5..cd01cdbdc77da 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -103,6 +103,12 @@ func (t *Loki) initRuntimeConfig() (services.Service, error) { t.cfg.RuntimeConfig.LoadPath = t.cfg.LimitsConfig.PerTenantOverrideConfig t.cfg.RuntimeConfig.ReloadPeriod = t.cfg.LimitsConfig.PerTenantOverridePeriod } + + if 
t.cfg.RuntimeConfig.LoadPath == "" { + // no need to initialize module if load path is empty + return nil, nil + } + t.cfg.RuntimeConfig.Loader = loadRuntimeConfig // make sure to set default limits before we start loading configuration into memory @@ -226,7 +232,7 @@ func (t *Loki) initTableManager() (services.Service, error) { os.Exit(1) } - tableClient, err := storage.NewTableClient(lastConfig.IndexType, t.cfg.StorageConfig.Config) + tableClient, err := storage.NewTableClient(lastConfig.IndexType, t.cfg.StorageConfig.Config, prometheus.DefaultRegisterer) if err != nil { return nil, err } diff --git a/pkg/loki/runtime_config.go b/pkg/loki/runtime_config.go index af65d8d30dde3..a26d9dc2c952d 100644 --- a/pkg/loki/runtime_config.go +++ b/pkg/loki/runtime_config.go @@ -1,7 +1,7 @@ package loki import ( - "os" + "io" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" @@ -19,15 +19,10 @@ type runtimeConfigValues struct { Multi kv.MultiRuntimeConfig `yaml:"multi_kv_config"` } -func loadRuntimeConfig(filename string) (interface{}, error) { - f, err := os.Open(filename) - if err != nil { - return nil, err - } - +func loadRuntimeConfig(r io.Reader) (interface{}, error) { var overrides = &runtimeConfigValues{} - decoder := yaml.NewDecoder(f) + decoder := yaml.NewDecoder(r) decoder.SetStrict(true) if err := decoder.Decode(&overrides); err != nil { return nil, err diff --git a/pkg/storage/store.go b/pkg/storage/store.go index abe396ea2952f..82f0078ad3cca 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -68,7 +68,7 @@ func NewTableClient(name string, cfg Config) (chunk.TableClient, error) { name = "boltdb" cfg.FSConfig = cortex_local.FSConfig{Directory: cfg.BoltDBShipperConfig.ActiveIndexDirectory} } - return storage.NewTableClient(name, cfg.Config) + return storage.NewTableClient(name, cfg.Config, prometheus.DefaultRegisterer) } // decodeReq sanitizes an incoming request, rounds bounds, appends the 
__name__ matcher, diff --git a/pkg/storage/stores/util/object_client.go b/pkg/storage/stores/util/object_client.go index d7d532b247e79..1f822d7547044 100644 --- a/pkg/storage/stores/util/object_client.go +++ b/pkg/storage/stores/util/object_client.go @@ -46,6 +46,10 @@ func (p PrefixedObjectClient) Stop() { p.downstreamClient.Stop() } +func (p PrefixedObjectClient) PathSeparator() string { + return p.downstreamClient.PathSeparator() +} + func NewPrefixedObjectClient(downstreamClient chunk.ObjectClient, prefix string) chunk.ObjectClient { return PrefixedObjectClient{downstreamClient: downstreamClient, prefix: prefix} } diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 0000000000000..3350aaf706482 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,32 @@ +# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ +.vscode/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 0000000000000..d1f596bfc9b98 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,1004 @@ +# CHANGELOG + +## v14.2.0 + +- Added package comment to make `github.com/Azure/go-autorest` importable. + +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. 
+ +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. + +## v14.0.0 + +## Breaking Changes + +- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. + +## New Features + +- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. + +## v13.4.0 + +## New Features + +- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. +- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. + +## v13.3.3 + +### Bug Fixes + +- Fixed connection leak when retrying requests. +- Enabled exponential back-off with a 2-minute cap when retrying on 429. +- Fixed some cases where errors were inadvertently dropped. + +## v13.3.2 + +### Bug Fixes + +- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. + +## v13.3.1 + +- Updated external dependencies. + +### Bug Fixes + +## v13.3.0 + +### New Features + +- Added support for shared key and shared access signature token authorization. + - `autorest.NewSharedKeyAuthorizer()` and dependent types. + - `autorest.NewSASTokenAuthorizer()` and dependent types. +- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. + +### Bug Fixes + +- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. +- Support parsing error messages in XML responses. 
+ +## v13.2.0 + +### New Features + +- Added the following functions to replace their versions that don't take a context. + - `adal.InitiateDeviceAuthWithContext()` + - `adal.CheckForUserCompletionWithContext()` + - `adal.WaitForUserCompletionWithContext()` + +## v13.1.0 + +### New Features + +- Added support for MSI authentication on Azure App Service and Azure Functions. + +## v13.0.2 + +### Bug Fixes + +- Always retry a request even if the sender returns a non-nil error. + +## v13.0.1 + +## Bug Fixes + +- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. + +## v13.0.0 + +## Breaking Changes + +The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. +What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` +environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. +```go + import _ "github.com/Azure/go-autorest/tracing/opencensus" +``` +The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. +The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). +- tracing.Transport +- tracing.Enable() +- tracing.EnableWithAIForwarding() +- tracing.Disable() + +The following APIs and types have been added +- tracing.Tracer +- tracing.Register() + +To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface. + +## v12.4.3 + +### Bug Fixes + +- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens. + +## v12.4.2 + +### Bug Fixes + +- Improvements to the fixes made in v12.4.1. 
+ - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency. + - Switched to latest version of `ocagent` that still depends on protobuf v1.2. + - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum. + +## v12.4.1 + +### Bug Fixes + +- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes. +- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent. + +## v12.4.0 + +### New Features + +- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context. + +## v12.3.0 + +### New Features + +- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with + secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding + MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the reqest. + The authenticaion helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS + is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer. + See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer` + along with their supporting types and methods. +- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context. +- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. 
+ +## v12.2.0 + +### New Features + +- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. +- Added `autorest.ByUnmarshallingBytes` response decorator. +- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types. + +### Bug Fixes + +- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. + +## v12.1.0 + +### New Features + +- Added `to.ByteSlicePtr()`. +- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. + +## v12.0.0 + +### Breaking Changes + +In preparation for modules the following deprecated content has been removed. + + - async.NewFuture() + - async.Future.Done() + - async.Future.WaitForCompletion() + - async.DoPollForAsynchronous() + - The `utils` package + - validation.NewErrorWithValidationError() + - The `version` package + +## v11.9.0 + +### New Features + +- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. + +## v11.8.0 + +### New Features + +- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. + +## v11.7.1 + +### Bug Fixes + +- Fix missing support for http(s) proxy when using the default sender. + +## v11.7.0 + +### New Features + +- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. + +## v11.6.1 + +### Bug Fixes + +- Fix ACR DNS endpoint for government clouds. +- Add Cosmos DB DNS endpoints. +- Update dependencies to resolve build breaks in OpenCensus. + +## v11.6.0 + +### New Features + +- Added type `autorest.BasicAuthorizer` to support Basic authentication. + +## v11.5.2 + +### Bug Fixes + +- Fixed `GetTokenFromCLI` did not work with zsh. + +## v11.5.1 + +### Bug Fixes + +- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. 
+ +## v11.5.0 + +### New Features + +- The `auth` package has been refactored so that the environment and file settings are now available. +- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. +- Added support for certificate authorization for file-based config. + +## v11.4.0 + +### New Features + +- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. +- Exported `adal.UserAgent()` for parity with `autorest.Client`. + +## v11.3.2 + +### Bug Fixes + +- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. + +## v11.3.1 + +### Bug Fixes + +- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. + +## v11.3.0 + +### New Features + +- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. + +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. + +## v11.2.6 + +### Bug Fixes + +- If zero bytes are read from a polling response body don't attempt to unmarshal them. + +## v11.2.5 + +### Bug Fixes + +- Removed race condition in `autorest.DoRetryForStatusCodes`. + +## v11.2.4 + +### Bug Fixes + +- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. + +## v11.2.1 + +NOTE: Versions of Go prior to 1.10 have been removed from CI as they no +longer work with golint. + +### Bug Fixes + +- Method `MSIConfig.Authorizer` now supports user-assigned identities. +- The adal package now reports its own user-agent string. 
+ +## v11.2.0 + +### New Features + +- Added `tracing` package that enables instrumentation of HTTP and API calls. + Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` + will start instrumenting the code for metrics and traces. + Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or + calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an + App Insights Local Forwarder that is needs to be running. Note that if the + AI Local Forwarder is not running tracking will still be enabled. + By default, instrumentation is disabled. Once enabled, instrumentation can also + be programatically disabled by calling `Disable`. +- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. + +### Bug Fixes + +- Don't use the initial request's context for LRO polling. +- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if + it is already set. + +## v11.1.1 + +### Bug Fixes + +- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. + +## v11.1.0 + +### New Features + +- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. +- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. + +## v11.0.1 + +### New Features + +- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication. + +## v11.0.0 + +### Breaking Changes + +- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` + - ExpiresIn + - ExpiresOn + - NotBefore + +### New Features + +- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. 
+- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration. + +## v10.15.5 + +### Bug Fixes + +- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response. + +## v10.15.4 + +### Bug Fixes + +- If a polling operation returns a failure status code return the associated error. + +## v10.15.3 + +### Bug Fixes + +- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. + +## v10.15.2 + +### Bug Fixes + +- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. + +## v10.15.1 + +### Bug Fixes + +- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. +- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. + +## v10.15.0 + +### New Features + +- Add initial support for request/response logging via setting environment variables. + Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response + without their bodies. To include the bodies set the log level to `LogDebug`. + By default the logger writes to strerr, however it can also write to stdout or a file + if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file + already exists it will be truncated. + IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key + headers. Any other secrets will _not_ be redacted. + +## v10.14.0 + +### New Features + +- Added package version that contains version constants and user-agent data. + +### Bug Fixes + +- Add the user-agent to token requests. + +## v10.13.0 + +- Added support for additionalInfo in ServiceError type. 
+ +## v10.12.0 + +### New Features + +- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximun number of attempts to refresh an MSI token. + +## v10.11.4 + +### Bug Fixes + +- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). +- If there is no "final GET URL" return an error from Future.GetResult(). + +## v10.11.3 + +### Bug Fixes + +- In IMDS retry logic, if we don't receive a response don't retry. + - Renamed the retry function so it's clear it's meant for IMDS only. +- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. + - Also add the raw HTTP response to the DetailedResponse. +- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). + +## v10.11.2 + +### Bug Fixes + +- Validation for integers handles int and int64 types. + +## v10.11.1 + +### Bug Fixes + +- Adding User information to authorization config as parsed from CLI cache. + +## v10.11.0 + +### New Features + +- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret +- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token + +## v10.10.0 + +### New Features + +- Most ServicePrincipalTokens can now be marshalled/unmarshall to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). +- Added method ServicePrincipalToken.SetRefreshCallbacks(). + +## v10.9.2 + +### Bug Fixes + +- Refreshing a refresh token obtained from a web app authorization code now works. + +## v10.9.1 + +### Bug Fixes + +- The retry logic for MSI token requests now uses exponential backoff per the guidelines. +- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. 
+ +## v10.9.0 + +### Deprecated Methods + +| Old Method | New Method | +| -------------------------: | :---------------------------: | +| azure.NewFuture() | azure.NewFutureFromResponse() | +| Future.WaitForCompletion() | Future.WaitForCompletionRef() | + +### New Features + +- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. +- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. + +### Bug Fixes + +- Some futures failed to return their results, this should now be fixed. + +## v10.8.2 + +### Bug Fixes + +- Add nil-gaurd to token retry logic. + +## v10.8.1 + +### Bug Fixes + +- Return a TokenRefreshError if the sender fails on the initial request. +- Don't retry on non-temporary network errors. + +## v10.8.0 + +- Added NewAuthorizerFromEnvironmentWithResource() helper function. + +## v10.7.0 + +### New Features + +- Added \*WithContext() methods to ADAL token refresh operations. + +## v10.6.2 + +- Fixed a bug on device authentication. + +## v10.6.1 + +- Added retries to MSI token get request. + +## v10.6.0 + +- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint. + +## v10.5.1 + +### Bug Fixes + +- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. + +## v10.5.0 + +### New Features + +- Added NewPollingRequestWithContext() for use with polling asynchronous operations. + +### Bug Fixes + +- Make retry logic use the request's context instead of the deprecated Cancel object. + +## v10.4.0 + +### New Features + +- Added helper for parsing Azure Resource ID's. +- Added deprecation message to utils.GetEnvVarOrExit() + +## v10.3.0 + +### New Features + +- Added EnvironmentFromURL method to load an Environment from a given URL. 
This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints +- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint + +## v10.2.0 + +### New Features + +- Added endpoints for batch management. + +## v10.1.3 + +### Bug Fixes + +- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). +- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. + +## v10.1.2 + +- Corrected comment for auth.NewAuthorizerFromFile() function. + +## v10.1.1 + +- Updated version number to match current release. + +## v10.1.0 + +### New Features + +- Expose the polling URL for futures. + +### Bug Fixes + +- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). + +## v10.0.0 + +### New Features + +- Added target and innererror fields to ServiceError to comply with OData v4 spec. +- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). +- Added helper methods for obtaining authorizers. +- Expose the polling URL for futures. + +### Bug Fixes + +- Switched from glide to dep for dependency management. +- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. +- Fixed a race condition in token refresh. + +### Breaking Changes + +- The ServiceError.Details field type has been changed to match the OData v4 spec. +- Go v1.7 has been dropped from CI. +- API parameter validation failures will now return a unique error type validation.Error. +- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). 
+ +## v9.10.0 + +- Fix the Service Bus suffix in Azure public env +- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) + +## v9.9.0 + +### New Features + +- Added EventGridKeyAuthorizer for key authorization with event grid topics. + +### Bug Fixes + +- Fixed race condition when auto-refreshing service principal tokens. + +## v9.8.1 + +### Bug Fixes + +- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. +- Updated runtime version info so it's current. + +## v9.8.0 + +### New Features + +- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. + +## v9.7.1 + +### Bug Fixes + +- Use correct AAD and Graph endpoints for US Gov environment. + +## v9.7.0 + +### New Features + +- Added support for application/octet-stream MIME types. + +## v9.6.1 + +### Bug Fixes + +- Ensure Authorization header is added to request when polling for registration status. + +## v9.6.0 + +### New Features + +- Added support for acquiring tokens via MSI with a user assigned identity. + +## v9.5.3 + +### Bug Fixes + +- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. +- Set correct Content Type when using autorest.WithFormData. + +## v9.5.2 + +### Bug Fixes + +- Check for nil \*http.Response before dereferencing it. + +## v9.5.1 + +### Bug Fixes + +- Don't count http.StatusTooManyRequests (429) against the retry cap. +- Use retry logic when SkipResourceProviderRegistration is set to true. + +## v9.5.0 + +### New Features + +- Added support for username + password, API key, authoriazation code and cognitive services authentication. +- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs. 
+- Added utility function AsStringSlice() to convert its parameters to a string slice. + +### Bug Fixes + +- When checking for authentication failures look at the error type not the status code as it could vary. + +## v9.4.2 + +### Bug Fixes + +- Validate parameters when creating credentials. +- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed. + +## v9.4.1 + +### Bug Fixes + +- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this + environment variable is not set, it will fall back to use default path set by Azure CLI. +- Use case-insensitive string comparison for polling states. + +## v9.4.0 + +### New Features + +- Added WaitForCompletion() to Future as a default polling implementation. + +### Bug Fixes + +- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. + +## v9.3.1 + +### Bug Fixes + +- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. + +## v9.3.0 + +### New Features + +- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. +- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). + +## v9.2.0 + +### New Features + +- Added support for custom Azure Stack endpoints. +- Added type azure.Future used to track the status of long-running operations. + +### Bug Fixes + +- Preserve the original error in DoRetryWithRegistration when registration fails. + +## v9.1.1 + +- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. + +## v9.1.0 + +### New Features + +- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. +- Support for loading Azure CLI Authentication files. +- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. 
+ +### Bug Fixes + +- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. +- Adding missing Apache Headers + +## v9.0.0 + +> **IMPORTANT:** This release was intially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We appologize for any inconvenience this causes. + +Adding MSI Endpoint Support and CLI token rehydration. + +## v8.3.1 + +Pick up bug fix in adal for MSI support. + +## v8.3.0 + +Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience. + +## v8.2.0 + +### New Features + +- Add support for bearer authentication callbacks +- Support 429 response codes that include "Retry-After" header +- Support validation constraint "Pattern" for map keys + +### Bug Fixes + +- Make RetriableRequest work with multiple versions of Go + +## v8.1.1 + +Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. + +## v8.1.0 + +Adds RetriableRequest type for more efficient handling of retrying HTTP requests. + +## v8.0.0 + +ADAL refactored into its own package. +Support for UNIX time. + +## v7.3.1 + +- Version Testing now removed from production bits that are shipped with the library. + +## v7.3.0 + +- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations + to acknowledge that they do not need either the entire or a trailing portion + of accepts response body. In doing so, Go's http library can reuse HTTP + connections more readily. +- Adding `PrepareDecorator` to target custom BaseURLs. +- Adding ACR suffix to public cloud environment. +- Updating Glide dependencies. + +## v7.2.5 + +- Fixed the Active Directory endpoint for the China cloud. +- Removes UTF-8 BOM if present in response payload. +- Added telemetry. + +## v7.2.3 + +- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay + duration. 
+ +## v7.2.2 + +- autorest/azure: added ASM and ARM VM DNS suffixes. + +## v7.2.1 + +- fixed parsing of UTC times that are not RFC3339 conformant. + +## v7.2.0 + +- autorest/validation: Reformat validation error for better error message. + +## v7.1.0 + +- preparer: Added support for multipart formdata - WithMultiPartFormdata() +- preparer: Added support for sending file in request body - WithFile +- client: Added RetryDuration parameter. +- autorest/validation: new package for validation code for Azure Go SDK. + +## v7.0.7 + +- Add trailing / to endpoint +- azure: add EnvironmentFromName + +## v7.0.6 + +- Add retry logic for 408, 500, 502, 503 and 504 status codes. +- Change url path and query encoding logic. +- Fix DelayForBackoff for proper exponential delay. +- Add CookieJar in Client. + +## v7.0.5 + +- Add check to start polling only when status is in [200,201,202]. +- Refactoring for unchecked errors. +- azure/persist changes. +- Fix 'file in use' issue in renewing token in deviceflow. +- Store header RetryAfter for subsequent requests in polling. +- Add attribute details in service error. + +## v7.0.4 + +- Better error messages for long running operation failures + +## v7.0.3 + +- Corrected DoPollForAsynchronous to properly handle the initial response + +## v7.0.2 + +- Corrected DoPollForAsynchronous to continue using the polling method first discovered + +## v7.0.1 + +- Fixed empty JSON input error in ByUnmarshallingJSON +- Fixed polling support for GET calls +- Changed format name from TimeRfc1123 to TimeRFC1123 + +## v7.0.0 + +- Added ByCopying responder with supporting TeeReadCloser +- Rewrote Azure asynchronous handling +- Reverted to only unmarshalling JSON +- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format + +The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. 
Since +`encoding/json` successfully deserializes all core types, and extended types normally provide +their custom JSON serialization handlers, the code has been reverted back to using +`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate +code; there is no loss of function, and there is a gain in accuracy, by reverting. + +Additionally, Azure services indicate requests to be polled by multiple means. The existing code +only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). +The new code correctly covers all cases and aligns with the other Azure SDKs. + +## v6.1.0 + +- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. + +## v6.0.0 + +- Completely reworked the handling of polled and asynchronous requests +- Removed unnecessary routines +- Reworked `mocks.Sender` to replay a series of `http.Response` objects +- Added `PrepareDecorators` for primitive types (e.g., bool, int32) + +Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new +`SendDecorators` implement different styles of polled behavior. See`autorest.DoPollForStatusCodes` +and `azure.DoPollForAsynchronous` for examples. + +## v5.0.0 + +- Added new RespondDecorators unmarshalling primitive types +- Corrected application of inspection and authorization PrependDecorators + +## v4.0.0 + +- Added support for Azure long-running operations. +- Added cancelation support to all decorators and functions that may delay. +- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. + +## v3.1.0 + +- Add support for OAuth Device Flow authorization. +- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. +- Add helpers for persisting and restoring Tokens. 
+- Increased code coverage in the github.com/Azure/autorest/azure package + +## v3.0.0 + +- Breaking: `NewErrorWithError` no longer takes `statusCode int`. +- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. +- Breaking: `Client#Send()` no longer takes `codes ...int` argument. +- Add: XML unmarshaling support with `ByUnmarshallingXML()` +- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). + Applications using this library should either use Glide or vendor dependencies locally some other way. +- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. +- Fix: use `net/http.DefaultClient` as base client. +- Fix: Missing inspection for polling responses added. +- Add: CopyAndDecode helpers. +- Improved `./autorest/to` with `[]string` helpers. +- Removed golint suppressions in .travis.yml. + +## v2.1.0 + +- Added `StatusCode` to `Error` for more easily obtaining the HTTP Reponse StatusCode (if any) + +## v2.0.0 + +- Changed `to.StringMapPtr` method signature to return a pointer +- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys + +## v1.0.0 + +- Added Logging inspectors to trace http.Request / Response +- Added support for User-Agent header +- Changed WithHeader PrepareDecorator to use set vs. 
add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile new file mode 100644 index 0000000000000..a434e73ac49db --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/GNUmakefile @@ -0,0 +1,23 @@ +DIR?=./autorest/ + +default: build + +build: fmt + go install $(DIR) + +test: + go test $(DIR) || exit 1 + +vet: + @echo "go vet ." + @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 0000000000000..dc6e3e633e6d8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,324 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", 
+ "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + 
packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + 
"go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + 
"github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 0000000000000..1fc28659696c8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE 
b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 0000000000000..b9d6a27ea92ef --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 0000000000000..de1e19a44df95 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). 
+ +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. 
Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. + +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. 
While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +### Using with Go Modules +In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. + +- autorest/adal +- autorest/azure/auth +- autorest/azure/cli +- autorest/date +- autorest/mocks +- autorest/to +- autorest/validation +- autorest +- logger +- tracing + +Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. + +## License + +See LICENSE file. + +----- + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod index a030eb42da8b4..02a3d39ff4b2d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -3,10 +3,10 @@ module github.com/Azure/go-autorest/autorest/adal go 1.12 require ( - github.com/Azure/go-autorest/autorest v0.9.0 - github.com/Azure/go-autorest/autorest/date v0.2.0 - github.com/Azure/go-autorest/autorest/mocks v0.3.0 - github.com/Azure/go-autorest/tracing v0.5.0 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.0 + github.com/Azure/go-autorest/tracing v0.6.0 github.com/dgrijalva/jwt-go v3.2.0+incompatible - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum index e43cf6498d0db..bbda1a9a98913 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -1,26 +1,17 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date 
v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible 
h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go index 28a4bfc4c4377..7551b79235de6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package adal // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. 
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 5802f6a4b5c2e..15138b642f2e1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -138,6 +138,11 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { } } +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + // BearerAuthorizerCallbackFunc is the authentication callback signature. type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index c5fc511f67c07..5326f1fd3b9b3 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -167,7 +167,13 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien cancelCtx, cancel = context.WithTimeout(ctx, d) defer cancel() } - + // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll + if delay, ok := f.GetPollingDelay(); ok { + if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { + err = cancelCtx.Err() + return + } + } done, err := f.DoneWithContext(ctx, client) for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { if attempts >= client.RetryAttempts { diff --git 
a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 85ed9635f801a..3e9f74aa319a5 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -71,6 +71,7 @@ type Environment struct { ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` TokenAudience string `json:"tokenAudience"` + APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` } @@ -98,6 +99,7 @@ var ( ContainerRegistryDNSSuffix: "azurecr.io", CosmosDBDNSSuffix: "documents.azure.com", TokenAudience: "https://management.azure.com/", + APIManagementHostNameSuffix: "azure-api.net", ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.windows.net/", KeyVault: "https://vault.azure.net", @@ -127,10 +129,11 @@ var ( KeyVaultDNSSuffix: "vault.usgovcloudapi.net", ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", ContainerRegistryDNSSuffix: "azurecr.us", CosmosDBDNSSuffix: "documents.azure.us", TokenAudience: "https://management.usgovcloudapi.net/", + APIManagementHostNameSuffix: "azure-api.us", ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.windows.net/", KeyVault: "https://vault.usgovcloudapi.net", @@ -160,10 +163,11 @@ var ( KeyVaultDNSSuffix: "vault.azure.cn", ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", ContainerRegistryDNSSuffix: "azurecr.cn", CosmosDBDNSSuffix: "documents.azure.cn", 
TokenAudience: "https://management.chinacloudapi.cn/", + APIManagementHostNameSuffix: "azure-api.cn", ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.chinacloudapi.cn/", KeyVault: "https://vault.azure.cn", @@ -197,6 +201,7 @@ var ( ContainerRegistryDNSSuffix: NotAvailable, CosmosDBDNSSuffix: "documents.microsoftazure.de", TokenAudience: "https://management.microsoftazure.de/", + APIManagementHostNameSuffix: NotAvailable, ResourceIdentifiers: ResourceIdentifier{ Graph: "https://graph.cloudapi.de/", KeyVault: "https://vault.microsoftazure.de", diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod index 3adc4804c3d20..f88ecc4022d60 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/date go 1.12 -require github.com/Azure/go-autorest/autorest v0.9.0 +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum index 9e2ee7a948449..1fc56a962ee44 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -1,16 +1,2 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go index 55adf930f4ab6..4e05432071717 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package date // See the License for the specific language governing permissions and // limitations under the License. 
-// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod index 499c56de48ae7..b66c78da2ccf4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -3,9 +3,10 @@ module github.com/Azure/go-autorest/autorest go 1.12 require ( - github.com/Azure/go-autorest/autorest/adal v0.8.2 - github.com/Azure/go-autorest/autorest/mocks v0.3.0 - github.com/Azure/go-autorest/logger v0.1.0 - github.com/Azure/go-autorest/tracing v0.5.0 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.0 + github.com/Azure/go-autorest/logger v0.2.0 + github.com/Azure/go-autorest/tracing v0.6.0 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum index 37398d1d48a88..96d2ad0fcd860 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -1,28 +1,21 @@ -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal 
v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.0 
h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a 
h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 0000000000000..da65e1041eb0c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index d81116ccc0838..713e23581d927 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -19,7 +19,7 @@ import ( "runtime" ) -const number = "v14.1.1" +const number = "v14.2.1" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 0000000000000..6fb8404fd01d7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install 
Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... 
>&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 0000000000000..99ae6ca988a4e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod index f22ed56bcde47..bedeaee039e06 100644 --- a/vendor/github.com/Azure/go-autorest/logger/go.mod +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -1,3 +1,5 @@ module github.com/Azure/go-autorest/logger go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum new file mode 100644 index 0000000000000..1fc56a962ee44 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 0000000000000..0aa27680db9b1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. 
+ +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod index 25c34c1085a7a..a2cdec78c810e 100644 --- a/vendor/github.com/Azure/go-autorest/tracing/go.mod +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -1,3 +1,5 @@ module github.com/Azure/go-autorest/tracing go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 0000000000000..1fc56a962ee44 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 0000000000000..e163975cd4e19 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 2def23fa1d1c6..3b809e8478c18 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -43,7 +43,7 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. + // to `nil` or the value to `""` to use the default generated endpoint. // // Note: You must still provide a `Region` value when specifying an // endpoint for a client. @@ -138,7 +138,7 @@ type Config struct { // `ExpectContinueTimeout` for information on adjusting the continue wait // timeout. https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experience issues + // You should use this flag to disable 100-Continue if you experience issues // with proxies or third party S3 compatible services. S3Disable100Continue *bool @@ -183,7 +183,7 @@ type Config struct { // // Example: // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) + // .WithEC2MetadataDisableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -194,7 +194,7 @@ type Config struct { // both IPv4 and IPv6 addressing. // // Setting this for a service which does not support dual stack will fail - // to make requets. 
It is not recommended to set this value on the session + // to make requests. It is not recommended to set this value on the session // as it will apply to all service clients created with the session. Even // services which don't support dual stack endpoints. // @@ -238,6 +238,7 @@ type Config struct { // EnableEndpointDiscovery will allow for endpoint discovery on operations that // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. // // Example: // sess := session.Must(session.NewSession(&aws.Config{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index aa902d70837f8..d95a5eb540806 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? 
r.Error = aws.ErrMissingEndpoint } }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index e1551495812ab..22b5c5d9f3224 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -17,8 +17,9 @@ var ( ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 1ececcff69056..57a65b9686a25 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -662,6 +662,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -684,11 +685,16 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1086,6 +1092,21 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "codebuild": service{ Endpoints: endpoints{ @@ -1538,6 +1559,7 @@ var awsPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1547,6 +1569,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2516,6 +2539,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2525,6 +2549,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2565,6 +2590,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2666,11 +2692,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, 
+ "us-west-2": endpoint{}, }, }, "forecast": service{ @@ -2709,8 +2736,10 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, @@ -2930,6 +2959,12 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -3036,6 +3071,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -3373,6 +3409,7 @@ var awsPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3382,6 +3419,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3452,12 +3490,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "machinelearning": service{ @@ -3486,6 +3548,52 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "macie2": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: endpoints{ @@ -3678,6 +3786,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -3882,6 +3991,12 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "us-west-1": endpoint{ + Hostname: 
"rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{ Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ @@ -4005,14 +4120,17 @@ var awsPartition = partition{ "outposts": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4047,6 +4165,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -4225,6 +4344,7 @@ var awsPartition = partition{ "ram": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4234,6 +4354,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4368,10 +4489,34 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "resource-groups": service{ @@ -4459,6 +4604,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4468,6 +4614,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4486,6 +4633,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -4883,12 +5031,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": 
endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "serverlessrepo": service{ @@ -4955,6 +5127,7 @@ var awsPartition = partition{ "servicecatalog": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4964,6 +5137,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -6211,6 +6385,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "backup": service{ Endpoints: endpoints{ @@ -6225,6 +6408,32 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -6287,6 +6496,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -6514,6 +6729,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": 
endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -6573,6 +6795,25 @@ var awscnPartition = partition{ }, }, }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + "fips-aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -6681,13 +6922,20 @@ var awscnPartition = partition{ "snowball": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, "fips-cn-north-1": endpoint{ Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-north-1", }, }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sns": service{ @@ -6907,6 +7155,18 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "apigateway": service{ @@ -6985,6 +7245,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ @@ -7159,6 +7426,12 @@ var awsusgovPartition = partition{ "comprehendmedical": service{ Endpoints: endpoints{ + 
"fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -7218,6 +7491,17 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "docdb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "ds": service{ Endpoints: endpoints{ @@ -7316,7 +7600,7 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -7616,6 +7900,13 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -7670,8 +7961,18 @@ var awsusgovPartition = partition{ "logs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "mediaconvert": service{ @@ -7836,6 +8137,12 @@ var awsusgovPartition = partition{ "rekognition": service{ Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -7969,6 +8276,18 @@ var awsusgovPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + 
"fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -8141,6 +8460,12 @@ var awsusgovPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -8659,6 +8984,20 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ @@ -8842,6 +9181,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "lambda": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "license-manager": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index eb2ac83c99275..773613722f490 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -7,6 +7,8 @@ import ( "strings" ) +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + type partitions []partition func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -124,7 +126,7 @@ func (p partition) EndpointFor(service, region string, opts 
...func(*Options)) ( defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) } func serviceList(ss services) []string { @@ -233,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -260,6 +262,10 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ region = signingRegion } + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + u := strings.Replace(hostname, "{service}", service, 1) u = strings.Replace(u, "{region}", region, 1) u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) @@ -274,7 +280,7 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ SigningName: signingName, SigningNameDerived: signingNameDerived, SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } + }, nil } func getEndpointScheme(protocols []string, disableSSL bool) string { @@ -339,3 +345,7 @@ const ( boxedFalse boxedTrue ) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 1078db6496a6c..1a859e6ba964b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.31.9" +const SDKVersion = "1.33.12" diff 
--git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go index bb8ea5da16596..0e4aa42f3e42b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -69,10 +69,23 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) { case ErrorMessageType: return nil, r.unmarshalErrorMessage(msg) default: - return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + return nil, &UnknownMessageTypeError{ + Type: typ, Message: msg.Clone(), + } } } +// UnknownMessageTypeError provides an error when a message is received from +// the stream, but the reader is unable to determine what kind of message it is. +type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + func (r *EventReader) unmarshalEventMessage( msg eventstream.Message, ) (event interface{}, err error) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go index 3b44dde2f3230..f6f8c5674edaa 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -52,6 +52,15 @@ func (hs *Headers) Del(name string) { } } +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + func decodeHeaders(r io.Reader) (Headers, error) { hs := Headers{} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go index 25c9783cde6c6..f7427da039e06 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -57,6 +57,20 @@ func (m *Message) rawMessage() (rawMessage, error) { return raw, nil } +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + type messagePrelude struct { Length uint32 HeadersLen uint32 diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index 05d4ff5192583..d2f6dae5321d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -56,7 +56,8 @@ func FormatTime(name string, t time.Time) string { case ISO8601TimeFormatName: return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) default: panic("unknown timestamp format name, " + name) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index ea2ca2c33507b..f6ca35dce25ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -61,9 +61,9 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R output = &BatchGetItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if 
req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -282,9 +282,9 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque output = &BatchWriteItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -476,9 +476,9 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R output = &CreateBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -636,9 +636,9 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req output = &CreateGlobalTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -804,9 +804,9 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req output = &CreateTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -941,9 +941,9 @@ func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.R output = &DeleteBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1068,9 +1068,9 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque output = &DeleteItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1212,9 +1212,9 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req output = &DeleteTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1356,9 +1356,9 @@ func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *reque output = &DescribeBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1465,9 +1465,9 @@ func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBac output = &DescribeContinuousBackupsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1815,9 +1815,9 @@ func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) ( output = &DescribeGlobalTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1927,9 +1927,9 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable output = &DescribeGlobalTableSettingsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2037,9 +2037,9 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque output = &DescribeLimitsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2198,9 +2198,9 @@ func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request output = &DescribeTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2400,9 +2400,9 @@ func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (re output = &DescribeTimeToLiveOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2508,9 +2508,9 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou output = &GetItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2636,9 +2636,9 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req output = &ListBackupsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2890,9 +2890,9 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r output = &ListGlobalTablesOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3003,9 +3003,9 @@ func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Reque output = &ListTablesOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3161,9 +3161,9 @@ func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (re output = &ListTagsOfResourceOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3273,9 +3273,9 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou output = &PutItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3459,9 +3459,9 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output output = &QueryOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3679,9 +3679,9 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn output = &RestoreTableFromBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3827,9 +3827,9 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn output = &RestoreTableToPointInTimeOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4003,9 +4003,9 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * output = &ScanOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4204,9 +4204,9 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req output = &TagResourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4337,9 +4337,9 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r output = &TransactGetItemsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4564,9 +4564,9 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re output = &TransactWriteItemsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4833,9 +4833,9 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request output = &UntagResourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4964,9 +4964,9 @@ func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackups output = &UpdateContinuousBackupsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5168,9 +5168,9 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req output = &UpdateGlobalTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5304,9 +5304,9 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett output = &UpdateGlobalTableSettingsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5436,9 +5436,9 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque output = &UpdateItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5574,9 +5574,9 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req output = &UpdateTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5823,9 +5823,9 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r output = &UpdateTimeToLiveOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index cb4293757876a..6ea2559aec01a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -3926,13 +3926,13 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify // ec2.internal. If you're using AmazonProvidedDNS in another Region, specify // region.compute.internal (for example, ap-northeast-1.compute.internal). -// Otherwise, specify a domain name (for example, MyCompany.com). This value -// is used to complete unqualified DNS hostnames. Important: Some Linux operating -// systems accept multiple domain names separated by spaces. However, Windows -// and other Linux operating systems treat the value as a single domain, -// which results in unexpected behavior. If your DHCP options set is associated -// with a VPC that has instances with multiple operating systems, specify -// only one domain name. +// Otherwise, specify a domain name (for example, ExampleCompany.com). 
This +// value is used to complete unqualified DNS hostnames. Important: Some Linux +// operating systems accept multiple domain names separated by spaces. However, +// Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set +// is associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. // // * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. @@ -4432,7 +4432,7 @@ func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInp // CreateInstanceExportTask API operation for Amazon Elastic Compute Cloud. // -// Exports a running or stopped instance to an S3 bucket. +// Exports a running or stopped instance to an Amazon S3 bucket. // // For information about the supported operating systems, image formats, and // known limitations for the types of instances you can export, see Exporting @@ -4938,6 +4938,84 @@ func (c *EC2) CreateLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Cont return out, req.Send() } +const opCreateManagedPrefixList = "CreateManagedPrefixList" + +// CreateManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the CreateManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateManagedPrefixList for more information on using the CreateManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateManagedPrefixListRequest method. 
+// req, resp := client.CreateManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixListRequest(input *CreateManagedPrefixListInput) (req *request.Request, output *CreateManagedPrefixListOutput) { + op := &request.Operation{ + Name: opCreateManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateManagedPrefixListInput{} + } + + output = &CreateManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Creates a managed prefix list. You can specify one or more entries for the +// prefix list. Each entry consists of a CIDR block and an optional description. +// +// You must specify the maximum number of entries for the prefix list. The maximum +// number of entries cannot be changed later. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixList(input *CreateManagedPrefixListInput) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + return out, req.Send() +} + +// CreateManagedPrefixListWithContext is the same as CreateManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See CreateManagedPrefixList for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateManagedPrefixListWithContext(ctx aws.Context, input *CreateManagedPrefixListInput, opts ...request.Option) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateNatGateway = "CreateNatGateway" // CreateNatGatewayRequest generates a "aws/request.Request" representing the @@ -5397,7 +5475,7 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // instances in one partition do not share the same hardware with instances // in another partition. // -// For more information, see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6022,7 +6100,7 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // // Creates a data feed for Spot Instances, enabling you to view Spot Instance // usage logs. You can create one data feed per AWS account. For more information, -// see Spot Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -6097,15 +6175,12 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // CreateSubnet API operation for Amazon Elastic Compute Cloud. // -// Creates a subnet in an existing VPC. +// Creates a subnet in a specified VPC. // -// When you create each subnet, you provide the VPC ID and IPv4 CIDR block for -// the subnet. After you create a subnet, you can't change its CIDR block. The -// size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR -// block, or a subset of a VPC's IPv4 CIDR block. If you create more than one -// subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest -// IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), -// and the largest uses a /16 netmask (65,536 IPv4 addresses). +// You must specify an IPv4 CIDR block for the subnet. After you create a subnet, +// you can't change its CIDR block. The allowed block size is between a /16 +// netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR +// block must not overlap with the CIDR block of an existing subnet in the VPC. // // If you've associated an IPv6 CIDR block with your VPC, you can create a subnet // with an IPv6 CIDR block that uses a /64 prefix length. @@ -6116,9 +6191,7 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // If you add more than one subnet to a VPC, they're set up in a star topology // with a logical router in the middle. // -// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP -// address doesn't change if you stop and restart the instance (unlike a similar -// instance launched outside a VPC, which gets a new IP address when restarted). +// When you stop an instance in a subnet, it retains its private IPv4 address. // It's therefore possible to have a subnet with no running instances (they're // all stopped), but no remaining IP addresses available. 
// @@ -8890,6 +8963,81 @@ func (c *EC2) DeleteLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Cont return out, req.Send() } +const opDeleteManagedPrefixList = "DeleteManagedPrefixList" + +// DeleteManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the DeleteManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteManagedPrefixList for more information on using the DeleteManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteManagedPrefixListRequest method. +// req, resp := client.DeleteManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixListRequest(input *DeleteManagedPrefixListInput) (req *request.Request, output *DeleteManagedPrefixListOutput) { + op := &request.Operation{ + Name: opDeleteManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteManagedPrefixListInput{} + } + + output = &DeleteManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified managed prefix list. You must first remove all references +// to the prefix list in your resources. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixList(input *DeleteManagedPrefixListInput) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + return out, req.Send() +} + +// DeleteManagedPrefixListWithContext is the same as DeleteManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteManagedPrefixListWithContext(ctx aws.Context, input *DeleteManagedPrefixListInput, opts ...request.Option) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteNatGateway = "DeleteNatGateway" // DeleteNatGatewayRequest generates a "aws/request.Request" representing the @@ -9318,7 +9466,7 @@ func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req // // Deletes the specified placement group. You must terminate all instances in // the placement group before you can delete the placement group. 
For more information, -// see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -13984,7 +14132,7 @@ func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInp // DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export image tasks or all your export image tasks. +// Describes the specified export image tasks or all of your export image tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14110,7 +14258,7 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export instance tasks or all your export instance +// Describes the specified export instance tasks or all of your export instance // tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16195,7 +16343,7 @@ func (c *EC2) DescribeInstanceCreditSpecificationsRequest(input *DescribeInstanc // all, the call fails. If you specify only instance IDs in an unaffected zone, // the call works normally. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -16411,18 +16559,18 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // // * Status checks - Amazon EC2 performs status checks on running EC2 instances // to identify hardware and software issues. For more information, see Status -// Checks for Your Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) -// and Troubleshooting Instances with Failed Status Checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// checks for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting instances with failed status checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) // in the Amazon Elastic Compute Cloud User Guide. // // * Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, // or terminate) for your instances related to hardware issues, software -// updates, or system maintenance. For more information, see Scheduled Events -// for Your Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// updates, or system maintenance. For more information, see Scheduled events +// for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) // in the Amazon Elastic Compute Cloud User Guide. // // * Instance state - You can manage your instances from the moment you launch -// them through their termination. For more information, see Instance Lifecycle +// them through their termination. For more information, see Instance lifecycle // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. 
// @@ -17316,7 +17464,9 @@ func (c *EC2) DescribeLaunchTemplateVersionsRequest(input *DescribeLaunchTemplat // DescribeLaunchTemplateVersions API operation for Amazon Elastic Compute Cloud. // // Describes one or more versions of a specified launch template. You can describe -// all versions, individual versions, or a range of versions. +// all versions, individual versions, or a range of versions. You can also describe +// all the latest versions or all the default versions of all the launch templates +// in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -18326,6 +18476,140 @@ func (c *EC2) DescribeLocalGatewaysPagesWithContext(ctx aws.Context, input *Desc return p.Err() } +const opDescribeManagedPrefixLists = "DescribeManagedPrefixLists" + +// DescribeManagedPrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeManagedPrefixLists operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeManagedPrefixLists for more information on using the DescribeManagedPrefixLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeManagedPrefixListsRequest method. 
+// req, resp := client.DescribeManagedPrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixListsRequest(input *DescribeManagedPrefixListsInput) (req *request.Request, output *DescribeManagedPrefixListsOutput) { + op := &request.Operation{ + Name: opDescribeManagedPrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeManagedPrefixListsInput{} + } + + output = &DescribeManagedPrefixListsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeManagedPrefixLists API operation for Amazon Elastic Compute Cloud. +// +// Describes your managed prefix lists and any AWS-managed prefix lists. +// +// To view the entries for your prefix list, use GetManagedPrefixListEntries. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeManagedPrefixLists for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixLists(input *DescribeManagedPrefixListsInput) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + return out, req.Send() +} + +// DescribeManagedPrefixListsWithContext is the same as DescribeManagedPrefixLists with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeManagedPrefixLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, opts ...request.Option) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeManagedPrefixListsPages iterates over the pages of a DescribeManagedPrefixLists operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeManagedPrefixLists method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeManagedPrefixLists operation. +// pageNum := 0 +// err := client.DescribeManagedPrefixListsPages(params, +// func(page *ec2.DescribeManagedPrefixListsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeManagedPrefixListsPages(input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool) error { + return c.DescribeManagedPrefixListsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeManagedPrefixListsPagesWithContext same as DescribeManagedPrefixListsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsPagesWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeManagedPrefixListsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeManagedPrefixListsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeManagedPrefixListsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMovingAddresses = "DescribeMovingAddresses" // DescribeMovingAddressesRequest generates a "aws/request.Request" representing the @@ -19111,7 +19395,7 @@ func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput // DescribePlacementGroups API operation for Amazon Elastic Compute Cloud. // // Describes the specified placement groups or all of your placement groups. -// For more information, see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -19194,10 +19478,9 @@ func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req * // // Describes available AWS services in a prefix list format, which includes // the prefix list name and prefix list ID of the service and the IP address -// range for the service. A prefix list ID is required for creating an outbound -// security group rule that allows traffic from a VPC to access an AWS service -// through a gateway VPC endpoint. 
Currently, the services that support this -// action are Amazon S3 and Amazon DynamoDB. +// range for the service. +// +// We recommend that you use DescribeManagedPrefixLists instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20884,12 +21167,12 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // (if you own the snapshots), self for snapshots for which you own or have // explicit permissions, or all for public snapshots. // -// If you are describing a long list of snapshots, you can paginate the output -// to make the list more manageable. The MaxResults parameter sets the maximum -// number of results returned in a single page. If the list of results exceeds -// your MaxResults value, then that number of results is returned along with -// a NextToken value that can be passed to a subsequent DescribeSnapshots request -// to retrieve the remaining results. +// If you are describing a long list of snapshots, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeSnapshots +// request to retrieve the remaining results. // // To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores. // @@ -21021,7 +21304,7 @@ func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafee // DescribeSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. // // Describes the data feed for Spot Instances. 
For more information, see Spot -// Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -21539,7 +21822,7 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // DescribeSpotPriceHistory API operation for Amazon Elastic Compute Cloud. // // Describes the Spot price history. For more information, see Spot Instance -// Pricing History (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) // in the Amazon EC2 User Guide for Linux Instances. // // When you specify a start and end time, this operation returns the prices @@ -23524,12 +23807,12 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request. // // Describes the specified EBS volumes or all of your EBS volumes. // -// If you are describing a long list of volumes, you can paginate the output -// to make the list more manageable. The MaxResults parameter sets the maximum -// number of results returned in a single page. If the list of results exceeds -// your MaxResults value, then that number of results is returned along with -// a NextToken value that can be passed to a subsequent DescribeVolumes request -// to retrieve the remaining results. +// If you are describing a long list of volumes, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. 
If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeVolumes +// request to retrieve the remaining results. // // For more information about EBS volumes, see Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -23664,19 +23947,17 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica // DescribeVolumesModifications API operation for Amazon Elastic Compute Cloud. // -// Reports the current modification status of EBS volumes. +// Describes the most recent volume modification request for the specified EBS +// volumes. // -// Current-generation EBS volumes support modification of attributes including -// type, size, and (for io1 volumes) IOPS provisioning while either attached -// to or detached from an instance. Following an action from the API or the -// console to modify a volume, the status of the modification may be modifying, -// optimizing, completed, or failed. If a volume has never been modified, then -// certain elements of the returned VolumeModification objects are null. +// If a volume has never been modified, some information in the output will +// be null. If a volume has been modified more than once, the output includes +// only the most recent modification request. // // You can also use CloudWatch Events to check the status of a modification // to an EBS volume. For information about CloudWatch Events, see the Amazon // CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). 
-// For more information, see Monitoring Volume Modifications" (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods) +// For more information, see Monitoring Volume Modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -28056,7 +28337,7 @@ func (c *EC2) GetDefaultCreditSpecificationRequest(input *GetDefaultCreditSpecif // Describes the default credit option for CPU usage of a burstable performance // instance family. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -28370,6 +28651,12 @@ func (c *EC2) GetLaunchTemplateDataRequest(input *GetLaunchTemplateDataInput) (r // Retrieves the configuration data of the specified instance. You can use this // data to create a launch template. // +// This action calls on other describe actions to get instance information. +// Depending on your instance configuration, you may need to allow the following +// actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications, +// DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, +// you can allow describe* depending on your instance requirements. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -28398,6 +28685,271 @@ func (c *EC2) GetLaunchTemplateDataWithContext(ctx aws.Context, input *GetLaunch return out, req.Send() } +const opGetManagedPrefixListAssociations = "GetManagedPrefixListAssociations" + +// GetManagedPrefixListAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListAssociations for more information on using the GetManagedPrefixListAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListAssociationsRequest method. 
+// req, resp := client.GetManagedPrefixListAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociationsRequest(input *GetManagedPrefixListAssociationsInput) (req *request.Request, output *GetManagedPrefixListAssociationsOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListAssociationsInput{} + } + + output = &GetManagedPrefixListAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListAssociations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the resources that are associated with the specified +// managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListAssociations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociations(input *GetManagedPrefixListAssociationsInput) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsWithContext is the same as GetManagedPrefixListAssociations with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetManagedPrefixListAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, opts ...request.Option) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsPages iterates over the pages of a GetManagedPrefixListAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListAssociations operation. +// pageNum := 0 +// err := client.GetManagedPrefixListAssociationsPages(params, +// func(page *ec2.GetManagedPrefixListAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListAssociationsPages(input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool) error { + return c.GetManagedPrefixListAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListAssociationsPagesWithContext same as GetManagedPrefixListAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsPagesWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetManagedPrefixListEntries = "GetManagedPrefixListEntries" + +// GetManagedPrefixListEntriesRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListEntries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListEntries for more information on using the GetManagedPrefixListEntries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListEntriesRequest method. 
+// req, resp := client.GetManagedPrefixListEntriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntriesRequest(input *GetManagedPrefixListEntriesInput) (req *request.Request, output *GetManagedPrefixListEntriesOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListEntries, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListEntriesInput{} + } + + output = &GetManagedPrefixListEntriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListEntries API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the entries for a specified managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListEntries for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntries(input *GetManagedPrefixListEntriesInput) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListEntriesWithContext is the same as GetManagedPrefixListEntries with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListEntries for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, opts ...request.Option) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListEntriesPages iterates over the pages of a GetManagedPrefixListEntries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListEntries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListEntries operation. +// pageNum := 0 +// err := client.GetManagedPrefixListEntriesPages(params, +// func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListEntriesPages(input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool) error { + return c.GetManagedPrefixListEntriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListEntriesPagesWithContext same as GetManagedPrefixListEntriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesPagesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListEntriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListEntriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListEntriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetPasswordData = "GetPasswordData" // GetPasswordDataRequest generates a "aws/request.Request" representing the @@ -29612,7 +30164,7 @@ func (c *EC2) ModifyAvailabilityZoneGroupRequest(input *ModifyAvailabilityZoneGr // // Enables or disables an Availability Zone group for your account. // -// Use describe-availability-zones (https://docs.aws.amazon.com/AWSEC2ApiDocReef/build/server-root/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) +// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) // to view the value for GroupName. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -29854,7 +30406,7 @@ func (c *EC2) ModifyDefaultCreditSpecificationRequest(input *ModifyDefaultCredit // can call GetDefaultCreditSpecification and check DefaultCreditSpecification // for updates. 
// -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -30566,7 +31118,7 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput // we recommend that you use the ModifyNetworkInterfaceAttribute action. // // To modify some attributes, the instance must be stopped. For more information, -// see Modifying Attributes of a Stopped Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// see Modifying attributes of a stopped instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -30721,7 +31273,7 @@ func (c *EC2) ModifyInstanceCreditSpecificationRequest(input *ModifyInstanceCred // Modifies the credit option for CPU usage on a running or stopped burstable // performance instance. The credit options are standard and unlimited. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -30876,7 +31428,7 @@ func (c *EC2) ModifyInstanceMetadataOptionsRequest(input *ModifyInstanceMetadata // the API responds with a state of “pending”. After the parameter modifications // are successfully applied to the instance, the state of the modifications // changes from “pending” to “applied” in subsequent describe-instances -// API calls. For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +// API calls. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -31077,6 +31629,86 @@ func (c *EC2) ModifyLaunchTemplateWithContext(ctx aws.Context, input *ModifyLaun return out, req.Send() } +const opModifyManagedPrefixList = "ModifyManagedPrefixList" + +// ModifyManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the ModifyManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyManagedPrefixList for more information on using the ModifyManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyManagedPrefixListRequest method. 
+// req, resp := client.ModifyManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixListRequest(input *ModifyManagedPrefixListInput) (req *request.Request, output *ModifyManagedPrefixListOutput) { + op := &request.Operation{ + Name: opModifyManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyManagedPrefixListInput{} + } + + output = &ModifyManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified managed prefix list. +// +// Adding or removing entries in a prefix list creates a new version of the +// prefix list. Changing the name of the prefix list does not affect the version. +// +// If you specify a current version number that does not match the true current +// version number, the request fails. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixList(input *ModifyManagedPrefixListInput) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + return out, req.Send() +} + +// ModifyManagedPrefixListWithContext is the same as ModifyManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyManagedPrefixList for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyManagedPrefixListWithContext(ctx aws.Context, input *ModifyManagedPrefixListInput, opts ...request.Option) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" // ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the @@ -32879,7 +33511,7 @@ func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *reques // MonitorInstances API operation for Amazon Elastic Compute Cloud. // // Enables detailed monitoring for a running instance. Otherwise, basic monitoring -// is enabled. For more information, see Monitoring Your Instances and Volumes +// is enabled. For more information, see Monitoring your instances and volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -33383,8 +34015,8 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request. // If an instance does not cleanly shut down within four minutes, Amazon EC2 // performs a hard reboot. // -// For more information about troubleshooting, see Getting Console Output and -// Rebooting Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// For more information about troubleshooting, see Getting console output and +// rebooting instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -33461,7 +34093,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // Registers an AMI. When you're creating an AMI, this is the final step you // must complete before you can launch an instance from the AMI. For more information -// about creating AMIs, see Creating Your Own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// about creating AMIs, see Creating your own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) // in the Amazon Elastic Compute Cloud User Guide. // // For Amazon EBS-backed instances, CreateImage creates and registers the AMI @@ -33469,12 +34101,12 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from // a snapshot of a root device volume. You specify the snapshot using the block -// device mapping. For more information, see Launching a Linux Instance from -// a Backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) +// device mapping. For more information, see Launching a Linux instance from +// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // -// You can't register an image where a secondary (non-root) snapshot has AWS -// Marketplace product codes. +// If any snapshots have AWS Marketplace product codes, they are copied to the +// new AMI. // // Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) // and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code @@ -33495,7 +34127,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // a Reserved Instance without the matching billing product code, the Reserved // Instance will not be applied to the On-Demand Instance. 
For information about // how to obtain the platform details and billing information of an AMI, see -// Obtaining Billing Information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) +// Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) // in the Amazon Elastic Compute Cloud User Guide. // // If needed, you can deregister an AMI at any time. Any modifications you make @@ -34873,7 +35505,7 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // the fleet. You cannot tag other resource types in a Spot Fleet request because // only the spot-fleet-request and instance resource types are supported. // -// For more information, see Spot Fleet Requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// For more information, see Spot Fleet requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -34950,7 +35582,7 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // // Creates a Spot Instance request. // -// For more information, see Spot Instance Requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// For more information, see Spot Instance requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -35528,6 +36160,81 @@ func (c *EC2) RestoreAddressToClassicWithContext(ctx aws.Context, input *Restore return out, req.Send() } +const opRestoreManagedPrefixListVersion = "RestoreManagedPrefixListVersion" + +// RestoreManagedPrefixListVersionRequest generates a "aws/request.Request" representing the +// client's request for the RestoreManagedPrefixListVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreManagedPrefixListVersion for more information on using the RestoreManagedPrefixListVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreManagedPrefixListVersionRequest method. +// req, resp := client.RestoreManagedPrefixListVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersionRequest(input *RestoreManagedPrefixListVersionInput) (req *request.Request, output *RestoreManagedPrefixListVersionOutput) { + op := &request.Operation{ + Name: opRestoreManagedPrefixListVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreManagedPrefixListVersionInput{} + } + + output = &RestoreManagedPrefixListVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreManagedPrefixListVersion API operation for Amazon Elastic Compute Cloud. 
+// +// Restores the entries from a previous version of a managed prefix list to +// a new version of the prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RestoreManagedPrefixListVersion for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersion(input *RestoreManagedPrefixListVersionInput) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + return out, req.Send() +} + +// RestoreManagedPrefixListVersionWithContext is the same as RestoreManagedPrefixListVersion with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreManagedPrefixListVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RestoreManagedPrefixListVersionWithContext(ctx aws.Context, input *RestoreManagedPrefixListVersionInput, opts ...request.Option) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRevokeClientVpnIngress = "RevokeClientVpnIngress" // RevokeClientVpnIngressRequest generates a "aws/request.Request" representing the @@ -35838,17 +36545,17 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // // * Some instance types must be launched into a VPC. 
If you do not have // a default VPC, or if you do not specify a subnet ID, the request fails. -// For more information, see Instance Types Available Only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). +// For more information, see Instance types available only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). // // * [EC2-VPC] All instances have a network interface with a primary private // IPv4 address. If you don't specify this address, we choose one from the // IPv4 range of your subnet. // // * Not all instance types support IPv6 addresses. For more information, -// see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). +// see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // // * If you don't specify a security group ID, we use the default security -// group. For more information, see Security Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). +// group. For more information, see Security groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). // // * If any of the AMIs have a product code attached for which the user has // not subscribed, the request fails. @@ -35865,17 +36572,17 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // An instance is ready for you to use when it's in the running state. You can // check the state of your instance using DescribeInstances. You can tag instances // and EBS volumes during launch, after launch, or both. For more information, -// see CreateTags and Tagging Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// see CreateTags and Tagging your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). 
// // Linux instances have access to the public key of the key pair at boot. You // can use this key to provide secure access to the instance. Amazon EC2 public // images use this feature to provide secure access without passwords. For more -// information, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// information, see Key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For troubleshooting, see What To Do If An Instance Immediately Terminates +// For troubleshooting, see What to do if an instance immediately terminates // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), -// and Troubleshooting Connecting to Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36388,8 +37095,8 @@ func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput // system is configured to perform the required diagnostic tasks. // // For more information about configuring your operating system to generate -// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic -// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// a crash dump when a kernel panic or stop error occurs, see Send a diagnostic +// interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) // (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) // (Windows instances). 
// @@ -36486,7 +37193,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // Performing this operation on an instance that uses an instance store as its // root device returns an error. // -// For more information, see Stopping Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// For more information, see Stopping instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36649,7 +37356,7 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // You can use the Stop action to hibernate an instance if the instance is enabled // for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#enabling-hibernation) // and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. // // We don't charge usage for a stopped instance, or data transfer fees; however, @@ -36665,7 +37372,7 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // You can't stop or hibernate instance store-backed instances. You can't use // the Stop action to hibernate Spot Instances, but you can specify that Amazon // EC2 should hibernate Spot Instances when they are interrupted. 
For more information, -// see Hibernating Interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) +// see Hibernating interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) // in the Amazon Elastic Compute Cloud User Guide. // // When you stop or hibernate an instance, we shut it down. You can restart @@ -36681,13 +37388,13 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // an instance, the root device and any other devices attached during the instance // launch are automatically deleted. For more information about the differences // between rebooting, stopping, hibernating, and terminating instances, see -// Instance Lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // // When you stop an instance, we attempt to shut it down forcibly after a short // while. If your instance appears stuck in the stopping state after a period // of time, there may be an issue with the underlying host computer. For more -// information, see Troubleshooting Stopping Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// information, see Troubleshooting stopping your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36857,11 +37564,11 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re // an instance, any attached EBS volumes with the DeleteOnTermination block // device mapping parameter set to true are automatically deleted. 
For more // information about the differences between stopping and terminating instances, -// see Instance Lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// see Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information about troubleshooting, see Troubleshooting Terminating -// Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// For more information about troubleshooting, see Troubleshooting terminating +// your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -37086,7 +37793,7 @@ func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *re // UnmonitorInstances API operation for Amazon Elastic Compute Cloud. // // Disables detailed monitoring for a running instance. For more information, -// see Monitoring Your Instances and Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// see Monitoring your instances and volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -37855,6 +38562,56 @@ func (s *ActiveInstance) SetSpotInstanceRequestId(v string) *ActiveInstance { return s } +// An entry for a prefix list. +type AddPrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // A description for the entry. + // + // Constraints: Up to 255 characters in length. 
+ Description *string `type:"string"` +} + +// String returns the string representation +func (s AddPrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPrefixListEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddPrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddPrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *AddPrefixListEntry) SetCidr(v string) *AddPrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AddPrefixListEntry) SetDescription(v string) *AddPrefixListEntry { + s.Description = &v + return s +} + // Describes an Elastic IP address. type Address struct { _ struct{} `type:"structure"` @@ -38075,9 +38832,11 @@ type AllocateAddressInput struct { // address from the address pool. CustomerOwnedIpv4Pool *string `type:"string"` - // Set to vpc to allocate the address for use with instances in a VPC. + // Indicates whether the Elastic IP address is for use with instances in a VPC + // or instances in EC2-Classic. // - // Default: The address is for use with instances in EC2-Classic. + // Default: If the Region supports EC2-Classic, the default is standard. Otherwise, + // the default is vpc. Domain *string `type:"string" enum:"DomainType"` // Checks whether you have the required permissions for the action, without @@ -38166,8 +38925,8 @@ type AllocateAddressOutput struct { // The ID of the customer-owned address pool. 
CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` - // Indicates whether this Elastic IP address is for use with instances in EC2-Classic - // (standard) or instances in a VPC (vpc). + // Indicates whether the Elastic IP address is for use with instances in a VPC + // (vpc) or instances in EC2-Classic (standard). Domain *string `locationName:"domain" type:"string" enum:"DomainType"` // The location from which the IP address is advertised. @@ -40245,11 +41004,13 @@ type AuthorizeClientVpnIngressInput struct { _ struct{} `type:"structure"` // The ID of the group to grant access to, for example, the Active Directory - // group or identity provider (IdP) group. + // group or identity provider (IdP) group. Required if AuthorizeAllGroups is + // false or not specified. AccessGroupId *string `type:"string"` - // Indicates whether to grant access to all clients. Use true to grant all clients - // who successfully establish a VPN connection access to the network. + // Indicates whether to grant access to all clients. Specify true to grant all + // clients who successfully establish a VPN connection access to the network. + // Must be set to true if AccessGroupId is not specified. AuthorizeAllGroups *bool `type:"boolean"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -40657,7 +41418,7 @@ func (s AuthorizeSecurityGroupIngressOutput) GoString() string { return s.String() } -// Describes an Availability Zone or Local Zone. +// Describes a Zone. type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -40666,7 +41427,7 @@ type AvailabilityZone struct { // For Local Zones, the name of the associated group, for example us-west-2-lax-1. GroupName *string `locationName:"groupName" type:"string"` - // Any messages about the Availability Zone or Local Zone. + // Any messages about the Zone. 
Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` // The name of the location from which the address is advertised. @@ -40678,17 +41439,28 @@ type AvailabilityZone struct { // are opted-in, and not-opted-in. OptInStatus *string `locationName:"optInStatus" type:"string" enum:"AvailabilityZoneOptInStatus"` + // The ID of the zone that handles some of the Local Zone control plane operations, + // such as API calls. + ParentZoneId *string `locationName:"parentZoneId" type:"string"` + + // The name of the zone that handles some of the Local Zone control plane operations, + // such as API calls. + ParentZoneName *string `locationName:"parentZoneName" type:"string"` + // The name of the Region. RegionName *string `locationName:"regionName" type:"string"` - // The state of the Availability Zone or Local Zone. + // The state of the Zone. State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` - // The ID of the Availability Zone or Local Zone. + // The ID of the Zone. ZoneId *string `locationName:"zoneId" type:"string"` - // The name of the Availability Zone or Local Zone. + // The name of the Zone. ZoneName *string `locationName:"zoneName" type:"string"` + + // The type of zone. The valid values are availability-zone and local-zone. + ZoneType *string `locationName:"zoneType" type:"string"` } // String returns the string representation @@ -40725,6 +41497,18 @@ func (s *AvailabilityZone) SetOptInStatus(v string) *AvailabilityZone { return s } +// SetParentZoneId sets the ParentZoneId field's value. +func (s *AvailabilityZone) SetParentZoneId(v string) *AvailabilityZone { + s.ParentZoneId = &v + return s +} + +// SetParentZoneName sets the ParentZoneName field's value. +func (s *AvailabilityZone) SetParentZoneName(v string) *AvailabilityZone { + s.ParentZoneName = &v + return s +} + // SetRegionName sets the RegionName field's value. 
func (s *AvailabilityZone) SetRegionName(v string) *AvailabilityZone { s.RegionName = &v @@ -40749,11 +41533,17 @@ func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { return s } -// Describes a message about an Availability Zone or Local Zone. +// SetZoneType sets the ZoneType field's value. +func (s *AvailabilityZone) SetZoneType(v string) *AvailabilityZone { + s.ZoneType = &v + return s +} + +// Describes a message about a Zone. type AvailabilityZoneMessage struct { _ struct{} `type:"structure"` - // The message about the Availability Zone or Local Zone. + // The message about the Zone. Message *string `locationName:"message" type:"string"` } @@ -42246,6 +43036,8 @@ type CapacityReservationSpecification struct { // // * none - The instance avoids running in a Capacity Reservation even if // one is available. The instance runs as an On-Demand Instance. + // + // When CapacityReservationPreference is not specified, it defaults to open. CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"` // Information about the target Capacity Reservation. @@ -43450,6 +44242,9 @@ type CoipPool struct { // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + // The ARN of the address pool. + PoolArn *string `locationName:"poolArn" min:"1" type:"string"` + // The address ranges of the address pool. PoolCidrs []*string `locationName:"poolCidrSet" locationNameList:"item" type:"list"` @@ -43476,6 +44271,12 @@ func (s *CoipPool) SetLocalGatewayRouteTableId(v string) *CoipPool { return s } +// SetPoolArn sets the PoolArn field's value. +func (s *CoipPool) SetPoolArn(v string) *CoipPool { + s.PoolArn = &v + return s +} + // SetPoolCidrs sets the PoolCidrs field's value. 
func (s *CoipPool) SetPoolCidrs(v []*string) *CoipPool { s.PoolCidrs = v @@ -43596,7 +44397,8 @@ func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstance type ConnectionLogOptions struct { _ struct{} `type:"structure"` - // The name of the CloudWatch Logs log group. + // The name of the CloudWatch Logs log group. Required if connection logging + // is enabled. CloudwatchLogGroup *string `type:"string"` // The name of the CloudWatch Logs log stream to which the connection data is @@ -45261,6 +46063,9 @@ type CreateDhcpOptionsInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the DHCP option. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -45298,6 +46103,12 @@ func (s *CreateDhcpOptionsInput) SetDryRun(v bool) *CreateDhcpOptionsInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateDhcpOptionsInput) SetTagSpecifications(v []*TagSpecification) *CreateDhcpOptionsInput { + s.TagSpecifications = v + return s +} + type CreateDhcpOptionsOutput struct { _ struct{} `type:"structure"` @@ -45334,6 +46145,9 @@ type CreateEgressOnlyInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The tags to assign to the egress-only internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC for which to create the egress-only internet gateway. // // VpcId is a required field @@ -45375,6 +46189,12 @@ func (s *CreateEgressOnlyInternetGatewayInput) SetDryRun(v bool) *CreateEgressOn return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateEgressOnlyInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateEgressOnlyInternetGatewayInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateEgressOnlyInternetGatewayInput) SetVpcId(v string) *CreateEgressOnlyInternetGatewayInput { s.VpcId = &v @@ -46261,7 +47081,7 @@ type CreateInstanceExportTaskInput struct { _ struct{} `type:"structure"` // A description for the conversion task or the resource being exported. The - // maximum length is 255 bytes. + // maximum length is 255 characters. Description *string `locationName:"description" type:"string"` // The format and location for an instance export task. @@ -46272,6 +47092,9 @@ type CreateInstanceExportTaskInput struct { // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + // The tags to apply to the instance export task during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The target virtualization environment. TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` } @@ -46317,6 +47140,12 @@ func (s *CreateInstanceExportTaskInput) SetInstanceId(v string) *CreateInstanceE return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInstanceExportTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateInstanceExportTaskInput { + s.TagSpecifications = v + return s +} + // SetTargetEnvironment sets the TargetEnvironment field's value. func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateInstanceExportTaskInput { s.TargetEnvironment = &v @@ -46354,6 +47183,9 @@ type CreateInternetGatewayInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. 
DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -46372,6 +47204,12 @@ func (s *CreateInternetGatewayInput) SetDryRun(v bool) *CreateInternetGatewayInp return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateInternetGatewayInput { + s.TagSpecifications = v + return s +} + type CreateInternetGatewayOutput struct { _ struct{} `type:"structure"` @@ -46996,6 +47834,151 @@ func (s *CreateLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteT return s } +type CreateManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // The IP address type. + // + // Valid Values: IPv4 | IPv6 + // + // AddressFamily is a required field + AddressFamily *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Up to 255 UTF-8 characters in length. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more entries for the prefix list. + Entries []*AddPrefixListEntry `locationName:"Entry" type:"list"` + + // The maximum number of entries for the prefix list. + // + // MaxEntries is a required field + MaxEntries *int64 `type:"integer" required:"true"` + + // A name for the prefix list. 
+ // + // Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws. + // + // PrefixListName is a required field + PrefixListName *string `type:"string" required:"true"` + + // The tags to apply to the prefix list during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateManagedPrefixListInput"} + if s.AddressFamily == nil { + invalidParams.Add(request.NewErrParamRequired("AddressFamily")) + } + if s.MaxEntries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxEntries")) + } + if s.PrefixListName == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListName")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *CreateManagedPrefixListInput) SetAddressFamily(v string) *CreateManagedPrefixListInput { + s.AddressFamily = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateManagedPrefixListInput) SetClientToken(v string) *CreateManagedPrefixListInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *CreateManagedPrefixListInput) SetDryRun(v bool) *CreateManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *CreateManagedPrefixListInput) SetEntries(v []*AddPrefixListEntry) *CreateManagedPrefixListInput { + s.Entries = v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *CreateManagedPrefixListInput) SetMaxEntries(v int64) *CreateManagedPrefixListInput { + s.MaxEntries = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *CreateManagedPrefixListInput) SetPrefixListName(v string) *CreateManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateManagedPrefixListInput) SetTagSpecifications(v []*TagSpecification) *CreateManagedPrefixListInput { + s.TagSpecifications = v + return s +} + +type CreateManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s CreateManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *CreateManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *CreateManagedPrefixListOutput { + s.PrefixList = v + return s +} + type CreateNatGatewayInput struct { _ struct{} `type:"structure"` @@ -47120,6 +48103,8 @@ type CreateNetworkAclEntryInput struct { _ struct{} `type:"structure"` // The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. 
CidrBlock *string `locationName:"cidrBlock" type:"string"` // Checks whether you have the required permissions for the action, without @@ -47294,6 +48279,9 @@ type CreateNetworkAclInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to assign to the network ACL. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -47329,6 +48317,12 @@ func (s *CreateNetworkAclInput) SetDryRun(v bool) *CreateNetworkAclInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkAclInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkAclInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateNetworkAclInput) SetVpcId(v string) *CreateNetworkAclInput { s.VpcId = &v @@ -47415,6 +48409,9 @@ type CreateNetworkInterfaceInput struct { // // SubnetId is a required field SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + // The tags to apply to the new network interface. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -47500,6 +48497,12 @@ func (s *CreateNetworkInterfaceInput) SetSubnetId(v string) *CreateNetworkInterf return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkInterfaceInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkInterfaceInput { + s.TagSpecifications = v + return s +} + // Contains the output of CreateNetworkInterface. type CreateNetworkInterfaceOutput struct { _ struct{} `type:"structure"` @@ -47834,13 +48837,18 @@ type CreateRouteInput struct { _ struct{} `type:"structure"` // The IPv4 CIDR address block used for the destination match. 
Routing decisions - // are based on the most specific match. + // are based on the most specific match. We modify the specified CIDR block + // to its canonical form; for example, if you specify 100.68.0.18/18, we modify + // it to 100.68.0.0/18. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` // The IPv6 CIDR block used for the destination match. Routing decisions are // based on the most specific match. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of a prefix list used for the destination match. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -47914,6 +48922,12 @@ func (s *CreateRouteInput) SetDestinationIpv6CidrBlock(v string) *CreateRouteInp return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *CreateRouteInput) SetDestinationPrefixListId(v string) *CreateRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateRouteInput) SetDryRun(v bool) *CreateRouteInput { s.DryRun = &v @@ -48006,6 +49020,9 @@ type CreateRouteTableInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to assign to the route table. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -48041,6 +49058,12 @@ func (s *CreateRouteTableInput) SetDryRun(v bool) *CreateRouteTableInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateRouteTableInput) SetTagSpecifications(v []*TagSpecification) *CreateRouteTableInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput { s.VpcId = &v @@ -48101,6 +49124,9 @@ type CreateSecurityGroupInput struct { // GroupName is a required field GroupName *string `type:"string" required:"true"` + // The tags to assign to the security group. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. VpcId *string `type:"string"` } @@ -48149,6 +49175,12 @@ func (s *CreateSecurityGroupInput) SetGroupName(v string) *CreateSecurityGroupIn return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSecurityGroupInput) SetTagSpecifications(v []*TagSpecification) *CreateSecurityGroupInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateSecurityGroupInput) SetVpcId(v string) *CreateSecurityGroupInput { s.VpcId = &v @@ -48160,6 +49192,9 @@ type CreateSecurityGroupOutput struct { // The ID of the security group. GroupId *string `locationName:"groupId" type:"string"` + + // The tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -48178,6 +49213,12 @@ func (s *CreateSecurityGroupOutput) SetGroupId(v string) *CreateSecurityGroupOut return s } +// SetTags sets the Tags field's value. +func (s *CreateSecurityGroupOutput) SetTags(v []*Tag) *CreateSecurityGroupOutput { + s.Tags = v + return s +} + type CreateSnapshotInput struct { _ struct{} `type:"structure"` @@ -48451,6 +49492,8 @@ type CreateSubnetInput struct { AvailabilityZoneId *string `type:"string"` // The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. 
+ // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. // // CidrBlock is a required field CidrBlock *string `type:"string" required:"true"` @@ -48469,6 +49512,9 @@ type CreateSubnetInput struct { // ARN, you must also specify the Availability Zone of the Outpost subnet. OutpostArn *string `type:"string"` + // The tags to assign to the subnet. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -48537,6 +49583,12 @@ func (s *CreateSubnetInput) SetOutpostArn(v string) *CreateSubnetInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSubnetInput) SetTagSpecifications(v []*TagSpecification) *CreateSubnetInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateSubnetInput) SetVpcId(v string) *CreateSubnetInput { s.VpcId = &v @@ -50551,6 +51603,8 @@ type CreateVpcInput struct { AmazonProvidedIpv6CidrBlock *bool `locationName:"amazonProvidedIpv6CidrBlock" type:"boolean"` // The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. // // CidrBlock is a required field CidrBlock *string `type:"string" required:"true"` @@ -50587,6 +51641,9 @@ type CreateVpcInput struct { // The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. Ipv6Pool *string `type:"string"` + + // The tags to assign to the VPC. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -50654,6 +51711,12 @@ func (s *CreateVpcInput) SetIpv6Pool(v string) *CreateVpcInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcInput { + s.TagSpecifications = v + return s +} + type CreateVpcOutput struct { _ struct{} `type:"structure"` @@ -50701,6 +51764,9 @@ type CreateVpcPeeringConnectionInput struct { // You must specify this parameter in the request. PeerVpcId *string `locationName:"peerVpcId" type:"string"` + // The tags to assign to the peering connection. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the requester VPC. You must specify this parameter in the request. VpcId *string `locationName:"vpcId" type:"string"` } @@ -50739,6 +51805,12 @@ func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeri return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcPeeringConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcPeeringConnectionInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringConnectionInput { s.VpcId = &v @@ -51050,12 +52122,12 @@ func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayO return s } -// Describes the credit option for CPU usage of a T2 or T3 instance. +// Describes the credit option for CPU usage of a T2, T3, or T3a instance. type CreditSpecification struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 or T3 instance. Valid values are - // standard and unlimited. + // The credit option for CPU usage of a T2, T3, or T3a instance. 
Valid values + // are standard and unlimited. CpuCredits *string `locationName:"cpuCredits" type:"string"` } @@ -51075,12 +52147,12 @@ func (s *CreditSpecification) SetCpuCredits(v string) *CreditSpecification { return s } -// The credit option for CPU usage of a T2 or T3 instance. +// The credit option for CPU usage of a T2, T3, or T3a instance. type CreditSpecificationRequest struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 or T3 instance. Valid values are - // standard and unlimited. + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. // // CpuCredits is a required field CpuCredits *string `type:"string" required:"true"` @@ -52490,6 +53562,79 @@ func (s *DeleteLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteT return s } +type DeleteManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteManagedPrefixListInput) SetDryRun(v bool) *DeleteManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteManagedPrefixListInput) SetPrefixListId(v string) *DeleteManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +type DeleteManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *DeleteManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *DeleteManagedPrefixListOutput { + s.PrefixList = v + return s +} + type DeleteNatGatewayInput struct { _ struct{} `type:"structure"` @@ -53063,6 +54208,9 @@ type DeleteRouteInput struct { // for the route exactly. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. 
Otherwise, @@ -53110,6 +54258,12 @@ func (s *DeleteRouteInput) SetDestinationIpv6CidrBlock(v string) *DeleteRouteInp return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *DeleteRouteInput) SetDestinationPrefixListId(v string) *DeleteRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *DeleteRouteInput) SetDryRun(v bool) *DeleteRouteInput { s.DryRun = &v @@ -55510,12 +56664,14 @@ type DescribeAvailabilityZonesInput struct { // Zones, use the name of the group associated with the Local Zone (for example, // us-west-2-lax-1). // - // * message - The Availability Zone or Local Zone message. + // * message - The Zone message. // // * opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required). // - // * region-name - The name of the Region for the Availability Zone or Local - // Zone (for example, us-east-1). + // * The ID of the zone that handles some of the Local Zone control plane + // operations, such as API calls. + // + // * region-name - The name of the Region for the Zone (for example, us-east-1). // // * state - The state of the Availability Zone or Local Zone (available // | information | impaired | unavailable). @@ -55523,14 +56679,18 @@ type DescribeAvailabilityZonesInput struct { // * zone-id - The ID of the Availability Zone (for example, use1-az1) or // the Local Zone (for example, use usw2-lax1-az1). // + // * zone-type - The type of zone, for example, local-zone. + // // * zone-name - The name of the Availability Zone (for example, us-east-1a) // or the Local Zone (for example, use us-west-2-lax-1a). + // + // * zone-type - The type of zone, for example, local-zone. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The IDs of the Availability Zones and Local Zones. + // The IDs of the Zones. 
ZoneIds []*string `locationName:"ZoneId" locationNameList:"ZoneId" type:"list"` - // The names of the Availability Zones and Local Zones. + // The names of the Zones. ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` } @@ -55577,7 +56737,7 @@ func (s *DescribeAvailabilityZonesInput) SetZoneNames(v []*string) *DescribeAvai type DescribeAvailabilityZonesOutput struct { _ struct{} `type:"structure"` - // Information about the Availability Zones and Local Zones. + // Information about the Zones. AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` } @@ -57518,10 +58678,11 @@ type DescribeFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -57629,7 +58790,8 @@ type DescribeFastSnapshotRestoresInput struct { // // * availability-zone: The Availability Zone of the snapshot. // - // * owner-id: The ID of the AWS account that owns the snapshot. + // * owner-id: The ID of the AWS account that enabled fast snapshot restore + // on the snapshot. // // * snapshot-id: The ID of the snapshot. // @@ -59359,11 +60521,13 @@ type DescribeImagesInput struct { // // * name - The name of the AMI (provided during image creation). // - // * owner-alias - String value from an Amazon-maintained list (amazon | - // aws-marketplace | microsoft) of snapshot owners. 
Not to be confused with - // the user-configured AWS account alias, which is set from the IAM console. + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon + // | aws-marketplace). This is not the user-configured AWS account alias + // set using the IAM console. We recommend that you use the related parameter + // instead of this filter. // - // * owner-id - The AWS account ID of the image owner. + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. // // * platform - The platform. To only list Windows-based AMIs, use windows. // @@ -59405,10 +60569,10 @@ type DescribeImagesInput struct { // Default: Describes all images available to you. ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` - // Filters the images by the owner. Specify an AWS account ID, self (owner is - // the sender of the request), or an AWS owner alias (valid values are amazon - // | aws-marketplace | microsoft). Omitting this option returns all images for - // which you have launch permissions, regardless of ownership. + // Scopes the results to images with the specified owners. You can specify a + // combination of AWS account IDs, self, amazon, and aws-marketplace. If you + // omit this parameter, the results include all images for which you have launch + // permissions, regardless of ownership. Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` } @@ -60373,6 +61537,9 @@ type DescribeInstanceTypesInput struct { // * ebs-info.encryption-support - Indicates whether EBS encryption is supported. // (supported | unsupported) // + // * ebs-info.nvme-support - Indicates whether non-volatile memory express + // (NVMe) is supported or required. (required | supported | unsupported) + // // * free-tier-eligible - Indicates whether the instance type is eligible // to use in the free tier. 
(true | false) // @@ -61249,12 +62416,16 @@ type DescribeLaunchTemplateVersionsInput struct { // * ram-disk-id - The RAM disk ID. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The ID of the launch template. You must specify either the launch template - // ID or launch template name in the request. + // The ID of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. LaunchTemplateId *string `type:"string"` - // The name of the launch template. You must specify either the launch template - // ID or launch template name in the request. + // The name of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. LaunchTemplateName *string `min:"3" type:"string"` // The maximum number of results to return in a single call. To retrieve the @@ -61271,7 +62442,18 @@ type DescribeLaunchTemplateVersionsInput struct { // The token to request the next page of results. NextToken *string `type:"string"` - // One or more versions of the launch template. + // One or more versions of the launch template. Valid values depend on whether + // you are describing a specified launch template (by ID or name) or all launch + // templates in your account. + // + // To describe one or more versions of a specified launch template, valid values + // are $Latest, $Default, and numbers. + // + // To describe all launch templates in your account that are defined as the + // latest version, the valid value is $Latest. 
To describe all launch templates + // in your account that are defined as the default version, the valid value + // is $Default. You can specify $Latest and $Default in the same call. You cannot + // specify numbers. Versions []*string `locationName:"LaunchTemplateVersion" locationNameList:"item" type:"list"` } @@ -62223,6 +63405,121 @@ func (s *DescribeLocalGatewaysOutput) SetNextToken(v string) *DescribeLocalGatew return s } +type DescribeManagedPrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * owner-id - The ID of the prefix list owner. + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-name - The name of the prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more prefix list IDs. + PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeManagedPrefixListsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeManagedPrefixListsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeManagedPrefixListsInput) SetDryRun(v bool) *DescribeManagedPrefixListsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeManagedPrefixListsInput) SetFilters(v []*Filter) *DescribeManagedPrefixListsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeManagedPrefixListsInput) SetMaxResults(v int64) *DescribeManagedPrefixListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsInput) SetNextToken(v string) *DescribeManagedPrefixListsInput { + s.NextToken = &v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *DescribeManagedPrefixListsInput) SetPrefixListIds(v []*string) *DescribeManagedPrefixListsInput { + s.PrefixListIds = v + return s +} + +type DescribeManagedPrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix lists. 
+ PrefixLists []*ManagedPrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsOutput) SetNextToken(v string) *DescribeManagedPrefixListsOutput { + s.NextToken = &v + return s +} + +// SetPrefixLists sets the PrefixLists field's value. +func (s *DescribeManagedPrefixListsOutput) SetPrefixLists(v []*ManagedPrefixList) *DescribeManagedPrefixListsOutput { + s.PrefixLists = v + return s +} + type DescribeMovingAddressesInput struct { _ struct{} `type:"structure"` @@ -62913,9 +64210,6 @@ type DescribeNetworkInterfacesInput struct { // * attachment.instance-owner-id - The owner ID of the instance to which // the network interface is attached. // - // * attachment.nat-gateway-id - The ID of the NAT gateway to which the network - // interface is attached. - // // * attachment.status - The status of the attachment (attaching | attached // | detaching | detached). // @@ -64205,8 +65499,6 @@ type DescribeRouteTablesInput struct { // to find all resources assigned a tag with a specific key, regardless of // the tag value. // - // * transit-gateway-id - The ID of a transit gateway. - // // * vpc-id - The ID of the VPC for the route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -64695,8 +65987,8 @@ type DescribeSecurityGroupsInput struct { // * egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound // security group rule. // - // * egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service - // to which a security group rule allows outbound access. 
+ // * egress.ip-permission.prefix-list-id - The ID of a prefix list to which + // a security group rule allows outbound access. // // * egress.ip-permission.protocol - The IP protocol for an outbound security // group rule (tcp | udp | icmp or a protocol number). @@ -64726,8 +66018,8 @@ type DescribeSecurityGroupsInput struct { // * ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security // group rule. // - // * ip-permission.prefix-list-id - The ID (prefix) of the AWS service from - // which a security group rule allows inbound access. + // * ip-permission.prefix-list-id - The ID of a prefix list from which a + // security group rule allows inbound access. // // * ip-permission.protocol - The IP protocol for an inbound security group // rule (tcp | udp | icmp or a protocol number). @@ -64990,12 +66282,12 @@ type DescribeSnapshotsInput struct { // // * encrypted - Indicates whether the snapshot is encrypted (true | false) // - // * owner-alias - Value from an Amazon-maintained list (amazon | self | - // all | aws-marketplace | microsoft) of snapshot owners. Not to be confused - // with the user-configured AWS account alias, which is set from the IAM - // console. + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon). + // This is not the user-configured AWS account alias set using the IAM console. + // We recommend that you use the related parameter instead of this filter. // - // * owner-id - The ID of the AWS account that owns the snapshot. + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. // // * progress - The progress of the snapshot, as a percentage (for example, // 80%). @@ -65039,7 +66331,8 @@ type DescribeSnapshotsInput struct { // to return. NextToken *string `type:"string"` - // Describes the snapshots owned by these owners. + // Scopes the results to snapshots with the specified owners. 
You can specify + // a combination of AWS account IDs, self, and amazon. OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` // The IDs of the AWS accounts that can create volumes from the snapshot. @@ -65650,7 +66943,7 @@ type DescribeSpotInstanceRequestsInput struct { // * state - The state of the Spot Instance request (open | active | closed // | cancelled | failed). Spot request status information can help you track // your Amazon EC2 Spot Instance requests. For more information, see Spot - // Request Status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // request status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon EC2 User Guide for Linux Instances. // // * status-code - The short code describing the most recent evaluation of @@ -67778,9 +69071,34 @@ type DescribeVolumesModificationsInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The filters. Supported filters: volume-id | modification-state | target-size - // | target-iops | target-volume-type | original-size | original-iops | original-volume-type - // | start-time | originalMultiAttachEnabled | targetMultiAttachEnabled. + // The filters. + // + // * modification-state - The current modification state (modifying | optimizing + // | completed | failed). + // + // * original-iops - The original IOPS rate of the volume. + // + // * original-size - The original size of the volume, in GiB. + // + // * original-volume-type - The original volume type of the volume (standard + // | io1 | gp2 | sc1 | st1). + // + // * originalMultiAttachEnabled - Indicates whether Multi-Attach support + // was enabled (true | false). + // + // * start-time - The modification start time. + // + // * target-iops - The target IOPS rate of the volume. + // + // * target-size - The target size of the volume, in GiB. 
+ // + // * target-volume-type - The target volume type of the volume (standard + // | io1 | gp2 | sc1 | st1). + // + // * targetMultiAttachEnabled - Indicates whether Multi-Attach support is + // to be enabled (true | false). + // + // * volume-id - The ID of the volume. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results (up to a limit of 500) to be returned in a @@ -67790,7 +69108,7 @@ type DescribeVolumesModificationsInput struct { // The nextToken value returned by a previous paginated request. NextToken *string `type:"string"` - // The IDs of the volumes for which in-progress modifications will be described. + // The IDs of the volumes. VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` } @@ -70101,10 +71419,11 @@ type DisableFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -71517,7 +72836,7 @@ type EbsBlockDevice struct { _ struct{} `type:"structure"` // Indicates whether the EBS volume is deleted on instance termination. For - // more information, see Preserving Amazon EBS Volumes on Instance Termination + // more information, see Preserving Amazon EBS volumes on instance termination // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination) // in the Amazon Elastic Compute Cloud User Guide. 
DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` @@ -71532,7 +72851,7 @@ type EbsBlockDevice struct { // In no case can you remove encryption from an encrypted volume. // // Encrypted volumes can only be attached to instances that support Amazon EBS - // encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). // // This parameter is not returned by . Encrypted *bool `locationName:"encrypted" type:"boolean"` @@ -71541,7 +72860,7 @@ type EbsBlockDevice struct { // For io1 volumes, this represents the number of IOPS that are provisioned // for the volume. For gp2 volumes, this represents the baseline performance // of the volume and the rate at which the volume accumulates I/O credits for - // bursting. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // bursting. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS @@ -71653,6 +72972,9 @@ type EbsInfo struct { // Indicates whether Amazon EBS encryption is supported. EncryptionSupport *string `locationName:"encryptionSupport" type:"string" enum:"EbsEncryptionSupport"` + + // Indicates whether non-volatile memory express (NVMe) is supported. + NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EbsNvmeSupport"` } // String returns the string representation @@ -71683,6 +73005,12 @@ func (s *EbsInfo) SetEncryptionSupport(v string) *EbsInfo { return s } +// SetNvmeSupport sets the NvmeSupport field's value. 
+func (s *EbsInfo) SetNvmeSupport(v string) *EbsInfo { + s.NvmeSupport = &v + return s +} + // Describes a parameter used to set up an EBS volume in a block device mapping. type EbsInstanceBlockDevice struct { _ struct{} `type:"structure"` @@ -72115,7 +73443,7 @@ type ElasticInferenceAccelerator struct { Count *int64 `min:"1" type:"integer"` // The type of elastic inference accelerator. The possible values are eia1.medium, - // eia1.large, and eia1.xlarge. + // eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge. // // Type is a required field Type *string `type:"string" required:"true"` @@ -72383,10 +73711,11 @@ type EnableFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -73198,7 +74527,7 @@ type ExportImageInput struct { // Token to enable idempotency for export image requests. ClientToken *string `type:"string" idempotencyToken:"true"` - // A description of the image being exported. The maximum length is 255 bytes. + // A description of the image being exported. The maximum length is 255 characters. Description *string `type:"string"` // The disk image format. @@ -73218,15 +74547,18 @@ type ExportImageInput struct { ImageId *string `type:"string" required:"true"` // The name of the role that grants VM Import/Export permission to export images - // to your S3 bucket. If this parameter is not specified, the default role is - // named 'vmimport'. + // to your Amazon S3 bucket. 
If this parameter is not specified, the default + // role is named 'vmimport'. RoleName *string `type:"string"` - // Information about the destination S3 bucket. The bucket must exist and grant - // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // Information about the destination Amazon S3 bucket. The bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. // // S3ExportLocation is a required field S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` + + // The tags to apply to the image being exported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -73305,6 +74637,12 @@ func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) * return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ExportImageInput) SetTagSpecifications(v []*TagSpecification) *ExportImageInput { + s.TagSpecifications = v + return s +} + type ExportImageOutput struct { _ struct{} `type:"structure"` @@ -73324,10 +74662,10 @@ type ExportImageOutput struct { Progress *string `locationName:"progress" type:"string"` // The name of the role that grants VM Import/Export permission to export images - // to your S3 bucket. + // to your Amazon S3 bucket. RoleName *string `locationName:"roleName" type:"string"` - // Information about the destination S3 bucket. + // Information about the destination Amazon S3 bucket. S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` // The status of the export image task. The possible values are active, completed, @@ -73336,6 +74674,9 @@ type ExportImageOutput struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -73402,6 +74743,12 @@ func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { return s } +// SetTags sets the Tags field's value. +func (s *ExportImageOutput) SetTags(v []*Tag) *ExportImageOutput { + s.Tags = v + return s +} + // Describes an export image task. type ExportImageTask struct { _ struct{} `type:"structure"` @@ -73418,7 +74765,7 @@ type ExportImageTask struct { // The percent complete of the export image task. Progress *string `locationName:"progress" type:"string"` - // Information about the destination S3 bucket. + // Information about the destination Amazon S3 bucket. S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` // The status of the export image task. The possible values are active, completed, @@ -73427,6 +74774,9 @@ type ExportImageTask struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -73481,6 +74831,12 @@ func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { return s } +// SetTags sets the Tags field's value. +func (s *ExportImageTask) SetTags(v []*Tag) *ExportImageTask { + s.Tags = v + return s +} + // Describes an instance export task. type ExportTask struct { _ struct{} `type:"structure"` @@ -73563,7 +74919,7 @@ func (s *ExportTask) SetTags(v []*Tag) *ExportTask { type ExportTaskS3Location struct { _ struct{} `type:"structure"` - // The destination S3 bucket. + // The destination Amazon S3 bucket. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The prefix (logical hierarchy) in the bucket. 
@@ -73596,7 +74952,7 @@ func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { type ExportTaskS3LocationRequest struct { _ struct{} `type:"structure"` - // The destination S3 bucket. + // The destination Amazon S3 bucket. // // S3Bucket is a required field S3Bucket *string `type:"string" required:"true"` @@ -73651,8 +75007,8 @@ type ExportToS3Task struct { // The format for the exported image. DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` - // The S3 bucket for the destination image. The destination bucket must exist - // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The encryption key for your S3 bucket. @@ -73704,12 +75060,12 @@ type ExportToS3TaskSpecification struct { // The format for the exported image. DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` - // The S3 bucket for the destination image. The destination bucket must exist - // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. S3Bucket *string `locationName:"s3Bucket" type:"string"` - // The image is written to a single object in the S3 bucket at the S3 key s3prefix - // + exportTaskId + '.' + diskImageFormat. + // The image is written to a single object in the Amazon S3 bucket at the S3 + // key s3prefix + exportTaskId + '.' + diskImageFormat. 
S3Prefix *string `locationName:"s3Prefix" type:"string"` } @@ -76086,6 +77442,226 @@ func (s *GetLaunchTemplateDataOutput) SetLaunchTemplateData(v *ResponseLaunchTem return s } +type GetManagedPrefixListAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *GetManagedPrefixListAssociationsInput) SetDryRun(v bool) *GetManagedPrefixListAssociationsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListAssociationsInput) SetMaxResults(v int64) *GetManagedPrefixListAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsInput) SetNextToken(v string) *GetManagedPrefixListAssociationsInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListAssociationsInput) SetPrefixListId(v string) *GetManagedPrefixListAssociationsInput { + s.PrefixListId = &v + return s +} + +type GetManagedPrefixListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the associations. + PrefixListAssociations []*PrefixListAssociation `locationName:"prefixListAssociationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsOutput) SetNextToken(v string) *GetManagedPrefixListAssociationsOutput { + s.NextToken = &v + return s +} + +// SetPrefixListAssociations sets the PrefixListAssociations field's value. 
+func (s *GetManagedPrefixListAssociationsOutput) SetPrefixListAssociations(v []*PrefixListAssociation) *GetManagedPrefixListAssociationsOutput { + s.PrefixListAssociations = v + return s +} + +type GetManagedPrefixListEntriesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version of the prefix list for which to return the entries. The default + // is the current version. + TargetVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListEntriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListEntriesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
+func (s *GetManagedPrefixListEntriesInput) SetDryRun(v bool) *GetManagedPrefixListEntriesInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListEntriesInput) SetMaxResults(v int64) *GetManagedPrefixListEntriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListEntriesInput) SetNextToken(v string) *GetManagedPrefixListEntriesInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListEntriesInput) SetPrefixListId(v string) *GetManagedPrefixListEntriesInput { + s.PrefixListId = &v + return s +} + +// SetTargetVersion sets the TargetVersion field's value. +func (s *GetManagedPrefixListEntriesInput) SetTargetVersion(v int64) *GetManagedPrefixListEntriesInput { + s.TargetVersion = &v + return s +} + +type GetManagedPrefixListEntriesOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list entries. + Entries []*PrefixListEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesOutput) GoString() string { + return s.String() +} + +// SetEntries sets the Entries field's value. +func (s *GetManagedPrefixListEntriesOutput) SetEntries(v []*PrefixListEntry) *GetManagedPrefixListEntriesOutput { + s.Entries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetManagedPrefixListEntriesOutput) SetNextToken(v string) *GetManagedPrefixListEntriesOutput { + s.NextToken = &v + return s +} + type GetPasswordDataInput struct { _ struct{} `type:"structure"` @@ -76965,7 +78541,7 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptions struct { _ struct{} `type:"structure"` @@ -76993,7 +78569,7 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptionsRequest struct { _ struct{} `type:"structure"` @@ -78143,7 +79719,7 @@ type ImageDiskContainer struct { // The format of the disk image being imported. // - // Valid values: VHD | VMDK | OVA + // Valid values: OVA | VHD | VHDX |VMDK Format *string `type:"string"` // The ID of the EBS snapshot to be used for importing the snapshot. 
@@ -78385,6 +79961,9 @@ type ImportImageInput struct { // The name of the role to use when not using the default role, 'vmimport'. RoleName *string `type:"string"` + + // The tags to apply to the image being imported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -78475,6 +80054,12 @@ func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ImportImageInput) SetTagSpecifications(v []*TagSpecification) *ImportImageInput { + s.TagSpecifications = v + return s +} + // The request information of license configurations. type ImportImageLicenseConfigurationRequest struct { _ struct{} `type:"structure"` @@ -78532,7 +80117,7 @@ type ImportImageOutput struct { // A description of the import task. Description *string `locationName:"description" type:"string"` - // Indicates whether the AMI is encypted. + // Indicates whether the AMI is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` // The target hypervisor of the import task. @@ -78568,6 +80153,9 @@ type ImportImageOutput struct { // A detailed status message of the import task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -78664,6 +80252,12 @@ func (s *ImportImageOutput) SetStatusMessage(v string) *ImportImageOutput { return s } +// SetTags sets the Tags field's value. +func (s *ImportImageOutput) SetTags(v []*Tag) *ImportImageOutput { + s.Tags = v + return s +} + // Describes an import image task. type ImportImageTask struct { _ struct{} `type:"structure"` @@ -79364,6 +80958,9 @@ type ImportSnapshotInput struct { // The name of the role to use when not using the default role, 'vmimport'. 
RoleName *string `type:"string"` + + // The tags to apply to the snapshot being imported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -79424,6 +81021,12 @@ func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ImportSnapshotInput) SetTagSpecifications(v []*TagSpecification) *ImportSnapshotInput { + s.TagSpecifications = v + return s +} + type ImportSnapshotOutput struct { _ struct{} `type:"structure"` @@ -79435,6 +81038,9 @@ type ImportSnapshotOutput struct { // Information about the import snapshot task. SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + // Any tags assigned to the snapshot being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -79465,6 +81071,12 @@ func (s *ImportSnapshotOutput) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *Imp return s } +// SetTags sets the Tags field's value. +func (s *ImportSnapshotOutput) SetTags(v []*Tag) *ImportSnapshotOutput { + s.Tags = v + return s +} + // Describes an import snapshot task. type ImportSnapshotTask struct { _ struct{} `type:"structure"` @@ -82110,9 +83722,7 @@ type IpPermission struct { // [VPC only] The IPv6 ranges. Ipv6Ranges []*Ipv6Range `locationName:"ipv6Ranges" locationNameList:"item" type:"list"` - // [VPC only] The prefix list IDs for an AWS service. With outbound rules, this - // is the AWS service to access through a VPC endpoint from instances associated - // with the security group. + // [VPC only] The prefix list IDs. PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. 
@@ -85301,6 +86911,111 @@ func (s *LocalGatewayVirtualInterfaceGroup) SetTags(v []*Tag) *LocalGatewayVirtu return s } +// Describes a managed prefix list. +type ManagedPrefixList struct { + _ struct{} `type:"structure"` + + // The IP address version. + AddressFamily *string `locationName:"addressFamily" type:"string"` + + // The maximum number of entries for the prefix list. + MaxEntries *int64 `locationName:"maxEntries" type:"integer"` + + // The ID of the owner of the prefix list. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The Amazon Resource Name (ARN) for the prefix list. + PrefixListArn *string `locationName:"prefixListArn" min:"1" type:"string"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix list. + PrefixListName *string `locationName:"prefixListName" type:"string"` + + // The state of the prefix list. + State *string `locationName:"state" type:"string" enum:"PrefixListState"` + + // The state message. + StateMessage *string `locationName:"stateMessage" type:"string"` + + // The tags for the prefix list. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The version of the prefix list. + Version *int64 `locationName:"version" type:"long"` +} + +// String returns the string representation +func (s ManagedPrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedPrefixList) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *ManagedPrefixList) SetAddressFamily(v string) *ManagedPrefixList { + s.AddressFamily = &v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *ManagedPrefixList) SetMaxEntries(v int64) *ManagedPrefixList { + s.MaxEntries = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. 
+func (s *ManagedPrefixList) SetOwnerId(v string) *ManagedPrefixList { + s.OwnerId = &v + return s +} + +// SetPrefixListArn sets the PrefixListArn field's value. +func (s *ManagedPrefixList) SetPrefixListArn(v string) *ManagedPrefixList { + s.PrefixListArn = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ManagedPrefixList) SetPrefixListId(v string) *ManagedPrefixList { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *ManagedPrefixList) SetPrefixListName(v string) *ManagedPrefixList { + s.PrefixListName = &v + return s +} + +// SetState sets the State field's value. +func (s *ManagedPrefixList) SetState(v string) *ManagedPrefixList { + s.State = &v + return s +} + +// SetStateMessage sets the StateMessage field's value. +func (s *ManagedPrefixList) SetStateMessage(v string) *ManagedPrefixList { + s.StateMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ManagedPrefixList) SetTags(v []*Tag) *ManagedPrefixList { + s.Tags = v + return s +} + +// SetVersion sets the Version field's value. +func (s *ManagedPrefixList) SetVersion(v int64) *ManagedPrefixList { + s.Version = &v + return s +} + // Describes the memory for the instance type. type MemoryInfo struct { _ struct{} `type:"structure"` @@ -86572,7 +88287,7 @@ type ModifyInstanceAttributeInput struct { // // To add instance store volumes to an Amazon EBS-backed instance, you must // add them when you launch the instance. For more information, see Updating - // the Block Device Mapping when Launching an Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) + // the block device mapping when launching an instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) // in the Amazon Elastic Compute Cloud User Guide. 
BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` @@ -86615,7 +88330,7 @@ type ModifyInstanceAttributeInput struct { InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` // Changes the instance type to the specified value. For more information, see - // Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` @@ -87423,6 +89138,135 @@ func (s *ModifyLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Modif return s } +type ModifyManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // One or more entries to add to the prefix list. + AddEntries []*AddPrefixListEntry `locationName:"AddEntry" type:"list"` + + // The current version of the prefix list. + CurrentVersion *int64 `type:"long"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // A name for the prefix list. + PrefixListName *string `type:"string"` + + // One or more entries to remove from the prefix list. 
+ RemoveEntries []*RemovePrefixListEntry `locationName:"RemoveEntry" type:"list"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.AddEntries != nil { + for i, v := range s.AddEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddEntries", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RemoveEntries != nil { + for i, v := range s.RemoveEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RemoveEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddEntries sets the AddEntries field's value. +func (s *ModifyManagedPrefixListInput) SetAddEntries(v []*AddPrefixListEntry) *ModifyManagedPrefixListInput { + s.AddEntries = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *ModifyManagedPrefixListInput) SetCurrentVersion(v int64) *ModifyManagedPrefixListInput { + s.CurrentVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyManagedPrefixListInput) SetDryRun(v bool) *ModifyManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. 
+func (s *ModifyManagedPrefixListInput) SetPrefixListId(v string) *ModifyManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *ModifyManagedPrefixListInput) SetPrefixListName(v string) *ModifyManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetRemoveEntries sets the RemoveEntries field's value. +func (s *ModifyManagedPrefixListInput) SetRemoveEntries(v []*RemovePrefixListEntry) *ModifyManagedPrefixListInput { + s.RemoveEntries = v + return s +} + +type ModifyManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *ModifyManagedPrefixListOutput { + s.PrefixList = v + return s +} + // Contains the parameters for ModifyNetworkInterfaceAttribute. type ModifyNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` @@ -92184,6 +94028,72 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { return s } +// Describes the resource with which a prefix list is associated. +type PrefixListAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The owner of the resource. 
+ ResourceOwner *string `locationName:"resourceOwner" type:"string"` +} + +// String returns the string representation +func (s PrefixListAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *PrefixListAssociation) SetResourceId(v string) *PrefixListAssociation { + s.ResourceId = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *PrefixListAssociation) SetResourceOwner(v string) *PrefixListAssociation { + s.ResourceOwner = &v + return s +} + +// Describes a prefix list entry. +type PrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + Cidr *string `locationName:"cidr" type:"string"` + + // The description. + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s PrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListEntry) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *PrefixListEntry) SetCidr(v string) *PrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *PrefixListEntry) SetDescription(v string) *PrefixListEntry { + s.Description = &v + return s +} + // Describes a prefix list ID. type PrefixListId struct { _ struct{} `type:"structure"` @@ -93017,6 +94927,9 @@ type PurchaseHostReservationInput struct { // // OfferingId is a required field OfferingId *string `type:"string" required:"true"` + + // The tags to apply to the Dedicated Host Reservation during purchase. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -93075,6 +94988,12 @@ func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostRese return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *PurchaseHostReservationInput) SetTagSpecifications(v []*TagSpecification) *PurchaseHostReservationInput { + s.TagSpecifications = v + return s +} + type PurchaseHostReservationOutput struct { _ struct{} `type:"structure"` @@ -94436,6 +96355,45 @@ func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHost return s } +// An entry for a prefix list. +type RemovePrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePrefixListEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemovePrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemovePrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *RemovePrefixListEntry) SetCidr(v string) *RemovePrefixListEntry { + s.Cidr = &v + return s +} + type ReplaceIamInstanceProfileAssociationInput struct { _ struct{} `type:"structure"` @@ -94776,6 +96734,9 @@ type ReplaceRouteInput struct { // you provide must match the CIDR of an existing route in the table. 
DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -94850,6 +96811,12 @@ func (s *ReplaceRouteInput) SetDestinationIpv6CidrBlock(v string) *ReplaceRouteI return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *ReplaceRouteInput) SetDestinationPrefixListId(v string) *ReplaceRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *ReplaceRouteInput) SetDryRun(v bool) *ReplaceRouteInput { s.DryRun = &v @@ -95290,8 +97257,8 @@ type RequestLaunchTemplateData struct { // in the Amazon Elastic Compute Cloud User Guide. CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"` - // The credit option for CPU usage of the instance. Valid for T2 or T3 instances - // only. + // The credit option for CPU usage of the instance. Valid for T2, T3, or T3a + // instances only. CreditSpecification *CreditSpecificationRequest `type:"structure"` // If you set this parameter to true, you can't terminate the instance using @@ -95753,6 +97720,12 @@ type RequestSpotInstancesInput struct { // The default is the On-Demand price. SpotPrice *string `locationName:"spotPrice" type:"string"` + // The key-value pair for tagging the Spot Instance request on creation. The + // value for ResourceType must be spot-instances-request, otherwise the Spot + // Instance request fails. To tag the Spot Instance request after it has been + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The Spot Instance request type. // // Default: one-time @@ -95856,6 +97829,12 @@ func (s *RequestSpotInstancesInput) SetSpotPrice(v string) *RequestSpotInstances return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *RequestSpotInstancesInput) SetTagSpecifications(v []*TagSpecification) *RequestSpotInstancesInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *RequestSpotInstancesInput) SetType(v string) *RequestSpotInstancesInput { s.Type = &v @@ -97743,6 +99722,107 @@ func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToCla return s } +type RestoreManagedPrefixListVersionInput struct { + _ struct{} `type:"structure"` + + // The current version number for the prefix list. + // + // CurrentVersion is a required field + CurrentVersion *int64 `type:"long" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version to restore. + // + // PreviousVersion is a required field + PreviousVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s RestoreManagedPrefixListVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreManagedPrefixListVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RestoreManagedPrefixListVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreManagedPrefixListVersionInput"} + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.PreviousVersion == nil { + invalidParams.Add(request.NewErrParamRequired("PreviousVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *RestoreManagedPrefixListVersionInput) SetCurrentVersion(v int64) *RestoreManagedPrefixListVersionInput { + s.CurrentVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RestoreManagedPrefixListVersionInput) SetDryRun(v bool) *RestoreManagedPrefixListVersionInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *RestoreManagedPrefixListVersionInput) SetPrefixListId(v string) *RestoreManagedPrefixListVersionInput { + s.PrefixListId = &v + return s +} + +// SetPreviousVersion sets the PreviousVersion field's value. +func (s *RestoreManagedPrefixListVersionInput) SetPreviousVersion(v int64) *RestoreManagedPrefixListVersionInput { + s.PreviousVersion = &v + return s +} + +type RestoreManagedPrefixListVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s RestoreManagedPrefixListVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreManagedPrefixListVersionOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. 
+func (s *RestoreManagedPrefixListVersionOutput) SetPrefixList(v *ManagedPrefixList) *RestoreManagedPrefixListVersionOutput { + s.PrefixList = v + return s +} + type RevokeClientVpnIngressInput struct { _ struct{} `type:"structure"` @@ -98475,14 +100555,14 @@ type RunInstancesInput struct { ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` // The CPU options for the instance. For more information, see Optimizing CPU - // Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) // in the Amazon Elastic Compute Cloud User Guide. CpuOptions *CpuOptionsRequest `type:"structure"` // The credit option for CPU usage of the burstable performance instance. Valid // values are standard and unlimited. To change this attribute after launch, // use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). - // For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) + // For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: standard (T2 instances) or unlimited (T3/T3a instances) @@ -98526,7 +100606,7 @@ type RunInstancesInput struct { ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` // Indicates whether an instance is enabled for hibernation. 
For more information, - // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. HibernationOptions *HibernationOptionsRequest `type:"structure"` @@ -98549,7 +100629,7 @@ type RunInstancesInput struct { // InstanceInterruptionBehavior is set to either hibernate or stop. InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"` - // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: m1.small @@ -98609,7 +100689,7 @@ type RunInstancesInput struct { MaxCount *int64 `type:"integer" required:"true"` // The metadata options for the instance. For more information, see Instance - // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). MetadataOptions *InstanceMetadataOptionsRequest `type:"structure"` // The minimum number of instances to launch. If you specify a minimum that @@ -98686,7 +100766,7 @@ type RunInstancesInput struct { TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The user data to make available to the instance. 
For more information, see - // Running Commands on Your Linux Instance at Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // Running commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) // (Windows). If you are using a command line tool, base64-encoding is performed // for you, and you can load the text from a file. Otherwise, you must provide @@ -101192,9 +103272,10 @@ type Snapshot struct { // key for the parent volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` - // Value from an Amazon-maintained list (amazon | self | all | aws-marketplace - // | microsoft) of snapshot owners. Not to be confused with the user-configured - // AWS account alias, which is set from the IAM console. + // The AWS owner alias, as maintained by Amazon. The possible values are: amazon + // | self | all | aws-marketplace | microsoft. This AWS owner alias is not to + // be confused with the user-configured AWS account alias, which is set from + // the IAM console. OwnerAlias *string `locationName:"ownerAlias" type:"string"` // The AWS account ID of the EBS snapshot owner. @@ -101357,7 +103438,7 @@ type SnapshotDetail struct { // The URL used to access the disk image. Url *string `locationName:"url" type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` } @@ -101447,7 +103528,7 @@ type SnapshotDiskContainer struct { // a https URL (https://..) or an Amazon S3 URL (s3://..). Url *string `type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. 
UserBucket *UserBucket `type:"structure"` } @@ -101627,7 +103708,7 @@ type SnapshotTaskDetail struct { // The URL of the disk image from which the snapshot is created. Url *string `locationName:"url" type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` } @@ -102107,7 +104188,7 @@ type SpotFleetRequestConfigData struct { // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) // role that grants the Spot Fleet the permission to request, launch, terminate, - // and tag instances on your behalf. For more information, see Spot Fleet Prerequisites + // and tag instances on your behalf. For more information, see Spot Fleet prerequisites // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) // in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate // Spot Instances on your behalf when you cancel its Spot Fleet request using @@ -102499,7 +104580,7 @@ type SpotInstanceRequest struct { SpotPrice *string `locationName:"spotPrice" type:"string"` // The state of the Spot Instance request. Spot status information helps track - // your Spot Instance requests. For more information, see Spot Status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // your Spot Instance requests. For more information, see Spot status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon EC2 User Guide for Linux Instances. State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` @@ -102685,7 +104766,7 @@ func (s *SpotInstanceStateFault) SetMessage(v string) *SpotInstanceStateFault { type SpotInstanceStatus struct { _ struct{} `type:"structure"` - // The status code. 
For a list of status codes, see Spot Status Codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // The status code. For a list of status codes, see Spot status codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) // in the Amazon EC2 User Guide for Linux Instances. Code *string `locationName:"code" type:"string"` @@ -103109,8 +105190,7 @@ type StaleIpPermission struct { // The IP ranges. Not applicable for stale security group rules. IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"` - // The prefix list IDs for an AWS service. Not applicable for stale security - // group rules. + // The prefix list IDs. Not applicable for stale security group rules. PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of the port range for the TCP and UDP protocols, or an ICMP type @@ -103482,7 +105562,7 @@ type StopInstancesInput struct { // Hibernates the instance if the instance was enabled for hibernation at launch. // If the instance cannot hibernate successfully, a normal shutdown occurs. - // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: false @@ -104047,12 +106127,14 @@ type TagSpecification struct { // The type of resource to tag. 
Currently, the resource types that support tagging // on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host - // | fleet | fpga-image | instance | ipv4pool-ec2 | ipv6pool-ec2 | key-pair - // | launch-template | natgateway | spot-fleet-request | placement-group | snapshot - // | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target - // | transit-gateway | transit-gateway-attachment | transit-gateway-route-table - // | vpc-endpoint (for interface VPC endpoints)| vpc-endpoint-service (for gateway - // VPC endpoints) | volume | vpc-flow-log. + // | dhcp-options | export-image-task | export-instance-task | fleet | fpga-image + // | host-reservation | import-image-task | import-snapshot-task | instance + // | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | launch-template + // | placement-group | prefix-list | natgateway | network-acl | security-group + // | spot-fleet-request | spot-instances-request | snapshot | subnet | traffic-mirror-filter + // | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment + // | transit-gateway-route-table | volume |vpc | vpc-endpoint (for interface + // and gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log. // // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` @@ -106163,28 +108245,28 @@ type TransitGatewayRequestOptions struct { // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. // The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 - // for 32-bit ASNs. + // for 32-bit ASNs. The default is 64512. AmazonSideAsn *int64 `type:"long"` - // Enable or disable automatic acceptance of attachment requests. The default - // is disable. + // Enable or disable automatic acceptance of attachment requests. 
Disabled by + // default. AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` // Enable or disable automatic association with the default association route - // table. The default is enable. + // table. Enabled by default. DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` // Enable or disable automatic propagation of routes to the default propagation - // route table. The default is enable. + // route table. Enabled by default. DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` - // Enable or disable DNS support. + // Enable or disable DNS support. Enabled by default. DnsSupport *string `type:"string" enum:"DnsSupportValue"` // Indicates whether multicast is enabled on the transit gateway MulticastSupport *string `type:"string" enum:"MulticastSupportValue"` - // Enable or disable Equal Cost Multipath Protocol support. + // Enable or disable Equal Cost Multipath Protocol support. Enabled by default. VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` } @@ -107357,11 +109439,11 @@ func (s *UpdateSecurityGroupRuleDescriptionsIngressOutput) SetReturn(v bool) *Up return s } -// Describes the S3 bucket for the disk image. +// Describes the Amazon S3 bucket for the disk image. type UserBucket struct { _ struct{} `type:"structure"` - // The name of the S3 bucket where the disk image is located. + // The name of the Amazon S3 bucket where the disk image is located. S3Bucket *string `type:"string"` // The file name of the disk image. @@ -107390,11 +109472,11 @@ func (s *UserBucket) SetS3Key(v string) *UserBucket { return s } -// Describes the S3 bucket for the disk image. +// Describes the Amazon S3 bucket for the disk image. type UserBucketDetails struct { _ struct{} `type:"structure"` - // The S3 bucket from which the disk image was created. + // The Amazon S3 bucket from which the disk image was created. 
S3Bucket *string `locationName:"s3Bucket" type:"string"` // The file name of the disk image. @@ -108031,7 +110113,7 @@ type VolumeModification struct { // The original IOPS rate of the volume. OriginalIops *int64 `locationName:"originalIops" type:"integer"` - // The original size of the volume. + // The original size of the volume, in GiB. OriginalSize *int64 `locationName:"originalSize" type:"integer"` // The original EBS volume type of the volume. @@ -110439,6 +112521,17 @@ const ( EbsEncryptionSupportSupported = "supported" ) +const ( + // EbsNvmeSupportUnsupported is a EbsNvmeSupport enum value + EbsNvmeSupportUnsupported = "unsupported" + + // EbsNvmeSupportSupported is a EbsNvmeSupport enum value + EbsNvmeSupportSupported = "supported" + + // EbsNvmeSupportRequired is a EbsNvmeSupport enum value + EbsNvmeSupportRequired = "required" +) + const ( // EbsOptimizedSupportUnsupported is a EbsOptimizedSupport enum value EbsOptimizedSupportUnsupported = "unsupported" @@ -111169,6 +113262,33 @@ const ( // InstanceTypeR5ad24xlarge is a InstanceType enum value InstanceTypeR5ad24xlarge = "r5ad.24xlarge" + // InstanceTypeR6gMetal is a InstanceType enum value + InstanceTypeR6gMetal = "r6g.metal" + + // InstanceTypeR6gMedium is a InstanceType enum value + InstanceTypeR6gMedium = "r6g.medium" + + // InstanceTypeR6gLarge is a InstanceType enum value + InstanceTypeR6gLarge = "r6g.large" + + // InstanceTypeR6gXlarge is a InstanceType enum value + InstanceTypeR6gXlarge = "r6g.xlarge" + + // InstanceTypeR6g2xlarge is a InstanceType enum value + InstanceTypeR6g2xlarge = "r6g.2xlarge" + + // InstanceTypeR6g4xlarge is a InstanceType enum value + InstanceTypeR6g4xlarge = "r6g.4xlarge" + + // InstanceTypeR6g8xlarge is a InstanceType enum value + InstanceTypeR6g8xlarge = "r6g.8xlarge" + + // InstanceTypeR6g12xlarge is a InstanceType enum value + InstanceTypeR6g12xlarge = "r6g.12xlarge" + + // InstanceTypeR6g16xlarge is a InstanceType enum value + InstanceTypeR6g16xlarge = 
"r6g.16xlarge" + // InstanceTypeX116xlarge is a InstanceType enum value InstanceTypeX116xlarge = "x1.16xlarge" @@ -111319,6 +113439,30 @@ const ( // InstanceTypeC5Metal is a InstanceType enum value InstanceTypeC5Metal = "c5.metal" + // InstanceTypeC5aLarge is a InstanceType enum value + InstanceTypeC5aLarge = "c5a.large" + + // InstanceTypeC5aXlarge is a InstanceType enum value + InstanceTypeC5aXlarge = "c5a.xlarge" + + // InstanceTypeC5a2xlarge is a InstanceType enum value + InstanceTypeC5a2xlarge = "c5a.2xlarge" + + // InstanceTypeC5a4xlarge is a InstanceType enum value + InstanceTypeC5a4xlarge = "c5a.4xlarge" + + // InstanceTypeC5a8xlarge is a InstanceType enum value + InstanceTypeC5a8xlarge = "c5a.8xlarge" + + // InstanceTypeC5a12xlarge is a InstanceType enum value + InstanceTypeC5a12xlarge = "c5a.12xlarge" + + // InstanceTypeC5a16xlarge is a InstanceType enum value + InstanceTypeC5a16xlarge = "c5a.16xlarge" + + // InstanceTypeC5a24xlarge is a InstanceType enum value + InstanceTypeC5a24xlarge = "c5a.24xlarge" + // InstanceTypeC5dLarge is a InstanceType enum value InstanceTypeC5dLarge = "c5d.large" @@ -111364,6 +113508,33 @@ const ( // InstanceTypeC5n18xlarge is a InstanceType enum value InstanceTypeC5n18xlarge = "c5n.18xlarge" + // InstanceTypeC6gMetal is a InstanceType enum value + InstanceTypeC6gMetal = "c6g.metal" + + // InstanceTypeC6gMedium is a InstanceType enum value + InstanceTypeC6gMedium = "c6g.medium" + + // InstanceTypeC6gLarge is a InstanceType enum value + InstanceTypeC6gLarge = "c6g.large" + + // InstanceTypeC6gXlarge is a InstanceType enum value + InstanceTypeC6gXlarge = "c6g.xlarge" + + // InstanceTypeC6g2xlarge is a InstanceType enum value + InstanceTypeC6g2xlarge = "c6g.2xlarge" + + // InstanceTypeC6g4xlarge is a InstanceType enum value + InstanceTypeC6g4xlarge = "c6g.4xlarge" + + // InstanceTypeC6g8xlarge is a InstanceType enum value + InstanceTypeC6g8xlarge = "c6g.8xlarge" + + // InstanceTypeC6g12xlarge is a InstanceType enum value + 
InstanceTypeC6g12xlarge = "c6g.12xlarge" + + // InstanceTypeC6g16xlarge is a InstanceType enum value + InstanceTypeC6g16xlarge = "c6g.16xlarge" + // InstanceTypeCc14xlarge is a InstanceType enum value InstanceTypeCc14xlarge = "cc1.4xlarge" @@ -111406,6 +113577,9 @@ const ( // InstanceTypeG4dn16xlarge is a InstanceType enum value InstanceTypeG4dn16xlarge = "g4dn.16xlarge" + // InstanceTypeG4dnMetal is a InstanceType enum value + InstanceTypeG4dnMetal = "g4dn.metal" + // InstanceTypeCg14xlarge is a InstanceType enum value InstanceTypeCg14xlarge = "cg1.4xlarge" @@ -112125,6 +114299,44 @@ const ( PlatformValuesWindows = "Windows" ) +const ( + // PrefixListStateCreateInProgress is a PrefixListState enum value + PrefixListStateCreateInProgress = "create-in-progress" + + // PrefixListStateCreateComplete is a PrefixListState enum value + PrefixListStateCreateComplete = "create-complete" + + // PrefixListStateCreateFailed is a PrefixListState enum value + PrefixListStateCreateFailed = "create-failed" + + // PrefixListStateModifyInProgress is a PrefixListState enum value + PrefixListStateModifyInProgress = "modify-in-progress" + + // PrefixListStateModifyComplete is a PrefixListState enum value + PrefixListStateModifyComplete = "modify-complete" + + // PrefixListStateModifyFailed is a PrefixListState enum value + PrefixListStateModifyFailed = "modify-failed" + + // PrefixListStateRestoreInProgress is a PrefixListState enum value + PrefixListStateRestoreInProgress = "restore-in-progress" + + // PrefixListStateRestoreComplete is a PrefixListState enum value + PrefixListStateRestoreComplete = "restore-complete" + + // PrefixListStateRestoreFailed is a PrefixListState enum value + PrefixListStateRestoreFailed = "restore-failed" + + // PrefixListStateDeleteInProgress is a PrefixListState enum value + PrefixListStateDeleteInProgress = "delete-in-progress" + + // PrefixListStateDeleteComplete is a PrefixListState enum value + PrefixListStateDeleteComplete = "delete-complete" + + // 
PrefixListStateDeleteFailed is a PrefixListState enum value + PrefixListStateDeleteFailed = "delete-failed" +) + const ( // PrincipalTypeAll is a PrincipalType enum value PrincipalTypeAll = "All" @@ -112269,6 +114481,15 @@ const ( // ResourceTypeElasticIp is a ResourceType enum value ResourceTypeElasticIp = "elastic-ip" + // ResourceTypeElasticGpu is a ResourceType enum value + ResourceTypeElasticGpu = "elastic-gpu" + + // ResourceTypeExportImageTask is a ResourceType enum value + ResourceTypeExportImageTask = "export-image-task" + + // ResourceTypeExportInstanceTask is a ResourceType enum value + ResourceTypeExportInstanceTask = "export-instance-task" + // ResourceTypeFleet is a ResourceType enum value ResourceTypeFleet = "fleet" @@ -112281,6 +114502,12 @@ const ( // ResourceTypeImage is a ResourceType enum value ResourceTypeImage = "image" + // ResourceTypeImportImageTask is a ResourceType enum value + ResourceTypeImportImageTask = "import-image-task" + + // ResourceTypeImportSnapshotTask is a ResourceType enum value + ResourceTypeImportSnapshotTask = "import-snapshot-task" + // ResourceTypeInstance is a ResourceType enum value ResourceTypeInstance = "instance" @@ -112293,6 +114520,9 @@ const ( // ResourceTypeLaunchTemplate is a ResourceType enum value ResourceTypeLaunchTemplate = "launch-template" + // ResourceTypeLocalGatewayRouteTableVpcAssociation is a ResourceType enum value + ResourceTypeLocalGatewayRouteTableVpcAssociation = "local-gateway-route-table-vpc-association" + // ResourceTypeNatgateway is a ResourceType enum value ResourceTypeNatgateway = "natgateway" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 228cd3cfe5947..82defe7f2cf87 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -9599,6 +9599,8 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject return out, 
req.Send() } +var _ awserr.Error + // SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. type SelectObjectContentEventStream struct { @@ -9673,6 +9675,7 @@ func (es *SelectObjectContentEventStream) waitStreamPartClose() { // * ProgressEvent // * RecordsEvent // * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { return es.Reader.Events() } @@ -11558,6 +11561,8 @@ func (s *ContinuationEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) return msg, err @@ -14774,6 +14779,8 @@ func (s *EndEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) return msg, err @@ -23548,6 +23555,8 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer @@ -26984,6 +26993,8 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. 
This method +// should only used internally within the SDK's EventStream handling. func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) @@ -28204,6 +28215,7 @@ type SelectObjectContentEventStreamEvent interface { // * ProgressEvent // * RecordsEvent // * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent type SelectObjectContentEventStreamReader interface { // Returns a channel of events as they are read from the event stream. Events() <-chan SelectObjectContentEventStreamEvent @@ -28278,6 +28290,9 @@ func (r *readSelectObjectContentEventStream) readEventStream() { return default: } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } r.err.SetError(err) return } @@ -28307,14 +28322,39 @@ func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventNa case "Stats": return &StatsEvent{}, nil default: - return nil, awserr.New( - request.ErrCodeSerialization, - fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), - nil, - ) + return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil } } +// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the +// SelectObjectContentEventStream group of events when an unknown event is received. +type SelectObjectContentEventStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The SelectObjectContentEventStreamUnknownEvent is and event in the SelectObjectContentEventStream +// group of events. +func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
+func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value. +// This method is only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + // Request to filter the contents of an Amazon S3 object based on a simple Structured // Query Language (SQL) statement. In the request, along with the SQL expression, // you must specify a data serialization format (JSON or CSV) of the object. @@ -28966,6 +29006,8 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore similarity index 100% rename from vendor/github.com/cenkalti/backoff/.gitignore rename to vendor/github.com/cenkalti/backoff/v4/.gitignore diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml similarity index 95% rename from vendor/github.com/cenkalti/backoff/.travis.yml rename to vendor/github.com/cenkalti/backoff/v4/.travis.yml index 47a6a46ec2ab6..871150c467261 100644 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ b/vendor/github.com/cenkalti/backoff/v4/.travis.yml @@ -1,6 +1,6 @@ language: go go: - - 1.7 + - 1.12 - 1.x - tip before_install: diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/LICENSE rename to vendor/github.com/cenkalti/backoff/v4/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md similarity index 87% rename from vendor/github.com/cenkalti/backoff/README.md rename to vendor/github.com/cenkalti/backoff/v4/README.md index 55ebc98fc25fa..cabfc9c70170a 100644 --- a/vendor/github.com/cenkalti/backoff/README.md +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -9,7 +9,10 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. 
+ +godoc.org does not support modules yet, +so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v4 to view the documentation. ## Contributing diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go similarity index 100% rename from vendor/github.com/cenkalti/backoff/backoff.go rename to vendor/github.com/cenkalti/backoff/v4/backoff.go diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go similarity index 77% rename from vendor/github.com/cenkalti/backoff/context.go rename to vendor/github.com/cenkalti/backoff/v4/context.go index 7706faa2b6005..fcff86c1b3de8 100644 --- a/vendor/github.com/cenkalti/backoff/context.go +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -7,7 +7,7 @@ import ( // BackOffContext is a backoff policy that stops retrying after the context // is canceled. -type BackOffContext interface { +type BackOffContext interface { // nolint: golint BackOff Context() context.Context } @@ -20,7 +20,7 @@ type backOffContext struct { // WithContext returns a BackOffContext with context ctx // // ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint if ctx == nil { panic("nil context") } @@ -38,11 +38,14 @@ func WithContext(b BackOff, ctx context.Context) BackOffContext { } } -func ensureContext(b BackOff) BackOffContext { +func getContext(b BackOff) context.Context { if cb, ok := b.(BackOffContext); ok { - return cb + return cb.Context() } - return WithContext(b, context.Background()) + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() } func (b *backOffContext) Context() context.Context { @@ -56,7 +59,7 @@ func (b *backOffContext) NextBackOff() time.Duration { default: } next := b.BackOff.NextBackOff() - if deadline, ok := 
b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple return Stop } return next diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go similarity index 89% rename from vendor/github.com/cenkalti/backoff/exponential.go rename to vendor/github.com/cenkalti/backoff/v4/exponential.go index a031a659799fc..3d3453215bb30 100644 --- a/vendor/github.com/cenkalti/backoff/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -56,9 +56,10 @@ type ExponentialBackOff struct { RandomizationFactor float64 Multiplier float64 MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. + // After MaxElapsedTime the ExponentialBackOff returns Stop. // It never stops if MaxElapsedTime == 0. MaxElapsedTime time.Duration + Stop time.Duration Clock Clock currentInterval time.Duration @@ -87,6 +88,7 @@ func NewExponentialBackOff() *ExponentialBackOff { Multiplier: DefaultMultiplier, MaxInterval: DefaultMaxInterval, MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, Clock: SystemClock, } b.Reset() @@ -103,20 +105,23 @@ func (t systemClock) Now() time.Time { var SystemClock = systemClock{} // Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. func (b *ExponentialBackOff) Reset() { b.currentInterval = b.InitialInterval b.startTime = b.Clock.Now() } // NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) func (b *ExponentialBackOff) NextBackOff() time.Duration { // Make sure we have not gone over the maximum elapsed time. 
- if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + return next } // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance @@ -140,7 +145,7 @@ func (b *ExponentialBackOff) incrementCurrentInterval() { } // Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { var delta = randomizationFactor * float64(currentInterval) var minInterval = float64(currentInterval) - delta diff --git a/vendor/github.com/cenkalti/backoff/v4/go.mod b/vendor/github.com/cenkalti/backoff/v4/go.mod new file mode 100644 index 0000000000000..cef50ea6724a4 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/go.mod @@ -0,0 +1,3 @@ +module github.com/cenkalti/backoff/v4 + +go 1.12 diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go similarity index 69% rename from vendor/github.com/cenkalti/backoff/retry.go rename to vendor/github.com/cenkalti/backoff/v4/retry.go index e936a506f8493..6c776ccf8ed74 100644 --- a/vendor/github.com/cenkalti/backoff/retry.go +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -21,16 +21,31 @@ type Notify func(error, time.Duration) // // Retry sleeps the goroutine for the duration returned by BackOff after a // failed operation returns. 
-func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} // RetryNotify calls notify function with the error and wait duration // for each failed attempt before sleep. func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { var err error var next time.Duration - var t *time.Timer + if t == nil { + t = &defaultTimer{} + } - cb := ensureContext(b) + defer func() { + t.Stop() + }() + + ctx := getContext(b) b.Reset() for { @@ -42,7 +57,7 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error { return permanent.Err } - if next = cb.NextBackOff(); next == Stop { + if next = b.NextBackOff(); next == Stop { return err } @@ -50,17 +65,12 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error { notify(err, next) } - if t == nil { - t = time.NewTimer(next) - defer t.Stop() - } else { - t.Reset(next) - } + t.Start(next) select { - case <-cb.Context().Done(): - return err - case <-t.C: + case <-ctx.Done(): + return ctx.Err() + case <-t.C(): } } } @@ -74,6 +84,10 @@ func (e *PermanentError) Error() string { return e.Err.Error() } +func (e *PermanentError) Unwrap() error { + return e.Err +} + // Permanent wraps the given err in a *PermanentError. 
func Permanent(err error) *PermanentError { return &PermanentError{ diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go similarity index 74% rename from vendor/github.com/cenkalti/backoff/ticker.go rename to vendor/github.com/cenkalti/backoff/v4/ticker.go index e41084b0eff94..df9d68bce527f 100644 --- a/vendor/github.com/cenkalti/backoff/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -1,6 +1,7 @@ package backoff import ( + "context" "sync" "time" ) @@ -12,7 +13,9 @@ import ( type Ticker struct { C <-chan time.Time c chan time.Time - b BackOffContext + b BackOff + ctx context.Context + timer Timer stop chan struct{} stopOnce sync.Once } @@ -24,12 +27,23 @@ type Ticker struct { // provided backoff policy (notably calling NextBackOff or Reset) // while the ticker is running. func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } c := make(chan time.Time) t := &Ticker{ - C: c, - c: c, - b: ensureContext(b), - stop: make(chan struct{}), + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), } t.b.Reset() go t.run() @@ -59,7 +73,7 @@ func (t *Ticker) run() { case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. 
return - case <-t.b.Context().Done(): + case <-t.ctx.Done(): return } } @@ -78,5 +92,6 @@ func (t *Ticker) send(tick time.Time) <-chan time.Time { return nil } - return time.After(next) + t.timer.Start(next) + return t.timer.C() } diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 0000000000000..8120d0213c58d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go similarity index 94% rename from vendor/github.com/cenkalti/backoff/tries.go rename to vendor/github.com/cenkalti/backoff/v4/tries.go index cfeefd9b764c4..28d58ca37c684 100644 --- a/vendor/github.com/cenkalti/backoff/tries.go +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -20,6 +20,9 @@ type backOffTries struct { } func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } if b.maxTries > 0 { if b.maxTries <= b.numTries { return Stop diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go index 5f17a82213f47..587bdb8ad7f9c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go @@ -19,7 +19,6 @@ import ( "github.com/prometheus/alertmanager/nflog" "github.com/prometheus/alertmanager/notify" "github.com/prometheus/alertmanager/notify/email" - "github.com/prometheus/alertmanager/notify/hipchat" "github.com/prometheus/alertmanager/notify/opsgenie" "github.com/prometheus/alertmanager/notify/pagerduty" "github.com/prometheus/alertmanager/notify/pushover" @@ -359,9 +358,6 @@ func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, log for i, c := range nc.SlackConfigs { add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l) }) } - for i, c := range nc.HipchatConfigs { - add("hipchat", i, c, func(l log.Logger) (notify.Notifier, error) { return hipchat.New(c, tmpl, l) }) - } for i, c := range nc.VictorOpsConfigs { add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l) }) } diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go index c70f0a8b9248a..4238dcad5d326 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go @@ -44,9 +44,9 @@ type Config struct { PrometheusHTTPPrefix string `yaml:"prometheus_http_prefix"` // The following configs are injected by the upstream caller. - ServerPrefix string `yaml:"-"` - LegacyHTTPPrefix string `yaml:"-"` - HTTPAuthMiddleware middleware.Func `yaml:"-"` + ServerPrefix string `yaml:"-"` + LegacyHTTPPrefix string `yaml:"-"` + HTTPAuthMiddleware middleware.Interface `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet. @@ -62,7 +62,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { type API struct { cfg Config - authMiddleware middleware.Func + authMiddleware middleware.Interface server *server.Server logger log.Logger } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_metrics.go new file mode 100644 index 0000000000000..58533b4414677 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_metrics.go @@ -0,0 +1,60 @@ +package aws + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/instrument" +) + +type dynamoDBMetrics struct { + dynamoRequestDuration *instrument.HistogramCollector + dynamoConsumedCapacity *prometheus.CounterVec + dynamoThrottled *prometheus.CounterVec + dynamoFailures *prometheus.CounterVec + dynamoDroppedRequests *prometheus.CounterVec + dynamoQueryPagesCount prometheus.Histogram +} + +func newMetrics(r prometheus.Registerer) *dynamoDBMetrics { + m := dynamoDBMetrics{} + + m.dynamoRequestDuration = 
instrument.NewHistogramCollector(promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "dynamo_request_duration_seconds", + Help: "Time spent doing DynamoDB requests.", + + // DynamoDB latency seems to range from a few ms to a several seconds and is + // important. So use 9 buckets from 1ms to just over 1 minute (65s). + Buckets: prometheus.ExponentialBuckets(0.001, 4, 9), + }, []string{"operation", "status_code"})) + m.dynamoConsumedCapacity = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "dynamo_consumed_capacity_total", + Help: "The capacity units consumed by operation.", + }, []string{"operation", tableNameLabel}) + m.dynamoThrottled = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "dynamo_throttled_total", + Help: "The total number of throttled events.", + }, []string{"operation", tableNameLabel}) + m.dynamoFailures = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "dynamo_failures_total", + Help: "The total number of errors while storing chunks to the chunk store.", + }, []string{tableNameLabel, errorReasonLabel, "operation"}) + m.dynamoDroppedRequests = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "dynamo_dropped_requests_total", + Help: "The total number of requests which were dropped due to errors encountered from dynamo.", + }, []string{tableNameLabel, errorReasonLabel, "operation"}) + m.dynamoQueryPagesCount = promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "dynamo_query_pages_count", + Help: "Number of pages per query.", + // Most queries will have one page, however this may increase with fuzzy + // metric names. 
+ Buckets: prometheus.ExponentialBuckets(1, 4, 6), + }) + + return &m +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index 126de4e209d2c..b1e25ec8c7e12 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -50,55 +50,6 @@ const ( validationException = "ValidationException" ) -var ( - dynamoRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "dynamo_request_duration_seconds", - Help: "Time spent doing DynamoDB requests.", - - // DynamoDB latency seems to range from a few ms to a several seconds and is - // important. So use 9 buckets from 1ms to just over 1 minute (65s). - Buckets: prometheus.ExponentialBuckets(0.001, 4, 9), - }, []string{"operation", "status_code"})) - dynamoConsumedCapacity = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "dynamo_consumed_capacity_total", - Help: "The capacity units consumed by operation.", - }, []string{"operation", tableNameLabel}) - dynamoThrottled = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "dynamo_throttled_total", - Help: "The total number of throttled events.", - }, []string{"operation", tableNameLabel}) - dynamoFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "dynamo_failures_total", - Help: "The total number of errors while storing chunks to the chunk store.", - }, []string{tableNameLabel, errorReasonLabel, "operation"}) - dynamoDroppedRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "dynamo_dropped_requests_total", - Help: "The total number of requests which were dropped due to errors encountered from dynamo.", - }, 
[]string{tableNameLabel, errorReasonLabel, "operation"}) - dynamoQueryPagesCount = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "dynamo_query_pages_count", - Help: "Number of pages per query.", - // Most queries will have one page, however this may increase with fuzzy - // metric names. - Buckets: prometheus.ExponentialBuckets(1, 4, 6), - }) -) - -func init() { - dynamoRequestDuration.Register() - prometheus.MustRegister(dynamoConsumedCapacity) - prometheus.MustRegister(dynamoThrottled) - prometheus.MustRegister(dynamoFailures) - prometheus.MustRegister(dynamoQueryPagesCount) - prometheus.MustRegister(dynamoDroppedRequests) -} - // DynamoDBConfig specifies config for a DynamoDB database. type DynamoDBConfig struct { DynamoDB flagext.URLValue `yaml:"dynamodb_url"` @@ -148,20 +99,22 @@ type dynamoDBStorageClient struct { // of boilerplate. batchGetItemRequestFn func(ctx context.Context, input *dynamodb.BatchGetItemInput) dynamoDBRequest batchWriteItemRequestFn func(ctx context.Context, input *dynamodb.BatchWriteItemInput) dynamoDBRequest + + metrics *dynamoDBMetrics } // NewDynamoDBIndexClient makes a new DynamoDB-backed IndexClient. -func NewDynamoDBIndexClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { - return newDynamoDBStorageClient(cfg, schemaCfg) +func NewDynamoDBIndexClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig, reg prometheus.Registerer) (chunk.IndexClient, error) { + return newDynamoDBStorageClient(cfg, schemaCfg, reg) } // NewDynamoDBChunkClient makes a new DynamoDB-backed chunk.Client. 
-func NewDynamoDBChunkClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (chunk.Client, error) { - return newDynamoDBStorageClient(cfg, schemaCfg) +func NewDynamoDBChunkClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig, reg prometheus.Registerer) (chunk.Client, error) { + return newDynamoDBStorageClient(cfg, schemaCfg, reg) } // newDynamoDBStorageClient makes a new DynamoDB-backed IndexClient and chunk.Client. -func newDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (*dynamoDBStorageClient, error) { +func newDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig, reg prometheus.Registerer) (*dynamoDBStorageClient, error) { dynamoDB, err := dynamoClientFromURL(cfg.DynamoDB.URL) if err != nil { return nil, err @@ -172,6 +125,7 @@ func newDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) schemaCfg: schemaCfg, DynamoDB: dynamoDB, writeThrottle: rate.NewLimiter(rate.Limit(cfg.ThrottleLimit), dynamoDBMaxWriteBatchSize), + metrics: newMetrics(reg), } client.batchGetItemRequestFn = client.batchGetItemRequest client.batchWriteItemRequestFn = client.batchWriteItemRequest @@ -187,9 +141,9 @@ func (a dynamoDBStorageClient) NewWriteBatch() chunk.WriteBatch { return dynamoDBWriteBatch(map[string][]*dynamodb.WriteRequest{}) } -func logWriteRetry(ctx context.Context, unprocessed dynamoDBWriteBatch) { +func logWriteRetry(unprocessed dynamoDBWriteBatch, metrics *dynamoDBMetrics) { for table, reqs := range unprocessed { - dynamoThrottled.WithLabelValues("DynamoDB.BatchWriteItem", table).Add(float64(len(reqs))) + metrics.dynamoThrottled.WithLabelValues("DynamoDB.BatchWriteItem", table).Add(float64(len(reqs))) for _, req := range reqs { item := req.PutRequest.Item var hash, rnge string @@ -225,25 +179,25 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), }) - err := instrument.CollectedRequest(ctx, 
"DynamoDB.BatchWriteItem", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + err := instrument.CollectedRequest(ctx, "DynamoDB.BatchWriteItem", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return request.Send() }) resp := request.Data().(*dynamodb.BatchWriteItemOutput) for _, cc := range resp.ConsumedCapacity { - dynamoConsumedCapacity.WithLabelValues("DynamoDB.BatchWriteItem", *cc.TableName). + a.metrics.dynamoConsumedCapacity.WithLabelValues("DynamoDB.BatchWriteItem", *cc.TableName). Add(float64(*cc.CapacityUnits)) } if err != nil { for tableName := range requests { - recordDynamoError(tableName, err, "DynamoDB.BatchWriteItem") + recordDynamoError(tableName, err, "DynamoDB.BatchWriteItem", a.metrics) } // If we get provisionedThroughputExceededException, then no items were processed, // so back off and retry all. if awsErr, ok := err.(awserr.Error); ok && ((awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException) || request.Retryable()) { - logWriteRetry(ctx, requests) + logWriteRetry(requests, a.metrics) unprocessed.TakeReqs(requests, -1) _ = a.writeThrottle.WaitN(ctx, len(requests)) backoff.Wait() @@ -256,7 +210,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write // recording the drop counter separately from recordDynamoError(), as the error code alone may not provide enough context // to determine if a request was dropped (or not) for tableName := range requests { - dynamoDroppedRequests.WithLabelValues(tableName, validationException, "DynamoDB.BatchWriteItem").Inc() + a.metrics.dynamoDroppedRequests.WithLabelValues(tableName, validationException, "DynamoDB.BatchWriteItem").Inc() } continue } @@ -268,7 +222,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write // If there are unprocessed items, retry those items. 
unprocessedItems := dynamoDBWriteBatch(resp.UnprocessedItems) if len(unprocessedItems) > 0 { - logWriteRetry(ctx, unprocessedItems) + logWriteRetry(unprocessedItems, a.metrics) _ = a.writeThrottle.WaitN(ctx, unprocessedItems.Len()) unprocessed.TakeReqs(unprocessedItems, -1) } @@ -329,11 +283,11 @@ func (a dynamoDBStorageClient) query(ctx context.Context, query chunk.IndexQuery pageCount := 0 defer func() { - dynamoQueryPagesCount.Observe(float64(pageCount)) + a.metrics.dynamoQueryPagesCount.Observe(float64(pageCount)) }() retryer := newRetryer(ctx, a.cfg.backoffConfig) - err := instrument.CollectedRequest(ctx, "DynamoDB.QueryPages", dynamoRequestDuration, instrument.ErrorCode, func(innerCtx context.Context) error { + err := instrument.CollectedRequest(ctx, "DynamoDB.QueryPages", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(innerCtx context.Context) error { if sp := ot.SpanFromContext(innerCtx); sp != nil { sp.SetTag("tableName", query.TableName) sp.SetTag("hashValue", query.HashValue) @@ -345,12 +299,12 @@ func (a dynamoDBStorageClient) query(ctx context.Context, query chunk.IndexQuery } if cc := output.ConsumedCapacity; cc != nil { - dynamoConsumedCapacity.WithLabelValues("DynamoDB.QueryPages", *cc.TableName). + a.metrics.dynamoConsumedCapacity.WithLabelValues("DynamoDB.QueryPages", *cc.TableName). 
Add(float64(*cc.CapacityUnits)) } return callback(query, &dynamoDBReadResponse{items: output.Items}) - }, retryer.withRetries, withErrorHandler(query.TableName, "DynamoDB.QueryPages")) + }, retryer.withRetries, withErrorHandler(query.TableName, "DynamoDB.QueryPages", a.metrics)) }) if err != nil { return errors.Wrapf(err, "QueryPages error: table=%v", query.TableName) @@ -481,19 +435,19 @@ func (a dynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []c ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), }) - err := instrument.CollectedRequest(ctx, "DynamoDB.BatchGetItemPages", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + err := instrument.CollectedRequest(ctx, "DynamoDB.BatchGetItemPages", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return request.Send() }) response := request.Data().(*dynamodb.BatchGetItemOutput) for _, cc := range response.ConsumedCapacity { - dynamoConsumedCapacity.WithLabelValues("DynamoDB.BatchGetItemPages", *cc.TableName). + a.metrics.dynamoConsumedCapacity.WithLabelValues("DynamoDB.BatchGetItemPages", *cc.TableName). 
Add(float64(*cc.CapacityUnits)) } if err != nil { for tableName := range requests { - recordDynamoError(tableName, err, "DynamoDB.BatchGetItemPages") + recordDynamoError(tableName, err, "DynamoDB.BatchGetItemPages", a.metrics) } // If we get provisionedThroughputExceededException, then no items were processed, @@ -509,7 +463,7 @@ func (a dynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []c // recording the drop counter separately from recordDynamoError(), as the error code alone may not provide enough context // to determine if a request was dropped (or not) for tableName := range requests { - dynamoDroppedRequests.WithLabelValues(tableName, validationException, "DynamoDB.BatchGetItemPages").Inc() + a.metrics.dynamoDroppedRequests.WithLabelValues(tableName, validationException, "DynamoDB.BatchGetItemPages").Inc() } continue } @@ -792,21 +746,21 @@ func (b dynamoDBReadRequest) TakeReqs(from dynamoDBReadRequest, max int) { } } -func withErrorHandler(tableName, operation string) func(req *request.Request) { +func withErrorHandler(tableName, operation string, metrics *dynamoDBMetrics) func(req *request.Request) { return func(req *request.Request) { req.Handlers.CompleteAttempt.PushBack(func(req *request.Request) { if req.Error != nil { - recordDynamoError(tableName, req.Error, operation) + recordDynamoError(tableName, req.Error, operation, metrics) } }) } } -func recordDynamoError(tableName string, err error, operation string) { +func recordDynamoError(tableName string, err error, operation string, metrics *dynamoDBMetrics) { if awsErr, ok := err.(awserr.Error); ok { - dynamoFailures.WithLabelValues(tableName, awsErr.Code(), operation).Add(float64(1)) + metrics.dynamoFailures.WithLabelValues(tableName, awsErr.Code(), operation).Add(float64(1)) } else { - dynamoFailures.WithLabelValues(tableName, otherError, operation).Add(float64(1)) + metrics.dynamoFailures.WithLabelValues(tableName, otherError, operation).Add(float64(1)) } } diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go index 8d8571f7adf6d..a2ab3b8096bac 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/weaveworks/common/instrument" "golang.org/x/time/rate" @@ -35,10 +36,11 @@ type dynamoTableClient struct { DynamoDB dynamodbiface.DynamoDBAPI callManager callManager autoscale autoscale + metrics *dynamoDBMetrics } // NewDynamoDBTableClient makes a new DynamoTableClient. -func NewDynamoDBTableClient(cfg DynamoDBConfig) (chunk.TableClient, error) { +func NewDynamoDBTableClient(cfg DynamoDBConfig, reg prometheus.Registerer) (chunk.TableClient, error) { dynamoDB, err := dynamoClientFromURL(cfg.DynamoDB.URL) if err != nil { return nil, err @@ -51,7 +53,7 @@ func NewDynamoDBTableClient(cfg DynamoDBConfig) (chunk.TableClient, error) { var autoscale autoscale if cfg.Metrics.URL != "" { - autoscale, err = newMetrics(cfg) + autoscale, err = newMetricsAutoScaling(cfg) if err != nil { return nil, err } @@ -61,6 +63,7 @@ func NewDynamoDBTableClient(cfg DynamoDBConfig) (chunk.TableClient, error) { DynamoDB: dynamoDB, callManager: callManager, autoscale: autoscale, + metrics: newMetrics(reg), }, nil } @@ -95,7 +98,7 @@ func (d callManager) backoffAndRetry(ctx context.Context, fn func(context.Contex func (d dynamoTableClient) ListTables(ctx context.Context) ([]string, error) { table := []string{} err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.ListTablesPages", dynamoRequestDuration, instrument.ErrorCode, func(ctx 
context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.ListTablesPages", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return d.DynamoDB.ListTablesPagesWithContext(ctx, &dynamodb.ListTablesInput{}, func(resp *dynamodb.ListTablesOutput, _ bool) bool { for _, s := range resp.TableNames { table = append(table, *s) @@ -121,7 +124,7 @@ func chunkTagsToDynamoDB(ts chunk.Tags) []*dynamodb.Tag { func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { var tableARN *string if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.CreateTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.CreateTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &dynamodb.CreateTableInput{ TableName: aws.String(desc.Name), AttributeDefinitions: []*dynamodb.AttributeDefinition{ @@ -179,7 +182,7 @@ func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc tags := chunkTagsToDynamoDB(desc.Tags) if len(tags) > 0 { return d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.TagResource", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.TagResource", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := d.DynamoDB.TagResourceWithContext(ctx, &dynamodb.TagResourceInput{ ResourceArn: tableARN, Tags: tags, @@ -196,7 +199,7 @@ func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc func (d dynamoTableClient) DeleteTable(ctx context.Context, name string) error { if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, 
"DynamoDB.DeleteTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.DeleteTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &dynamodb.DeleteTableInput{TableName: aws.String(name)} _, err := d.DynamoDB.DeleteTableWithContext(ctx, input) if err != nil { @@ -215,7 +218,7 @@ func (d dynamoTableClient) DeleteTable(ctx context.Context, name string) error { func (d dynamoTableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { var tableARN *string err = d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.DescribeTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.DescribeTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := d.DynamoDB.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{ TableName: aws.String(name), }) @@ -248,7 +251,7 @@ func (d dynamoTableClient) DescribeTable(ctx context.Context, name string) (desc } err = d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.ListTagsOfResource", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.ListTagsOfResource", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := d.DynamoDB.ListTagsOfResourceWithContext(ctx, &dynamodb.ListTagsOfResourceInput{ ResourceArn: tableARN, }) @@ -300,7 +303,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch !expected.UseOnDemandIOMode { level.Info(util.Logger).Log("msg", "updating provisioned throughput on table", "table", expected.Name, "old_read", current.ProvisionedRead, 
"old_write", current.ProvisionedWrite, "new_read", expected.ProvisionedRead, "new_write", expected.ProvisionedWrite) if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { var dynamoBillingMode string updateTableInput := &dynamodb.UpdateTableInput{TableName: aws.String(expected.Name), ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ @@ -320,7 +323,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch return err }) }); err != nil { - recordDynamoError(expected.Name, err, "DynamoDB.UpdateTable") + recordDynamoError(expected.Name, err, "DynamoDB.UpdateTable", d.metrics) if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "LimitExceededException" { level.Warn(util.Logger).Log("msg", "update limit exceeded", "err", err) } else { @@ -331,14 +334,14 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch // moved the enabling of OnDemand mode to it's own block to reduce complexities & interactions with the various // settings used in provisioned mode. Unfortunately the boilerplate wrappers for retry and tracking needed to be copied. 
if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { level.Info(util.Logger).Log("msg", "updating billing mode on table", "table", expected.Name, "old_mode", current.UseOnDemandIOMode, "new_mode", expected.UseOnDemandIOMode) updateTableInput := &dynamodb.UpdateTableInput{TableName: aws.String(expected.Name), BillingMode: aws.String(dynamodb.BillingModePayPerRequest)} _, err := d.DynamoDB.UpdateTableWithContext(ctx, updateTableInput) return err }) }); err != nil { - recordDynamoError(expected.Name, err, "DynamoDB.UpdateTable") + recordDynamoError(expected.Name, err, "DynamoDB.UpdateTable", d.metrics) if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "LimitExceededException" { level.Warn(util.Logger).Log("msg", "update limit exceeded", "err", err) } else { @@ -350,7 +353,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch if !current.Tags.Equals(expected.Tags) { var tableARN *string if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.DescribeTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.DescribeTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := d.DynamoDB.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{ TableName: aws.String(expected.Name), }) @@ -367,7 +370,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch } return d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "DynamoDB.TagResource", 
dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.TagResource", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := d.DynamoDB.TagResourceWithContext(ctx, &dynamodb.TagResourceInput{ ResourceArn: tableARN, Tags: chunkTagsToDynamoDB(expected.Tags), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go index 0d23d9ee31eb7..24ab8cde736a5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go @@ -35,12 +35,14 @@ var Fixtures = []testutils.Fixture{ dynamoDB := newMockDynamoDB(0, 0) table := &dynamoTableClient{ DynamoDB: dynamoDB, + metrics: newMetrics(nil), } index := &dynamoDBStorageClient{ DynamoDB: dynamoDB, batchGetItemRequestFn: dynamoDB.batchGetItemRequest, batchWriteItemRequestFn: dynamoDB.batchWriteItemRequest, schemaCfg: schemaConfig, + metrics: newMetrics(nil), } object := objectclient.NewClient(&S3ObjectClient{ S3: newMockS3(), @@ -68,6 +70,7 @@ func dynamoDBFixture(provisionedErr, gangsize, maxParallelism int) testutils.Fix schemaCfg := testutils.DefaultSchemaConfig("aws") table := &dynamoTableClient{ DynamoDB: dynamoDB, + metrics: newMetrics(nil), } storage := &dynamoDBStorageClient{ cfg: DynamoDBConfig{ @@ -84,6 +87,7 @@ func dynamoDBFixture(provisionedErr, gangsize, maxParallelism int) testutils.Fix batchGetItemRequestFn: dynamoDB.batchGetItemRequest, batchWriteItemRequestFn: dynamoDB.batchWriteItemRequest, schemaCfg: schemaCfg, + metrics: newMetrics(nil), } return storage, storage, table, schemaCfg, testutils.CloserFunc(func() error { table.Stop() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go index 
3df859cbdcf5c..fea098c82334a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go @@ -78,7 +78,7 @@ type metricsData struct { readErrorRates map[string]float64 } -func newMetrics(cfg DynamoDBConfig) (*metricsData, error) { +func newMetricsAutoScaling(cfg DynamoDBConfig) (*metricsData, error) { client, err := promApi.NewClient(promApi.Config{Address: cfg.Metrics.URL}) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index 944ad2dab75a5..eb35948f8abb6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -35,6 +35,10 @@ var ( }, []string{"operation", "status_code"})) ) +// InjectRequestMiddleware gives users of this client the ability to make arbitrary +// changes to outgoing requests. 
+type InjectRequestMiddleware func(next http.RoundTripper) http.RoundTripper + func init() { s3RequestDuration.Register() } @@ -52,6 +56,8 @@ type S3Config struct { Insecure bool `yaml:"insecure"` SSEEncryption bool `yaml:"sse_encryption"` HTTPConfig HTTPConfig `yaml:"http_config"` + + Inject InjectRequestMiddleware `yaml:"-"` } // HTTPConfig stores the http.Transport configuration @@ -165,22 +171,28 @@ func buildS3Config(cfg S3Config) (*aws.Config, []string, error) { // to maintain backwards compatibility with previous versions of Cortex while providing // more flexible configuration of the http client // https://github.com/weaveworks/common/blob/4b1847531bc94f54ce5cf210a771b2a86cd34118/aws/config.go#L23 + transport := http.RoundTripper(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: cfg.HTTPConfig.IdleConnTimeout, + MaxIdleConnsPerHost: 100, + TLSHandshakeTimeout: 3 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + ResponseHeaderTimeout: time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout), + TLSClientConfig: &tls.Config{InsecureSkipVerify: cfg.HTTPConfig.InsecureSkipVerify}, + }) + + if cfg.Inject != nil { + transport = cfg.Inject(transport) + } + s3Config = s3Config.WithHTTPClient(&http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: cfg.HTTPConfig.IdleConnTimeout, - MaxIdleConnsPerHost: 100, - TLSHandshakeTimeout: 3 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - ResponseHeaderTimeout: time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout), - TLSClientConfig: &tls.Config{InsecureSkipVerify: cfg.HTTPConfig.InsecureSkipVerify}, - }, + Transport: transport, }) // bucketnames @@ -325,3 
+337,7 @@ func (a *S3ObjectClient) List(ctx context.Context, prefix string) ([]chunk.Stora return storageObjects, commonPrefixes, nil } + +func (a *S3ObjectClient) PathSeparator() string { + return a.delimiter +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go index 2da1aa2683b05..45d34c2748e84 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go @@ -41,13 +41,13 @@ func (c *BlobStorageConfig) RegisterFlags(f *flag.FlagSet) { // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&c.ContainerName, prefix+"azure.container-name", "cortex", "Name of the blob container used to store chunks. Defaults to `cortex`. This container must be created before running cortex.") + f.StringVar(&c.ContainerName, prefix+"azure.container-name", "cortex", "Name of the blob container used to store chunks. This container must be created before running cortex.") f.StringVar(&c.AccountName, prefix+"azure.account-name", "", "The Microsoft Azure account name to be used") f.Var(&c.AccountKey, prefix+"azure.account-key", "The Microsoft Azure account key to use.") - f.DurationVar(&c.RequestTimeout, prefix+"azure.request-timeout", 30*time.Second, "Timeout for requests made against azure blob storage. 
Defaults to 30 seconds.") - f.IntVar(&c.DownloadBufferSize, prefix+"azure.download-buffer-size", 512000, "Preallocated buffer size for downloads (default is 512KB)") - f.IntVar(&c.UploadBufferSize, prefix+"azure.upload-buffer-size", 256000, "Preallocated buffer size for up;oads (default is 256KB)") - f.IntVar(&c.UploadBufferCount, prefix+"azure.download-buffer-count", 1, "Number of buffers used to used to upload a chunk. (defaults to 1)") + f.DurationVar(&c.RequestTimeout, prefix+"azure.request-timeout", 30*time.Second, "Timeout for requests made against azure blob storage.") + f.IntVar(&c.DownloadBufferSize, prefix+"azure.download-buffer-size", 512000, "Preallocated buffer size for downloads.") + f.IntVar(&c.UploadBufferSize, prefix+"azure.upload-buffer-size", 256000, "Preallocated buffer size for uploads.") + f.IntVar(&c.UploadBufferCount, prefix+"azure.download-buffer-count", 1, "Number of buffers used to used to upload a chunk.") f.IntVar(&c.MaxRetries, prefix+"azure.max-retries", 5, "Number of retries for a request which times out.") f.DurationVar(&c.MinRetryDelay, prefix+"azure.min-retry-delay", 10*time.Millisecond, "Minimum time to wait before retrying a request.") f.DurationVar(&c.MaxRetryDelay, prefix+"azure.max-retry-delay", 500*time.Millisecond, "Maximum time to wait before retrying a request.") @@ -210,3 +210,7 @@ func (b *BlobStorage) DeleteObject(ctx context.Context, blobID string) error { _, err = blockBlobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}) return err } + +func (b *BlobStorage) PathSeparator() string { + return b.delimiter +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go index feb2df252195a..b55cb16af5f3d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go @@ -35,17 +35,17 
@@ func (f *fixture) Clients() (chunk.IndexClient, chunk.Client, chunk.TableClient, // Get a SchemaConfig with the defaults. schemaConfig := testutils.DefaultSchemaConfig("cassandra") - storageClient, err := NewStorageClient(cfg, schemaConfig) + storageClient, err := NewStorageClient(cfg, schemaConfig, nil) if err != nil { return nil, nil, nil, schemaConfig, nil, err } - objectClient, err := NewObjectClient(cfg, schemaConfig) + objectClient, err := NewObjectClient(cfg, schemaConfig, nil) if err != nil { return nil, nil, nil, schemaConfig, nil, err } - tableClient, err := NewTableClient(context.Background(), cfg) + tableClient, err := NewTableClient(context.Background(), cfg, nil) if err != nil { return nil, nil, nil, schemaConfig, nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index c4a0fa56f2950..9c328fe08abcf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -70,13 +70,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.Timeout, "cassandra.timeout", 2*time.Second, "Timeout when connecting to cassandra.") f.DurationVar(&cfg.ConnectTimeout, "cassandra.connect-timeout", 5*time.Second, "Initial connection timeout, used during initial dial to server.") f.DurationVar(&cfg.ReconnectInterval, "cassandra.reconnent-interval", 1*time.Second, "Interval to retry connecting to cassandra nodes marked as DOWN.") - f.IntVar(&cfg.Retries, "cassandra.max-retries", 0, "Number of retries to perform on a request. (Default is 0: no retries)") - f.DurationVar(&cfg.MinBackoff, "cassandra.retry-min-backoff", 100*time.Millisecond, "Minimum time to wait before retrying a failed request. 
(Default = 100ms)") - f.DurationVar(&cfg.MaxBackoff, "cassandra.retry-max-backoff", 10*time.Second, "Maximum time to wait before retrying a failed request. (Default = 10s)") - f.IntVar(&cfg.QueryConcurrency, "cassandra.query-concurrency", 0, "Limit number of concurrent queries to Cassandra. (Default is 0: no limit)") + f.IntVar(&cfg.Retries, "cassandra.max-retries", 0, "Number of retries to perform on a request. Set to 0 to disable retries.") + f.DurationVar(&cfg.MinBackoff, "cassandra.retry-min-backoff", 100*time.Millisecond, "Minimum time to wait before retrying a failed request.") + f.DurationVar(&cfg.MaxBackoff, "cassandra.retry-max-backoff", 10*time.Second, "Maximum time to wait before retrying a failed request.") + f.IntVar(&cfg.QueryConcurrency, "cassandra.query-concurrency", 0, "Limit number of concurrent queries to Cassandra. Set to 0 to disable the limit.") f.IntVar(&cfg.NumConnections, "cassandra.num-connections", 2, "Number of TCP connections per host.") f.BoolVar(&cfg.ConvictHosts, "cassandra.convict-hosts-on-failure", true, "Convict hosts of being down on failure.") - f.StringVar(&cfg.TableOptions, "cassandra.table-options", "", "Table options used to create index or chunk tables. This value is used as plain text in the table `WITH` like this, \"CREATE TABLE (...) WITH \". For details, see https://cortexmetrics.io/docs/production/cassandra. (Default = \"\": use default table options of your Cassandra)") + f.StringVar(&cfg.TableOptions, "cassandra.table-options", "", "Table options used to create index or chunk tables. This value is used as plain text in the table `WITH` like this, \"CREATE TABLE (...) WITH \". For details, see https://cortexmetrics.io/docs/production/cassandra. 
By default it will use the default table options of your Cassandra cluster.") } func (cfg *Config) Validate() error { @@ -89,7 +89,7 @@ func (cfg *Config) Validate() error { return nil } -func (cfg *Config) session(name string) (*gocql.Session, error) { +func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Session, error) { consistency, err := gocql.ParseConsistencyWrapper(cfg.Consistency) if err != nil { return nil, errors.WithStack(err) @@ -107,7 +107,7 @@ func (cfg *Config) session(name string) (*gocql.Session, error) { cluster.NumConns = cfg.NumConnections cluster.Logger = log.With(pkgutil.Logger, "module", "gocql", "client", name) cluster.Registerer = prometheus.WrapRegistererWith( - prometheus.Labels{"client": name}, prometheus.DefaultRegisterer) + prometheus.Labels{"client": name}, reg) if cfg.Retries > 0 { cluster.RetryPolicy = &gocql.ExponentialBackoffRetryPolicy{ NumRetries: cfg.Retries, @@ -222,15 +222,15 @@ type StorageClient struct { } // NewStorageClient returns a new StorageClient. -func NewStorageClient(cfg Config, schemaCfg chunk.SchemaConfig) (*StorageClient, error) { +func NewStorageClient(cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*StorageClient, error) { pkgutil.WarnExperimentalUse("Cassandra Backend") - readSession, err := cfg.session("index-read") + readSession, err := cfg.session("index-read", registerer) if err != nil { return nil, errors.WithStack(err) } - writeSession, err := cfg.session("index-write") + writeSession, err := cfg.session("index-write", registerer) if err != nil { return nil, errors.WithStack(err) } @@ -407,15 +407,15 @@ type ObjectClient struct { } // NewObjectClient returns a new ObjectClient. 
-func NewObjectClient(cfg Config, schemaCfg chunk.SchemaConfig) (*ObjectClient, error) { +func NewObjectClient(cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (*ObjectClient, error) { pkgutil.WarnExperimentalUse("Cassandra Backend") - readSession, err := cfg.session("chunks-read") + readSession, err := cfg.session("chunks-read", registerer) if err != nil { return nil, errors.WithStack(err) } - writeSession, err := cfg.session("chunks-write") + writeSession, err := cfg.session("chunks-write", registerer) if err != nil { return nil, errors.WithStack(err) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go index ee242e354c760..fc269e26409df 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go @@ -6,6 +6,7 @@ import ( "github.com/gocql/gocql" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/chunk" ) @@ -16,8 +17,8 @@ type tableClient struct { } // NewTableClient returns a new TableClient. 
-func NewTableClient(ctx context.Context, cfg Config) (chunk.TableClient, error) { - session, err := cfg.session("table-manager") +func NewTableClient(ctx context.Context, cfg Config, registerer prometheus.Registerer) (chunk.TableClient, error) { + session, err := cfg.session("table-manager", registerer) if err != nil { return nil, errors.WithStack(err) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index eadfa8121ee06..e802e667598a1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -90,11 +90,11 @@ func (cfg *StoreConfig) Validate() error { type baseStore struct { cfg StoreConfig - index IndexClient - chunks Client - schema BaseSchema - limits StoreLimits - *Fetcher + index IndexClient + chunks Client + schema BaseSchema + limits StoreLimits + fetcher *Fetcher } func newBaseStore(cfg StoreConfig, schema BaseSchema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (baseStore, error) { @@ -109,10 +109,17 @@ func newBaseStore(cfg StoreConfig, schema BaseSchema, index IndexClient, chunks chunks: chunks, schema: schema, limits: limits, - Fetcher: fetcher, + fetcher: fetcher, }, nil } +// Stop any background goroutines (ie in the cache.) +func (c *baseStore) Stop() { + c.fetcher.storage.Stop() + c.fetcher.Stop() + c.index.Stop() +} + // store implements Store type store struct { baseStore @@ -131,13 +138,6 @@ func newStore(cfg StoreConfig, schema StoreSchema, index IndexClient, chunks Cli }, nil } -// Stop any background goroutines (ie in the cache.) 
-func (c *store) Stop() { - c.storage.Stop() - c.Fetcher.Stop() - c.index.Stop() -} - // Put implements ChunkStore func (c *store) Put(ctx context.Context, chunks []Chunk) error { for _, chunk := range chunks { @@ -153,12 +153,12 @@ func (c *store) PutOne(ctx context.Context, from, through model.Time, chunk Chun log, ctx := spanlogger.New(ctx, "ChunkStore.PutOne") chunks := []Chunk{chunk} - err := c.storage.PutChunks(ctx, chunks) + err := c.fetcher.storage.PutChunks(ctx, chunks) if err != nil { return err } - if cacheErr := c.writeBackCache(ctx, chunks); cacheErr != nil { + if cacheErr := c.fetcher.writeBackCache(ctx, chunks); cacheErr != nil { level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr) } @@ -278,7 +278,7 @@ func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from level.Debug(log).Log("msg", "Chunks post filtering", "chunks", len(chunks)) // Now fetch the actual chunk data from Memcache / S3 - allChunks, err := c.FetchChunks(ctx, filtered, keys) + allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys) if err != nil { level.Error(log).Log("msg", "FetchChunks", "err", err) return nil, err @@ -370,7 +370,7 @@ func (c *store) getMetricNameChunks(ctx context.Context, userID string, from, th // Now fetch the actual chunk data from Memcache / S3 keys := keysFromChunks(filtered) - allChunks, err := c.FetchChunks(ctx, filtered, keys) + allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys) if err != nil { return nil, err } @@ -648,7 +648,7 @@ func (c *baseStore) reboundChunk(ctx context.Context, userID, chunkID string, pa return ErrParialDeleteChunkNoOverlap } - chunks, err := c.Fetcher.FetchChunks(ctx, []Chunk{chunk}, []string{chunkID}) + chunks, err := c.fetcher.FetchChunks(ctx, []Chunk{chunk}, []string{chunkID}) if err != nil { if err == ErrStorageObjectNotFound { return nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go index 2029e9e11b26c..fd35d0fa743e7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go @@ -155,3 +155,7 @@ func (s *GCSObjectClient) DeleteObject(ctx context.Context, objectKey string) er return nil } + +func (s *GCSObjectClient) PathSeparator() string { + return s.delimiter +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go index 1a7ae5c2dced1..fb420b0215964 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go @@ -390,6 +390,10 @@ func (m *MockStorage) List(ctx context.Context, prefix string) ([]StorageObject, return storageObjects, []StorageCommonPrefix{}, nil } +func (m *MockStorage) PathSeparator() string { + return DirDelim +} + type mockWriteBatch struct { inserts []struct { tableName, hashValue string diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go index dc308d41fb396..340a1b8016d7b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go @@ -34,7 +34,8 @@ func (cfg *FSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { // FSObjectClient holds config for filesystem as object store type FSObjectClient struct { - cfg FSConfig + cfg FSConfig + pathSeparator string } // NewFSObjectClient makes a chunk.Client which stores chunks as files in the local filesystem. 
@@ -48,7 +49,8 @@ func NewFSObjectClient(cfg FSConfig) (*FSObjectClient, error) { } return &FSObjectClient{ - cfg: cfg, + cfg: cfg, + pathSeparator: string(os.PathSeparator), }, nil } @@ -124,7 +126,7 @@ func (f *FSObjectClient) List(ctx context.Context, prefix string) ([]chunk.Stora // add the directory only if it is not empty if !empty { - commonPrefixes = append(commonPrefixes, chunk.StorageCommonPrefix(nameWithPrefix+chunk.DirDelim)) + commonPrefixes = append(commonPrefixes, chunk.StorageCommonPrefix(nameWithPrefix+f.pathSeparator)) } continue } @@ -173,6 +175,10 @@ func (f *FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) e }) } +func (f *FSObjectClient) PathSeparator() string { + return f.pathSeparator +} + // copied from https://github.com/thanos-io/thanos/blob/55cb8ca38b3539381dc6a781e637df15c694e50a/pkg/objstore/filesystem/filesystem.go#L181 func isDirEmpty(name string) (ok bool, err error) { f, err := os.Open(name) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go index a30dfbbbfd70b..5e19b87eb09e4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go @@ -176,3 +176,7 @@ func (s *SwiftObjectClient) DeleteObject(ctx context.Context, objectKey string) } return err } + +func (s *SwiftObjectClient) PathSeparator() string { + return string(s.delimiter) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 2ffc73714ad76..612c12c5f97c9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -49,7 +49,7 @@ type DayTime struct { // MarshalYAML implements 
yaml.Marshaller. func (d DayTime) MarshalYAML() (interface{}, error) { - return d.Time.Time().Format("2006-01-02"), nil + return d.String(), nil } // UnmarshalYAML implements yaml.Unmarshaller. @@ -66,6 +66,10 @@ func (d *DayTime) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } +func (d *DayTime) String() string { + return d.Time.Time().Format("2006-01-02") +} + // SchemaConfig contains the config for our chunk index schemas type SchemaConfig struct { Configs []PeriodConfig `yaml:"configs"` @@ -76,9 +80,9 @@ type SchemaConfig struct { // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *SchemaConfig) RegisterFlags(f *flag.FlagSet) { - flag.StringVar(&cfg.fileName, "schema-config-file", "", "The path to the schema config file.") + f.StringVar(&cfg.fileName, "schema-config-file", "", "The path to the schema config file.") // TODO(gouthamve): Add a metric for this. - flag.StringVar(&cfg.legacyFileName, "config-yaml", "", "DEPRECATED(use -schema-config-file) The path to the schema config file.") + f.StringVar(&cfg.legacyFileName, "config-yaml", "", "DEPRECATED(use -schema-config-file) The path to the schema config file.") } // loadFromFile loads the schema config from a yaml file diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index 4792ca1ae2e00..3d3373c5dfd4f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -187,7 +187,7 @@ func (c *seriesStore) GetChunkRefs(ctx context.Context, userID string, from, thr return [][]Chunk{}, []*Fetcher{}, nil } - return [][]Chunk{chunks}, []*Fetcher{c.baseStore.Fetcher}, nil + return [][]Chunk{chunks}, []*Fetcher{c.baseStore.fetcher}, nil } // LabelNamesForMetricName retrieves all label names for a metric name. 
@@ -251,7 +251,7 @@ func (c *seriesStore) lookupLabelNamesByChunks(ctx context.Context, from, throug chunksPerQuery.Observe(float64(len(filtered))) // Now fetch the actual chunk data from Memcache / S3 - allChunks, err := c.FetchChunks(ctx, filtered, keys) + allChunks, err := c.fetcher.FetchChunks(ctx, filtered, keys) if err != nil { level.Error(log).Log("msg", "FetchChunks", "err", err) return nil, err @@ -424,7 +424,7 @@ func (c *seriesStore) PutOne(ctx context.Context, from, through model.Time, chun writeChunk := true // If this chunk is in cache it must already be in the database so we don't need to write it again - found, _, _ := c.cache.Fetch(ctx, []string{chunk.ExternalKey()}) + found, _, _ := c.fetcher.cache.Fetch(ctx, []string{chunk.ExternalKey()}) if len(found) > 0 { writeChunk = false dedupedChunksTotal.Inc() @@ -444,7 +444,7 @@ func (c *seriesStore) PutOne(ctx context.Context, from, through model.Time, chun return err } - if oic, ok := c.storage.(ObjectAndIndexClient); ok { + if oic, ok := c.fetcher.storage.(ObjectAndIndexClient); ok { chunks := chunks if !writeChunk { chunks = []Chunk{} @@ -455,7 +455,7 @@ func (c *seriesStore) PutOne(ctx context.Context, from, through model.Time, chun } else { // chunk not found, write it. if writeChunk { - err := c.storage.PutChunks(ctx, chunks) + err := c.fetcher.storage.PutChunks(ctx, chunks) if err != nil { return err } @@ -467,7 +467,7 @@ func (c *seriesStore) PutOne(ctx context.Context, from, through model.Time, chun // we already have the chunk in the cache so don't write it back to the cache. 
if writeChunk { - if cacheErr := c.writeBackCache(ctx, chunks); cacheErr != nil { + if cacheErr := c.fetcher.writeBackCache(ctx, chunks); cacheErr != nil { level.Warn(log).Log("msg", "could not store chunks in chunk cache", "err", cacheErr) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 8d87a7d34f39f..1629ead9247fc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -153,7 +153,10 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf stores := chunk.NewCompositeStore(cacheGenNumLoader) for _, s := range schemaCfg.Configs { - index, err := NewIndexClient(s.IndexType, cfg, schemaCfg) + indexClientReg := prometheus.WrapRegistererWith( + prometheus.Labels{"component": "index-store-" + s.From.String()}, reg) + + index, err := NewIndexClient(s.IndexType, cfg, schemaCfg, indexClientReg) if err != nil { return nil, errors.Wrap(err, "error creating index client") } @@ -163,7 +166,11 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf if objectStoreType == "" { objectStoreType = s.IndexType } - chunks, err := NewChunkClient(objectStoreType, cfg, schemaCfg) + + chunkClientReg := prometheus.WrapRegistererWith( + prometheus.Labels{"component": "chunk-store-" + s.From.String()}, reg) + + chunks, err := NewChunkClient(objectStoreType, cfg, schemaCfg, chunkClientReg) if err != nil { return nil, errors.Wrap(err, "error creating object client") } @@ -180,7 +187,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf } // NewIndexClient makes a new index client of the desired type. 
-func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { +func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (chunk.IndexClient, error) { if indexClientFactory, ok := customIndexStores[name]; ok { if indexClientFactory.indexClientFactoryFunc != nil { return indexClientFactory.indexClientFactoryFunc() @@ -199,7 +206,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun if len(path) > 0 { level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } - return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) + return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer) case "gcp": return gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcp-columnkey", "bigtable": @@ -208,7 +215,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun cfg.GCPStorageConfig.DistributeKeys = true return gcp.NewStorageClientColumnKey(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "cassandra": - return cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg) + return cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg, registerer) case "boltdb": return local.NewBoltDBIndexClient(cfg.BoltDBConfig) case "grpc-store": @@ -219,7 +226,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun } // NewChunkClient makes a new chunk.Client of the desired types. 
-func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.Client, error) { +func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, registerer prometheus.Registerer) (chunk.Client, error) { switch name { case "inmemory": return chunk.NewMockStorage(), nil @@ -233,7 +240,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun if len(path) > 0 { level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } - return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) + return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer) case "azure": return newChunkClientFromStore(azure.NewBlobStorage(&cfg.AzureStorageConfig, chunk.DirDelim)) case "gcp": @@ -245,7 +252,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun case "swift": return newChunkClientFromStore(openstack.NewSwiftObjectClient(cfg.Swift, chunk.DirDelim)) case "cassandra": - return cassandra.NewObjectClient(cfg.CassandraStorageConfig, schemaCfg) + return cassandra.NewObjectClient(cfg.CassandraStorageConfig, schemaCfg, registerer) case "filesystem": store, err := local.NewFSObjectClient(cfg.FSConfig) if err != nil { @@ -267,7 +274,7 @@ func newChunkClientFromStore(store chunk.ObjectClient, err error) (chunk.Client, } // NewTableClient makes a new table client based on the configuration. 
-func NewTableClient(name string, cfg Config) (chunk.TableClient, error) { +func NewTableClient(name string, cfg Config, registerer prometheus.Registerer) (chunk.TableClient, error) { if indexClientFactory, ok := customIndexStores[name]; ok { if indexClientFactory.tableClientFactoryFunc != nil { return indexClientFactory.tableClientFactoryFunc() @@ -285,11 +292,11 @@ func NewTableClient(name string, cfg Config) (chunk.TableClient, error) { if len(path) > 0 { level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } - return aws.NewDynamoDBTableClient(cfg.AWSStorageConfig.DynamoDBConfig) + return aws.NewDynamoDBTableClient(cfg.AWSStorageConfig.DynamoDBConfig, registerer) case "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed": return gcp.NewTableClient(context.Background(), cfg.GCPStorageConfig) case "cassandra": - return cassandra.NewTableClient(context.Background(), cfg.CassandraStorageConfig) + return cassandra.NewTableClient(context.Background(), cfg.CassandraStorageConfig, registerer) case "boltdb": return local.NewTableClient(cfg.BoltDBConfig.Directory) case "grpc-store": diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage_client.go index bb25d4737a3cc..16470639ac1ce 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage_client.go @@ -67,6 +67,7 @@ type ObjectClient interface { GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error) List(ctx context.Context, prefix string) ([]StorageObject, []StorageCommonPrefix, error) DeleteObject(ctx context.Context, objectKey string) error + PathSeparator() string Stop() } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go index ff0a1f77b6806..0b36801012ced 100644 --- 
a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go @@ -7,16 +7,16 @@ import ( "github.com/pkg/errors" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/go-kit/kit/log" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/rulefmt" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/rules" legacy_promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql" - legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" "github.com/cortexproject/cortex/pkg/util" ) @@ -247,7 +247,7 @@ func (c RulesConfig) Parse() (map[string][]rules.Rule, error) { // ParseFormatted returns the rulefmt map of a users rules configs. It allows // for rules to be mapped to disk and read by the prometheus rules manager. -func (c RulesConfig) ParseFormatted() (map[string]legacy_rulefmt.RuleGroups, error) { +func (c RulesConfig) ParseFormatted() (map[string]rulefmt.RuleGroups, error) { switch c.FormatVersion { case RuleFormatV1: return c.parseV1Formatted() @@ -260,11 +260,11 @@ func (c RulesConfig) ParseFormatted() (map[string]legacy_rulefmt.RuleGroups, err // parseV2 parses and validates the content of the rule files in a RulesConfig // according to the Prometheus 2.x rule format. 
-func (c RulesConfig) parseV2Formatted() (map[string]legacy_rulefmt.RuleGroups, error) { - ruleMap := map[string]legacy_rulefmt.RuleGroups{} +func (c RulesConfig) parseV2Formatted() (map[string]rulefmt.RuleGroups, error) { + ruleMap := map[string]rulefmt.RuleGroups{} for fn, content := range c.Files { - rgs, errs := legacy_rulefmt.Parse([]byte(content)) + rgs, errs := rulefmt.Parse([]byte(content)) for _, err := range errs { // return just the first error, if any return nil, err } @@ -276,17 +276,17 @@ func (c RulesConfig) parseV2Formatted() (map[string]legacy_rulefmt.RuleGroups, e // parseV1 parses and validates the content of the rule files in a RulesConfig // according to the Prometheus 1.x rule format. -func (c RulesConfig) parseV1Formatted() (map[string]legacy_rulefmt.RuleGroups, error) { - result := map[string]legacy_rulefmt.RuleGroups{} +func (c RulesConfig) parseV1Formatted() (map[string]rulefmt.RuleGroups, error) { + result := map[string]rulefmt.RuleGroups{} for fn, content := range c.Files { stmts, err := legacy_promql.ParseStmts(content) if err != nil { return nil, fmt.Errorf("error parsing %s: %s", fn, err) } - ra := []legacy_rulefmt.Rule{} + ra := []rulefmt.RuleNode{} for _, stmt := range stmts { - var rule legacy_rulefmt.Rule + var rule rulefmt.RuleNode switch r := stmt.(type) { case *legacy_promql.AlertStmt: _, err := parser.ParseExpr(r.Expr.String()) @@ -294,9 +294,9 @@ func (c RulesConfig) parseV1Formatted() (map[string]legacy_rulefmt.RuleGroups, e return nil, err } - rule = legacy_rulefmt.Rule{ - Alert: r.Name, - Expr: r.Expr.String(), + rule = rulefmt.RuleNode{ + Alert: yaml.Node{Value: r.Name}, + Expr: yaml.Node{Value: r.Expr.String()}, For: model.Duration(r.Duration), Labels: r.Labels.Map(), Annotations: r.Annotations.Map(), @@ -308,9 +308,9 @@ func (c RulesConfig) parseV1Formatted() (map[string]legacy_rulefmt.RuleGroups, e return nil, err } - rule = legacy_rulefmt.Rule{ - Record: r.Name, - Expr: r.Expr.String(), + rule = rulefmt.RuleNode{ + 
Record: yaml.Node{Value: r.Name}, + Expr: yaml.Node{Value: r.Expr.String()}, Labels: r.Labels.Map(), } @@ -319,8 +319,8 @@ func (c RulesConfig) parseV1Formatted() (map[string]legacy_rulefmt.RuleGroups, e } ra = append(ra, rule) } - result[fn] = legacy_rulefmt.RuleGroups{ - Groups: []legacy_rulefmt.RuleGroup{ + result[fn] = rulefmt.RuleGroups{ + Groups: []rulefmt.RuleGroup{ { Name: "rg:" + fn, Rules: ra, @@ -348,7 +348,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { groups := map[string][]rules.Rule{} for fn, content := range c.Files { - rgs, errs := legacy_rulefmt.Parse([]byte(content)) + rgs, errs := rulefmt.Parse([]byte(content)) if len(errs) > 0 { return nil, fmt.Errorf("error parsing %s: %v", fn, errs[0]) } @@ -356,26 +356,26 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { for _, rg := range rgs.Groups { rls := make([]rules.Rule, 0, len(rg.Rules)) for _, rl := range rg.Rules { - expr, err := parser.ParseExpr(rl.Expr) + expr, err := parser.ParseExpr(rl.Expr.Value) if err != nil { return nil, err } - if rl.Alert != "" { + if rl.Alert.Value != "" { rls = append(rls, rules.NewAlertingRule( - rl.Alert, + rl.Alert.Value, expr, time.Duration(rl.For), labels.FromMap(rl.Labels), labels.FromMap(rl.Annotations), nil, true, - log.With(util.Logger, "alert", rl.Alert), + log.With(util.Logger, "alert", rl.Alert.Value), )) continue } rls = append(rls, rules.NewRecordingRule( - rl.Record, + rl.Record.Value, expr, labels.FromMap(rl.Labels), )) diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go index 23b9689496f66..5b06530065b62 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go @@ -13,10 +13,8 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/thanos-io/thanos/pkg/tracing" - 
"github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" "github.com/weaveworks/common/signals" - "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" "gopkg.in/yaml.v2" @@ -42,9 +40,11 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/ruler/rules" "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/fakeauth" "github.com/cortexproject/cortex/pkg/util/grpc/healthcheck" "github.com/cortexproject/cortex/pkg/util/modules" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" @@ -212,6 +212,7 @@ type Cortex struct { TombstonesLoader *purger.TombstonesLoader Ruler *ruler.Ruler + RulerStorage rules.RuleStore ConfigAPI *configAPI.API ConfigDB db.DB Alertmanager *alertmanager.MultitenantAlertmanager @@ -233,11 +234,18 @@ func New(cfg Config) (*Cortex, error) { os.Exit(0) } + // Don't check auth header on TransferChunks, as we weren't originally + // sending it and this could cause transfers to fail on update. + // + // Also don't check auth /frontend.Frontend/Process, as this handles + // queries for multiple users. 
+ cfg.API.HTTPAuthMiddleware = fakeauth.SetupAuthMiddleware(&cfg.Server, cfg.AuthEnabled, + []string{"/cortex.Ingester/TransferChunks", "/frontend.Frontend/Process"}) + cortex := &Cortex{ Cfg: cfg, } - cortex.setupAuthMiddleware() cortex.setupThanosTracing() if err := cortex.setupModuleManager(); err != nil { @@ -247,37 +255,6 @@ func New(cfg Config) (*Cortex, error) { return cortex, nil } -func (t *Cortex) setupAuthMiddleware() { - if t.Cfg.AuthEnabled { - t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, - middleware.ServerUserHeaderInterceptor, - ) - t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, - func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - switch info.FullMethod { - // Don't check auth header on TransferChunks, as we weren't originally - // sending it and this could cause transfers to fail on update. - // - // Also don't check auth /frontend.Frontend/Process, as this handles - // queries for multiple users. - case "/cortex.Ingester/TransferChunks", "/frontend.Frontend/Process": - return handler(srv, ss) - default: - return middleware.StreamServerUserHeaderInterceptor(srv, ss, info, handler) - } - }, - ) - } else { - t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, - fakeGRPCAuthUniaryMiddleware, - ) - t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, - fakeGRPCAuthStreamMiddleware, - ) - t.Cfg.API.HTTPAuthMiddleware = fakeHTTPAuthMiddleware - } -} - // setupThanosTracing appends a gRPC middleware used to inject our tracer into the custom // context used by Thanos, in order to get Thanos spans correctly attached to our traces. 
func (t *Cortex) setupThanosTracing() { diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/fake_auth.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/fake_auth.go deleted file mode 100644 index 5e3d4c15d356b..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/fake_auth.go +++ /dev/null @@ -1,42 +0,0 @@ -package cortex - -import ( - "context" - "net/http" - - "github.com/weaveworks/common/middleware" - "github.com/weaveworks/common/user" - "google.golang.org/grpc" -) - -// Fake auth middlewares just injects a fake userID, so the rest of the code -// can continue to be multitenant. - -var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := user.InjectOrgID(r.Context(), "fake") - next.ServeHTTP(w, r.WithContext(ctx)) - }) -}) - -var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - ctx = user.InjectOrgID(ctx, "fake") - return handler(ctx, req) -} - -var fakeGRPCAuthStreamMiddleware = func(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - ctx := user.InjectOrgID(ss.Context(), "fake") - return handler(srv, serverStream{ - ctx: ctx, - ServerStream: ss, - }) -} - -type serverStream struct { - ctx context.Context - grpc.ServerStream -} - -func (ss serverStream) Context() context.Context { - return ss.ctx -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index ad2dbc8dbc8ac..25f03ae5e5181 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -56,6 +56,7 @@ const ( Store string = "store" DeleteRequestsStore string = "delete-requests-store" TableManager string = 
"table-manager" + RulerStorage string = "ruler-storage" Ruler string = "ruler" Configs string = "configs" AlertManager string = "alertmanager" @@ -169,7 +170,8 @@ func (t *Cortex) initDistributor() (serv services.Service, err error) { } func (t *Cortex) initQuerier() (serv services.Service, err error) { - queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) + querierRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "querier"}, prometheus.DefaultRegisterer) + queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, querierRegisterer) // Prometheus histograms for requests to the querier. querierRequestDuration := promauto.With(prometheus.DefaultRegisterer).NewHistogramVec(prometheus.HistogramOpts{ @@ -205,7 +207,7 @@ func (t *Cortex) initStoreQueryables() (services.Service, error) { var servs []services.Service //nolint:golint // I prefer this form over removing 'else', because it allows q to have smaller scope. 
- if q, err := initQueryableForEngine(t.Cfg.Storage.Engine, t.Cfg, t.Store, prometheus.DefaultRegisterer); err != nil { + if q, err := initQueryableForEngine(t.Cfg.Storage.Engine, t.Cfg, t.Store, t.Overrides, prometheus.DefaultRegisterer); err != nil { return nil, fmt.Errorf("failed to initialize querier for engine '%s': %v", t.Cfg.Storage.Engine, err) } else { t.StoreQueryables = append(t.StoreQueryables, querier.UseAlwaysQueryable(q)) @@ -219,7 +221,7 @@ func (t *Cortex) initStoreQueryables() (services.Service, error) { return nil, fmt.Errorf("second store engine used by querier '%s' must be different than primary engine '%s'", t.Cfg.Querier.SecondStoreEngine, t.Cfg.Storage.Engine) } - sq, err := initQueryableForEngine(t.Cfg.Querier.SecondStoreEngine, t.Cfg, t.Store, prometheus.DefaultRegisterer) + sq, err := initQueryableForEngine(t.Cfg.Querier.SecondStoreEngine, t.Cfg, t.Store, t.Overrides, prometheus.DefaultRegisterer) if err != nil { return nil, fmt.Errorf("failed to initialize querier for engine '%s': %v", t.Cfg.Querier.SecondStoreEngine, err) } @@ -245,7 +247,7 @@ func (t *Cortex) initStoreQueryables() (services.Service, error) { } } -func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, reg prometheus.Registerer) (prom_storage.Queryable, error) { +func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, limits *validation.Overrides, reg prometheus.Registerer) (prom_storage.Queryable, error) { switch engine { case storage.StorageEngineChunks: if chunkStore == nil { @@ -260,20 +262,24 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, r cfg.Querier.StoreGatewayAddresses = fmt.Sprintf("127.0.0.1:%d", cfg.Server.GRPCListenPort) } - return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.TSDB, util.Logger, reg) + return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.TSDB, limits, util.Logger, reg) default: return nil, 
fmt.Errorf("unknown storage engine '%s'", engine) } } +func (t *Cortex) tsdbIngesterConfig() { + t.Cfg.Ingester.TSDBEnabled = t.Cfg.Storage.Engine == storage.StorageEngineTSDB + t.Cfg.Ingester.TSDBConfig = t.Cfg.TSDB +} + func (t *Cortex) initIngester() (serv services.Service, err error) { t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.RuntimeConfig) t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort - t.Cfg.Ingester.TSDBEnabled = t.Cfg.Storage.Engine == storage.StorageEngineTSDB - t.Cfg.Ingester.TSDBConfig = t.Cfg.TSDB t.Cfg.Ingester.ShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels + t.tsdbIngesterConfig() t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Overrides, t.Store, prometheus.DefaultRegisterer) if err != nil { @@ -286,10 +292,11 @@ func (t *Cortex) initIngester() (serv services.Service, err error) { } func (t *Cortex) initFlusher() (serv services.Service, err error) { + t.tsdbIngesterConfig() + t.Flusher, err = flusher.New( t.Cfg.Flusher, t.Cfg.Ingester, - t.Cfg.IngesterClient, t.Store, prometheus.DefaultRegisterer, ) @@ -329,7 +336,9 @@ func (t *Cortex) initDeleteRequestsStore() (serv services.Service, err error) { } var indexClient chunk.IndexClient - indexClient, err = storage.NewIndexClient(t.Cfg.Storage.DeleteStoreConfig.Store, t.Cfg.Storage, t.Cfg.Schema) + reg := prometheus.WrapRegistererWith( + prometheus.Labels{"component": DeleteRequestsStore}, prometheus.DefaultRegisterer) + indexClient, err = storage.NewIndexClient(t.Cfg.Storage.DeleteStoreConfig.Store, t.Cfg.Storage, t.Cfg.Schema, reg) if err != nil { return } @@ -358,11 +367,6 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { return } - // Ensure the default evaluation interval is set (promql uses a package-scoped mutable variable). 
- // This is important when `querier.parallelise-shardable-queries` is enabled because the frontend - // aggregates the sharded queries. - promql.SetDefaultEvaluationInterval(t.Cfg.Querier.DefaultEvaluationInterval) - tripperware, cache, err := queryrange.NewTripperware( t.Cfg.QueryRange, util.Logger, @@ -375,6 +379,9 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { Reg: prometheus.DefaultRegisterer, MaxSamples: t.Cfg.Querier.MaxSamples, Timeout: t.Cfg.Querier.Timeout, + NoStepSubqueryIntervalFn: func(int64) int64 { + return t.Cfg.Querier.DefaultEvaluationInterval.Milliseconds() + }, }, t.Cfg.Querier.QueryIngestersWithin, prometheus.DefaultRegisterer, @@ -425,7 +432,10 @@ func (t *Cortex) initTableManager() (services.Service, error) { os.Exit(1) } - tableClient, err := storage.NewTableClient(lastConfig.IndexType, t.Cfg.Storage) + reg := prometheus.WrapRegistererWith( + prometheus.Labels{"component": "table-manager-store"}, prometheus.DefaultRegisterer) + + tableClient, err := storage.NewTableClient(lastConfig.IndexType, t.Cfg.Storage, reg) if err != nil { return nil, err } @@ -435,7 +445,10 @@ func (t *Cortex) initTableManager() (services.Service, error) { var extraTables []chunk.ExtraTables if t.Cfg.PurgerConfig.Enable { - deleteStoreTableClient, err := storage.NewTableClient(t.Cfg.Storage.DeleteStoreConfig.Store, t.Cfg.Storage) + reg := prometheus.WrapRegistererWith( + prometheus.Labels{"component": "table-manager-" + DeleteRequestsStore}, prometheus.DefaultRegisterer) + + deleteStoreTableClient, err := storage.NewTableClient(t.Cfg.Storage.DeleteStoreConfig.Store, t.Cfg.Storage, reg) if err != nil { return nil, err } @@ -448,12 +461,33 @@ func (t *Cortex) initTableManager() (services.Service, error) { return t.TableManager, err } +func (t *Cortex) initRulerStorage() (serv services.Service, err error) { + // if the ruler is not configured and we're in single binary then let's just log an error and continue. 
+ // unfortunately there is no way to generate a "default" config and compare default against actual + // to determine if it's unconfigured. the following check, however, correctly tests this. + // Single binary integration tests will break if this ever drifts + if t.Cfg.Target == All && t.Cfg.Ruler.StoreConfig.IsDefaults() { + level.Info(util.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.") + return + } + + t.RulerStorage, err = ruler.NewRuleStorage(t.Cfg.Ruler.StoreConfig) + + return +} + func (t *Cortex) initRuler() (serv services.Service, err error) { + if t.RulerStorage == nil { + level.Info(util.Logger).Log("msg", "RulerStorage is nil. Not starting the ruler.") + return nil, nil + } + t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) + rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) + queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer) - t.Ruler, err = ruler.NewRuler(t.Cfg.Ruler, engine, queryable, t.Distributor, prometheus.DefaultRegisterer, util.Logger) + t.Ruler, err = ruler.NewRuler(t.Cfg.Ruler, engine, queryable, t.Distributor, prometheus.DefaultRegisterer, util.Logger, t.RulerStorage) if err != nil { return } @@ -570,6 +604,7 @@ func (t *Cortex) setupModuleManager() error { mm.RegisterModule(StoreQueryable, t.initStoreQueryables, modules.UserInvisibleModule) mm.RegisterModule(QueryFrontend, t.initQueryFrontend) mm.RegisterModule(TableManager, t.initTableManager) + mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule) mm.RegisterModule(Ruler, t.initRuler) mm.RegisterModule(Configs, t.initConfig) 
mm.RegisterModule(AlertManager, t.initAlertManager) @@ -588,16 +623,16 @@ func (t *Cortex) setupModuleManager() error { Ingester: {Overrides, Store, API, RuntimeConfig, MemberlistKV}, Flusher: {Store, API}, Querier: {Overrides, Distributor, Store, Ring, API, StoreQueryable}, - StoreQueryable: {Store}, + StoreQueryable: {Overrides, Store}, QueryFrontend: {API, Overrides, DeleteRequestsStore}, TableManager: {API}, - Ruler: {Overrides, Distributor, Store, StoreQueryable}, + Ruler: {Overrides, Distributor, Store, StoreQueryable, RulerStorage}, Configs: {API}, AlertManager: {API}, Compactor: {API}, StoreGateway: {API}, Purger: {Store, DeleteRequestsStore, API}, - All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway}, + All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, } for mod, targets := range deps { if err := mm.AddDependency(mod, targets...); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go index 44877c973c218..41916d53eca36 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go @@ -1,7 +1,7 @@ package cortex import ( - "os" + "io" "gopkg.in/yaml.v2" @@ -19,15 +19,10 @@ type runtimeConfigValues struct { Multi kv.MultiRuntimeConfig `yaml:"multi_kv_config"` } -func loadRuntimeConfig(filename string) (interface{}, error) { - f, err := os.Open(filename) - if err != nil { - return nil, err - } - +func loadRuntimeConfig(r io.Reader) (interface{}, error) { var overrides = &runtimeConfigValues{} - decoder := yaml.NewDecoder(f) + decoder := yaml.NewDecoder(r) decoder.SetStrict(true) if err := decoder.Decode(&overrides); err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go 
b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index da73d0f0decd4..0e92c451c548b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -512,7 +512,7 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie // Obtain a subring if required if size := d.limits.SubringSize(userID); size > 0 { - h := client.HashAdd32(client.HashNew32(), userID) + h := client.HashAdd32a(client.HashNew32a(), userID) subRing, err = d.ingestersRing.Subring(h, size) if err != nil { return nil, httpgrpc.Errorf(http.StatusInternalServerError, "unable to create subring: %v", err) diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go index b9d101be932be..9f96d21295615 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go +++ b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go @@ -10,7 +10,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/ingester" - "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -20,22 +19,24 @@ type Config struct { WALDir string `yaml:"wal_dir"` ConcurrentFlushes int `yaml:"concurrent_flushes"` FlushOpTimeout time.Duration `yaml:"flush_op_timeout"` + ExitAfterFlush bool `yaml:"exit_after_flush"` } // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.WALDir, "flusher.wal-dir", "wal", "Directory to read WAL from.") - f.IntVar(&cfg.ConcurrentFlushes, "flusher.concurrent-flushes", 50, "Number of concurrent goroutines flushing to dynamodb.") - f.DurationVar(&cfg.FlushOpTimeout, "flusher.flush-op-timeout", 2*time.Minute, 
"Timeout for individual flush operations.") + f.StringVar(&cfg.WALDir, "flusher.wal-dir", "wal", "Directory to read WAL from (chunks storage engine only).") + f.IntVar(&cfg.ConcurrentFlushes, "flusher.concurrent-flushes", 50, "Number of concurrent goroutines flushing to storage (chunks storage engine only).") + f.DurationVar(&cfg.FlushOpTimeout, "flusher.flush-op-timeout", 2*time.Minute, "Timeout for individual flush operations (chunks storage engine only).") + f.BoolVar(&cfg.ExitAfterFlush, "flusher.exit-after-flush", true, "Stop Cortex after flush has finished. If false, Cortex process will keep running, doing nothing.") } -// Flusher is designed to be used as a job to flush the chunks from the WAL on disk. +// Flusher is designed to be used as a job to flush the data from the WAL on disk. +// Flusher works with both chunks-based and blocks-based ingesters. type Flusher struct { services.Service cfg Config ingesterConfig ingester.Config - clientConfig client.Config chunkStore ingester.ChunkStore registerer prometheus.Registerer } @@ -49,11 +50,11 @@ const ( func New( cfg Config, ingesterConfig ingester.Config, - clientConfig client.Config, chunkStore ingester.ChunkStore, registerer prometheus.Registerer, ) (*Flusher, error) { + // These are ignored by blocks-ingester, but that's fine. 
ingesterConfig.WALConfig.Dir = cfg.WALDir ingesterConfig.ConcurrentFlushes = cfg.ConcurrentFlushes ingesterConfig.FlushOpTimeout = cfg.FlushOpTimeout @@ -61,7 +62,6 @@ func New( f := &Flusher{ cfg: cfg, ingesterConfig: ingesterConfig, - clientConfig: clientConfig, chunkStore: chunkStore, registerer: registerer, } @@ -70,7 +70,7 @@ } func (f *Flusher) running(ctx context.Context) error { - ing, err := ingester.NewForFlusher(f.ingesterConfig, f.clientConfig, f.chunkStore, f.registerer) + ing, err := ingester.NewForFlusher(f.ingesterConfig, f.chunkStore, f.registerer) if err != nil { return errors.Wrap(err, "create ingester") } @@ -89,5 +89,11 @@ if err := services.StopAndAwaitTerminated(ctx, ing); err != nil { return errors.Wrap(err, "stop and await terminated ingester") } - return util.ErrStopProcess + + if f.cfg.ExitAfterFlush { + return util.ErrStopProcess + } + + // Return normally -- this keeps Cortex running. + return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index 0241f16e48b4f..f2e3b7fb80f5f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -75,3 +75,25 @@ func HashAddByte32(h uint32, b byte) uint32 { h ^= uint32(b) return h } + +// HashNew32a initializes a new fnv32a hash value. +func HashNew32a() uint32 { + return offset32 +} + +// HashAdd32a adds a string to a fnv32a hash value, returning the updated hash. +// Note this is the same algorithm as Go stdlib `sum32.Write()` +func HashAdd32a(h uint32, s string) uint32 { + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= prime32 + } + return h +} + +// HashAddByte32a adds a byte to a fnv32a hash value, returning the updated hash. 
+func HashAddByte32a(h uint32, b byte) uint32 { + h ^= uint32(b) + h *= prime32 + return h +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go index 1af6a368fc3d5..79634f77b16ba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go @@ -262,19 +262,17 @@ func (i *Ingester) startFlushLoops() { // Compared to the 'New' method: // * Always replays the WAL. // * Does not start the lifecycler. -// * No ingester v2. -func NewForFlusher(cfg Config, clientConfig client.Config, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { - if cfg.ingesterClientFactory == nil { - cfg.ingesterClientFactory = client.MakeIngesterClient +func NewForFlusher(cfg Config, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { + if cfg.TSDBEnabled { + return NewV2ForFlusher(cfg, registerer) } i := &Ingester{ - cfg: cfg, - clientConfig: clientConfig, - metrics: newIngesterMetrics(registerer, true), - chunkStore: chunkStore, - flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), - wal: &noopWAL{}, + cfg: cfg, + metrics: newIngesterMetrics(registerer, true), + chunkStore: chunkStore, + flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), + wal: &noopWAL{}, } i.BasicService = services.NewBasicService(i.startingForFlusher, i.loop, i.stopping) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index 852cdc5ac99ac..da21f5da928e3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -124,6 +124,9 @@ type TSDBState struct { dbs map[string]*userTSDB // tsdb sharded by userID bucket objstore.Bucket + // Value used by shipper as 
external label. + shipperIngesterID string + // Keeps count of in-flight requests inflightWriteReqs sync.WaitGroup @@ -147,7 +150,47 @@ type TSDBState struct { refCachePurgeDuration prometheus.Histogram } -// NewV2 returns a new Ingester that uses prometheus block storage instead of chunk storage +func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer) TSDBState { + return TSDBState{ + dbs: make(map[string]*userTSDB), + bucket: bucketClient, + tsdbMetrics: newTSDBMetrics(registerer), + forceCompactTrigger: make(chan chan<- struct{}), + shipTrigger: make(chan chan<- struct{}), + + compactionsTriggered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_tsdb_compactions_triggered_total", + Help: "Total number of triggered compactions.", + }), + + compactionsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_tsdb_compactions_failed_total", + Help: "Total number of compactions that failed.", + }), + walReplayTime: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_tsdb_wal_replay_duration_seconds", + Help: "The total time it takes to open and replay a TSDB WAL.", + Buckets: prometheus.DefBuckets, + }), + appenderAddDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_tsdb_appender_add_duration_seconds", + Help: "The total time it takes for a push request to add samples to the TSDB appender.", + Buckets: []float64{.001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}, + }), + appenderCommitDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_tsdb_appender_commit_duration_seconds", + Help: "The total time it takes for a push request to commit samples appended to TSDB.", + Buckets: []float64{.001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}, + }), + refCachePurgeDuration: 
promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_tsdb_refcache_purge_duration_seconds", + Help: "The total time it takes to purge the TSDB series reference cache for a single tenant.", + Buckets: prometheus.DefBuckets, + }), + } +} + +// NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage. func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { util.WarnExperimentalUse("Blocks storage engine") bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "ingester", util.Logger, registerer) @@ -163,43 +206,7 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, chunkStore: nil, usersMetadata: map[string]*userMetricsMetadata{}, wal: &noopWAL{}, - TSDBState: TSDBState{ - dbs: make(map[string]*userTSDB), - bucket: bucketClient, - tsdbMetrics: newTSDBMetrics(registerer), - forceCompactTrigger: make(chan chan<- struct{}), - shipTrigger: make(chan chan<- struct{}), - - compactionsTriggered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_tsdb_compactions_triggered_total", - Help: "Total number of triggered compactions.", - }), - - compactionsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_tsdb_compactions_failed_total", - Help: "Total number of compactions that failed.", - }), - walReplayTime: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_tsdb_wal_replay_duration_seconds", - Help: "The total time it takes to open and replay a TSDB WAL.", - Buckets: prometheus.DefBuckets, - }), - appenderAddDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_tsdb_appender_add_duration_seconds", - Help: "The total time it takes for a push request to add samples to the TSDB appender.", - Buckets: []float64{.001, 
.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}, - }), - appenderCommitDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_tsdb_appender_commit_duration_seconds", - Help: "The total time it takes for a push request to commit samples appended to TSDB.", - Buckets: []float64{.001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}, - }), - refCachePurgeDuration: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_tsdb_refcache_purge_duration_seconds", - Help: "The total time it takes to purge the TSDB series reference cache for a single tenant.", - Buckets: prometheus.DefBuckets, - }), - }, + TSDBState: newTSDBState(bucketClient, registerer), } // Replace specific metrics which we can't directly track but we need to read @@ -223,13 +230,47 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, i.limiter = NewLimiter(limits, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor, cfg.ShardByAllLabels) i.userStates = newUserStates(i.limiter, cfg, i.metrics) + i.TSDBState.shipperIngesterID = i.lifecycler.ID + i.BasicService = services.NewBasicService(i.startingV2, i.updateLoop, i.stoppingV2) return i, nil } +// Special version of ingester used by Flusher. This ingester is not ingesting anything, its only purpose is to react +// on Flush method and flush all opened TSDBs when called. 
+func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, error) { + util.WarnExperimentalUse("Blocks storage engine") + bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "ingester", util.Logger, registerer) + if err != nil { + return nil, errors.Wrap(err, "failed to create the bucket client") + } + + i := &Ingester{ + cfg: cfg, + metrics: newIngesterMetrics(registerer, false), + wal: &noopWAL{}, + TSDBState: newTSDBState(bucketClient, registerer), + } + + i.TSDBState.shipperIngesterID = "flusher" + + // This ingester will not start any subservices (lifecycler, compaction, shipping), + // and will only open TSDBs, wait for Flush to be called, and then close TSDBs again. + i.BasicService = services.NewIdleService(i.startingV2ForFlusher, i.stoppingV2ForFlusher) + return i, nil +} + +func (i *Ingester) startingV2ForFlusher(ctx context.Context) error { + if err := i.openExistingTSDB(ctx); err != nil { + return errors.Wrap(err, "opening existing TSDBs") + } + + // Don't start any sub-services (lifecycler, compaction, shipper) at all. + return nil +} + func (i *Ingester) startingV2(ctx context.Context) error { - // Scan and open TSDB's that already exist on disk - if err := i.openExistingTSDB(context.Background()); err != nil { + if err := i.openExistingTSDB(ctx); err != nil { return errors.Wrap(err, "opening existing TSDBs") } @@ -260,6 +301,13 @@ func (i *Ingester) startingV2(ctx context.Context) error { return errors.Wrap(err, "failed to start ingester components") } +func (i *Ingester) stoppingV2ForFlusher(_ error) error { + if !i.cfg.TSDBConfig.KeepUserTSDBOpenOnShutdown { + i.closeAllTSDB() + } + return nil +} + // runs when V2 ingester is stopping func (i *Ingester) stoppingV2(_ error) error { // It's important to wait until shipper is finished, @@ -267,11 +315,18 @@ func (i *Ingester) stoppingV2(_ error) error { // there's no shipping on-going. 
if err := services.StopManagerAndAwaitStopped(context.Background(), i.TSDBState.subservices); err != nil { - level.Warn(util.Logger).Log("msg", "stopping ingester subservices", "err", err) + level.Warn(util.Logger).Log("msg", "failed to stop ingester subservices", "err", err) } // Next initiate our graceful exit from the ring. - return services.StopAndAwaitTerminated(context.Background(), i.lifecycler) + if err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler); err != nil { + level.Warn(util.Logger).Log("msg", "failed to stop ingester lifecycler", "err", err) + } + + if !i.cfg.TSDBConfig.KeepUserTSDBOpenOnShutdown { + i.closeAllTSDB() + } + return nil } func (i *Ingester) updateLoop(ctx context.Context) error { @@ -891,10 +946,19 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { SeriesLifecycleCallback: userDB, }) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) } db.DisableCompactions() // we will compact on our own schedule + // Run compaction before using this TSDB. If there is data in head that needs to be put into blocks, + // this will actually create the blocks. If there is no data (empty TSDB), this is a no-op, although + // local blocks compaction may still take place if configured. + level.Info(userLogger).Log("msg", "Running compaction after WAL replay") + err = db.Compact() + if err != nil { + return nil, errors.Wrapf(err, "failed to compact TSDB: %s", udir) + } + userDB.DB = db // We set the limiter here because we don't want to limit // series during WAL replay. 
@@ -910,7 +974,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { Value: userID, }, { Name: cortex_tsdb.IngesterIDExternalLabel, - Value: i.lifecycler.ID, + Value: i.TSDBState.shipperIngesterID, }, } @@ -1080,9 +1144,11 @@ func (i *Ingester) shipBlocks(ctx context.Context) { // particularly important for the JOINING state because there could // be a blocks transfer in progress (from another ingester) and if we // run the shipper in such state we could end up with race conditions. - if ingesterState := i.lifecycler.GetState(); ingesterState == ring.PENDING || ingesterState == ring.JOINING { - level.Info(util.Logger).Log("msg", "TSDB blocks shipping has been skipped because of the current ingester state", "state", ingesterState) - return + if i.lifecycler != nil { + if ingesterState := i.lifecycler.GetState(); ingesterState == ring.PENDING || ingesterState == ring.JOINING { + level.Info(util.Logger).Log("msg", "TSDB blocks shipping has been skipped because of the current ingester state", "state", ingesterState) + return + } } // Number of concurrent workers is limited in order to avoid to concurrently sync a lot @@ -1131,9 +1197,11 @@ func (i *Ingester) compactionLoop(ctx context.Context) error { func (i *Ingester) compactBlocks(ctx context.Context, force bool) { // Don't compact TSDB blocks while JOINING as there may be ongoing blocks transfers. // Compaction loop is not running in LEAVING state, so if we get here in LEAVING state, we're flushing blocks. 
- if ingesterState := i.lifecycler.GetState(); ingesterState == ring.JOINING { - level.Info(util.Logger).Log("msg", "TSDB blocks compaction has been skipped because of the current ingester state", "state", ingesterState) - return + if i.lifecycler != nil { + if ingesterState := i.lifecycler.GetState(); ingesterState == ring.JOINING { + level.Info(util.Logger).Log("msg", "TSDB blocks compaction has been skipped because of the current ingester state", "state", ingesterState) + return + } } i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.HeadCompactionConcurrency, func(userID string) { @@ -1209,9 +1277,12 @@ sendLoop: wg.Wait() } -// This method is called as part of Lifecycler's shutdown, to flush all data. -// Lifecycler shutdown happens as part of Ingester shutdown (see stoppingV2 method). +// This method will flush all data. It is called as part of Lifecycler's shutdown (if flush on shutdown is configured), or from the flusher. +// +// When called during Lifecycler shutdown, this happens as part of normal Ingester shutdown (see stoppingV2 method). +// Samples are not received at this stage. Compaction and Shipping loops have already been stopped as well. +// +// When used from flusher, ingester is constructed in a way that compaction, shipping and receiving of samples is never started. 
func (i *Ingester) v2LifecyclerFlush() { level.Info(util.Logger).Log("msg", "starting to flush and ship TSDB blocks") diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go index c9f28c3e6e5e7..6c8e392cacbeb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go @@ -24,6 +24,7 @@ import ( "github.com/thanos-io/thanos/pkg/store/hintspb" "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/weaveworks/common/user" + "go.uber.org/atomic" "golang.org/x/sync/errgroup" grpc_metadata "google.golang.org/grpc/metadata" @@ -46,7 +47,8 @@ const ( ) var ( - errNoStoreGatewayAddress = errors.New("no store-gateway address configured") + errNoStoreGatewayAddress = errors.New("no store-gateway address configured") + errMaxChunksPerQueryLimit = "the query hit the max number of chunks limit while fetching chunks for %s (limit: %d)" ) // BlocksStoreSet is the interface used to get the clients to query series on a set of blocks. @@ -78,6 +80,11 @@ type BlocksStoreClient interface { RemoteAddress() string } +// BlocksStoreLimits is the interface that should be implemented by the limits provider. +type BlocksStoreLimits interface { + MaxChunksPerQuery(userID string) int +} + type blocksStoreQueryableMetrics struct { storesHit prometheus.Histogram refetches prometheus.Histogram @@ -111,13 +118,14 @@ type BlocksStoreQueryable struct { logger log.Logger queryStoreAfter time.Duration metrics *blocksStoreQueryableMetrics + limits BlocksStoreLimits // Subservices manager. 
subservices *services.Manager subservicesWatcher *services.FailureWatcher } -func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consistency *BlocksConsistencyChecker, queryStoreAfter time.Duration, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { +func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consistency *BlocksConsistencyChecker, limits BlocksStoreLimits, queryStoreAfter time.Duration, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { util.WarnExperimentalUse("Blocks storage engine") manager, err := services.NewManager(stores, finder) @@ -134,6 +142,7 @@ func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consist subservices: manager, subservicesWatcher: services.NewFailureWatcher(), metrics: newBlocksStoreQueryableMetrics(reg), + limits: limits, } q.Service = services.NewBasicService(q.starting, q.running, q.stopping) @@ -141,7 +150,7 @@ func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consist return q, nil } -func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.Config, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { +func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.Config, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { var stores BlocksStoreSet bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), storageCfg, "querier", logger, reg) @@ -209,7 +218,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa reg, ) - return NewBlocksStoreQueryable(stores, scanner, consistency, querierCfg.QueryStoreAfter, logger, reg) + return NewBlocksStoreQueryable(stores, scanner, consistency, limits, querierCfg.QueryStoreAfter, logger, reg) } func (q *BlocksStoreQueryable) 
starting(ctx context.Context) error { @@ -256,6 +265,7 @@ func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (s finder: q.finder, stores: q.stores, metrics: q.metrics, + limits: q.limits, consistency: q.consistency, logger: q.logger, queryStoreAfter: q.queryStoreAfter, @@ -270,6 +280,7 @@ type blocksStoreQuerier struct { stores BlocksStoreSet metrics *blocksStoreQueryableMetrics consistency *BlocksConsistencyChecker + limits BlocksStoreLimits logger log.Logger // If set, the querier manipulates the max time to not be greater than @@ -357,6 +368,9 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* resSeriesSets = []storage.SeriesSet(nil) resWarnings = storage.Warnings(nil) resQueriedBlocks = []ulid.ULID(nil) + + maxChunksLimit = q.limits.MaxChunksPerQuery(q.userID) + leftChunksLimit = maxChunksLimit ) for attempt := 1; attempt <= maxFetchSeriesAttempts; attempt++ { @@ -377,7 +391,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* // Fetch series from stores. If an error occur we do not retry because retries // are only meant to cover missing blocks. - seriesSets, queriedBlocks, warnings, err := q.fetchSeriesFromStores(spanCtx, clients, minT, maxT, convertedMatchers) + seriesSets, queriedBlocks, warnings, numChunks, err := q.fetchSeriesFromStores(spanCtx, clients, minT, maxT, matchers, convertedMatchers, maxChunksLimit, leftChunksLimit) if err != nil { return storage.ErrSeriesSet(err) } @@ -387,6 +401,12 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* resWarnings = append(resWarnings, warnings...) resQueriedBlocks = append(resQueriedBlocks, queriedBlocks...) + // Given a single block is guaranteed to not be queried twice, we can safely decrease the number of + // chunks we can still read before hitting the limit (max == 0 means disabled). 
+ if maxChunksLimit > 0 { + leftChunksLimit -= numChunks + } + // Update the map of blocks we attempted to query. for client, blockIDs := range clients { touchedStores[client.RemoteAddress()] = struct{}{} @@ -425,8 +445,11 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, - matchers []storepb.LabelMatcher, -) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, error) { + matchers []*labels.Matcher, + convertedMatchers []storepb.LabelMatcher, + maxChunksLimit int, + leftChunksLimit int, +) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, int, error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, cortex_tsdb.TenantIDExternalLabel, q.userID) g, gCtx = errgroup.WithContext(reqCtx) @@ -434,6 +457,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( seriesSets = []storage.SeriesSet(nil) warnings = storage.Warnings(nil) queriedBlocks = []ulid.ULID(nil) + numChunks = atomic.NewInt32(0) spanLog = spanlogger.FromContext(ctx) ) @@ -444,7 +468,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( blockIDs := blockIDs g.Go(func() error { - req, err := createSeriesRequest(minT, maxT, matchers, blockIDs) + req, err := createSeriesRequest(minT, maxT, convertedMatchers, blockIDs) if err != nil { return errors.Wrapf(err, "failed to create series request") } @@ -459,6 +483,12 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( myQueriedBlocks := []ulid.ULID(nil) for { + // Ensure the context hasn't been canceled in the meanwhile (eg. an error occurred + // in another goroutine). + if gCtx.Err() != nil { + return gCtx.Err() + } + resp, err := stream.Recv() if err == io.EOF { break @@ -470,6 +500,14 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( // Response may either contain series, warning or hints. if s := resp.GetSeries(); s != nil { mySeries = append(mySeries, s) + + // Ensure the max number of chunks limit hasn't been reached (max == 0 means disabled). 
+ if maxChunksLimit > 0 { + actual := numChunks.Add(int32(len(s.Chunks))) + if actual > int32(leftChunksLimit) { + return fmt.Errorf(errMaxChunksPerQueryLimit, convertMatchersToString(matchers), maxChunksLimit) + } + } } if w := resp.GetWarning(); w != "" { @@ -511,10 +549,10 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores( // Wait until all client requests complete. if err := g.Wait(); err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - return seriesSets, queriedBlocks, warnings, nil + return seriesSets, queriedBlocks, warnings, int(numChunks.Load()), nil } func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { @@ -577,3 +615,19 @@ func countSeriesBytes(series []*storepb.Series) (count uint64) { return count } + +func convertMatchersToString(matchers []*labels.Matcher) string { + out := strings.Builder{} + out.WriteRune('{') + + for idx, m := range matchers { + if idx > 0 { + out.WriteRune(',') + } + + out.WriteString(m.String()) + } + + out.WriteRune('}') + return out.String() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go index 3f42b030d186c..f629a0aafe5df 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go @@ -123,6 +123,7 @@ func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (*Fronten r.URL.Scheme = u.Scheme r.URL.Host = u.Host r.URL.Path = path.Join(u.Path, r.URL.Path) + r.Host = "" return http.DefaultTransport.RoundTrip(r) }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go b/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go index 82d6bc6f7924c..a26af12a09a1d 100644 --- 
a/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/lazyquery/lazyquery.go @@ -96,7 +96,7 @@ func (s *lazySeriesSet) Next() bool { } // At implements storage.SeriesSet. -func (s lazySeriesSet) At() storage.Series { +func (s *lazySeriesSet) At() storage.Series { if s.next == nil { s.next = <-s.future } @@ -104,7 +104,7 @@ func (s lazySeriesSet) At() storage.Series { } // Err implements storage.SeriesSet. -func (s lazySeriesSet) Err() error { +func (s *lazySeriesSet) Err() error { if s.next == nil { s.next = <-s.future } @@ -112,6 +112,6 @@ func (s lazySeriesSet) Err() error { } // Warnings implements storage.SeriesSet. -func (s lazySeriesSet) Warnings() storage.Warnings { +func (s *lazySeriesSet) Warnings() storage.Warnings { return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index 634fbd410dfe4..46e5b1e8dc18f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -167,7 +167,6 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor level.Warn(util.Logger).Log("msg", "Using deprecated flag -promql.lookback-delta, use -querier.lookback-delta instead") } - promql.SetDefaultEvaluationInterval(cfg.DefaultEvaluationInterval) engine := promql.NewEngine(promql.EngineOpts{ Logger: util.Logger, Reg: reg, @@ -175,6 +174,9 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor MaxSamples: cfg.MaxSamples, Timeout: cfg.Timeout, LookbackDelta: lookbackDelta, + NoStepSubqueryIntervalFn: func(int64) int64 { + return cfg.DefaultEvaluationInterval.Milliseconds() + }, }) return &sampleAndChunkQueryable{lazyQueryable}, engine } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go 
b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go index 6827e82fb8ded..4954fdb6b18ae 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/value.go @@ -15,7 +15,9 @@ import ( // FromResult transforms a promql query result into a samplestream func FromResult(res *promql.Result) ([]SampleStream, error) { if res.Err != nil { - return nil, res.Err + // The error could be wrapped by the PromQL engine. We get the error's cause in order to + // correctly parse the error in parent callers (eg. gRPC response status code extraction). + return nil, errors.Cause(res.Err) } switch v := res.Value.(type) { case promql.Scalar: diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go index cf935007cd3b6..353874901b88a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go @@ -67,7 +67,7 @@ func RemoteReadHandler(q storage.Queryable) http.Handler { http.Error(w, lastErr.Error(), http.StatusBadRequest) return } - + w.Header().Add("Content-Type", "application/x-protobuf") if err := util.SerializeProtoResponse(w, &resp, compressionType); err != nil { level.Error(logger).Log("msg", "error sending remote read response", "err", err) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go index 045631f4ac2c9..20e5791c04bfb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go @@ -15,11 +15,11 @@ import ( "github.com/pkg/errors" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/rulefmt" 
"github.com/weaveworks/common/user" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/cortexproject/cortex/pkg/ingester/client" - rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" "github.com/cortexproject/cortex/pkg/ruler/rules" store "github.com/cortexproject/cortex/pkg/ruler/rules" "github.com/cortexproject/cortex/pkg/util" @@ -279,10 +279,10 @@ func ValidateRuleGroup(g rulefmt.RuleGroup) []error { for i, r := range g.Rules { for _, err := range r.Validate() { var ruleName string - if r.Alert != "" { - ruleName = r.Alert + if r.Alert.Value != "" { + ruleName = r.Alert.Value } else { - ruleName = r.Record + ruleName = r.Record.Value } errs = append(errs, &rulefmt.Error{ Group: g.Name, diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt/rulefmt.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt/rulefmt.go deleted file mode 100644 index de92d744e12b5..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt/rulefmt.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rulefmt - -import ( - "context" - "io/ioutil" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/prometheus/common/model" - yaml "gopkg.in/yaml.v2" - - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/template" -) - -// Error represents semantic errors on parsing rule groups. -type Error struct { - Group string - Rule int - RuleName string - Err error -} - -func (err *Error) Error() string { - return errors.Wrapf(err.Err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error() -} - -// RuleGroups is a set of rule groups that are typically exposed in a file. -type RuleGroups struct { - Groups []RuleGroup `yaml:"groups"` -} - -// Validate validates all rules in the rule groups. -func (g *RuleGroups) Validate() (errs []error) { - set := map[string]struct{}{} - - for _, g := range g.Groups { - if g.Name == "" { - errs = append(errs, errors.Errorf("Groupname should not be empty")) - } - - if _, ok := set[g.Name]; ok { - errs = append( - errs, - errors.Errorf("groupname: \"%s\" is repeated in the same file", g.Name), - ) - } - - set[g.Name] = struct{}{} - - for i, r := range g.Rules { - for _, err := range r.Validate() { - var ruleName string - if r.Alert != "" { - ruleName = r.Alert - } else { - ruleName = r.Record - } - errs = append(errs, &Error{ - Group: g.Name, - Rule: i, - RuleName: ruleName, - Err: err, - }) - } - } - } - - return errs -} - -// RuleGroup is a list of sequentially evaluated recording and alerting rules. -type RuleGroup struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval"` - Rules []Rule `yaml:"rules"` -} - -// Rule describes an alerting or recording rule. 
-type Rule struct { - Record string `yaml:"record"` - Alert string `yaml:"alert"` - Expr string `yaml:"expr"` - For model.Duration `yaml:"for"` - Labels map[string]string `yaml:"labels"` - Annotations map[string]string `yaml:"annotations"` -} - -// Validate the rule and return a list of encountered errors. -func (r *Rule) Validate() (errs []error) { - if r.Record != "" && r.Alert != "" { - errs = append(errs, errors.Errorf("only one of 'record' and 'alert' must be set")) - } - if r.Record == "" && r.Alert == "" { - errs = append(errs, errors.Errorf("one of 'record' or 'alert' must be set")) - } - - if r.Expr == "" { - errs = append(errs, errors.Errorf("field 'expr' must be set in rule")) - } else if _, err := parser.ParseExpr(r.Expr); err != nil { - errs = append(errs, errors.Wrap(err, "could not parse expression")) - } - if r.Record != "" { - if len(r.Annotations) > 0 { - errs = append(errs, errors.Errorf("invalid field 'annotations' in recording rule")) - } - if r.For != 0 { - errs = append(errs, errors.Errorf("invalid field 'for' in recording rule")) - } - if !model.IsValidMetricName(model.LabelValue(r.Record)) { - errs = append(errs, errors.Errorf("invalid recording rule name: %s", r.Record)) - } - } - - for k, v := range r.Labels { - if !model.LabelName(k).IsValid() { - errs = append(errs, errors.Errorf("invalid label name: %s", k)) - } - - if !model.LabelValue(v).IsValid() { - errs = append(errs, errors.Errorf("invalid label value: %s", v)) - } - } - - for k := range r.Annotations { - if !model.LabelName(k).IsValid() { - errs = append(errs, errors.Errorf("invalid annotation name: %s", k)) - } - } - - return append(errs, testTemplateParsing(r)...) -} - -// testTemplateParsing checks if the templates used in labels and annotations -// of the alerting rules are parsed correctly. -func testTemplateParsing(rl *Rule) (errs []error) { - if rl.Alert == "" { - // Not an alerting rule. - return errs - } - - // Trying to parse templates. 
- tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0) - defs := []string{ - "{{$labels := .Labels}}", - "{{$externalLabels := .ExternalLabels}}", - "{{$value := .Value}}", - } - parseTest := func(text string) error { - tmpl := template.NewTemplateExpander( - context.TODO(), - strings.Join(append(defs, text), ""), - "__alert_"+rl.Alert, - tmplData, - model.Time(timestamp.FromTime(time.Now())), - nil, - nil, - ) - return tmpl.ParseTest() - } - - // Parsing Labels. - for k, val := range rl.Labels { - err := parseTest(val) - if err != nil { - errs = append(errs, errors.Wrapf(err, "label %q", k)) - } - } - - // Parsing Annotations. - for k, val := range rl.Annotations { - err := parseTest(val) - if err != nil { - errs = append(errs, errors.Wrapf(err, "annotation %q", k)) - } - } - - return errs -} - -// Parse parses and validates a set of rules. -func Parse(content []byte) (*RuleGroups, []error) { - var groups RuleGroups - if err := yaml.UnmarshalStrict(content, &groups); err != nil { - return nil, []error{err} - } - return &groups, groups.Validate() -} - -// ParseFile reads and parses rules from a file. 
-func ParseFile(file string) (*RuleGroups, []error) { - b, err := ioutil.ReadFile(file) - if err != nil { - return nil, []error{errors.Wrap(err, file)} - } - rgs, errs := Parse(b) - for i := range errs { - errs[i] = errors.Wrap(errs[i], file) - } - return rgs, errs -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go index 43a8cdf008f91..d6aba3b137e67 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go @@ -8,10 +8,9 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/prometheus/prometheus/pkg/rulefmt" "github.com/spf13/afero" - "gopkg.in/yaml.v2" - - legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" + "gopkg.in/yaml.v3" ) // mapper is designed to enusre the provided rule sets are identical @@ -31,7 +30,7 @@ func newMapper(path string, logger log.Logger) *mapper { } } -func (m *mapper) MapRules(user string, ruleConfigs map[string][]legacy_rulefmt.RuleGroup) (bool, []string, error) { +func (m *mapper) MapRules(user string, ruleConfigs map[string][]rulefmt.RuleGroup) (bool, []string, error) { anyUpdated := false filenames := []string{} @@ -86,12 +85,12 @@ func (m *mapper) MapRules(user string, ruleConfigs map[string][]legacy_rulefmt.R return anyUpdated, filenames, nil } -func (m *mapper) writeRuleGroupsIfNewer(groups []legacy_rulefmt.RuleGroup, filename string) (bool, error) { +func (m *mapper) writeRuleGroupsIfNewer(groups []rulefmt.RuleGroup, filename string) (bool, error) { sort.Slice(groups, func(i, j int) bool { return groups[i].Name > groups[j].Name }) - rgs := legacy_rulefmt.RuleGroups{Groups: groups} + rgs := rulefmt.RuleGroups{Groups: groups} d, err := yaml.Marshal(&rgs) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go 
b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go index fb36d7e8089ab..6407e523015d8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go @@ -3,7 +3,8 @@ package ruler import ( "context" "fmt" - "strings" + "net/url" + "regexp" "sync" gklog "github.com/go-kit/kit/log" @@ -74,68 +75,86 @@ func (rn *rulerNotifier) stop() { // Builds a Prometheus config.Config from a ruler.Config with just the required // options to configure notifications to Alertmanager. func buildNotifierConfig(rulerConfig *Config) (*config.Config, error) { - if rulerConfig.AlertmanagerURL.URL == nil { - return &config.Config{}, nil - } + validURLs := make([]*url.URL, 0, len(rulerConfig.AlertmanagerURL)) - u := rulerConfig.AlertmanagerURL - var sdConfig sd_config.ServiceDiscoveryConfig - if rulerConfig.AlertmanagerDiscovery { - if !strings.Contains(u.Host, "_tcp.") { - return nil, fmt.Errorf("When alertmanager-discovery is on, host name must be of the form _portname._tcp.service.fqdn (is %q)", u.Host) + srvDNSregexp := regexp.MustCompile(`^_.+._.+`) + for _, h := range rulerConfig.AlertmanagerURL { + url, err := url.Parse(h) + if err != nil { + return nil, err } - dnsSDConfig := dns.SDConfig{ - Names: []string{u.Host}, - RefreshInterval: model.Duration(rulerConfig.AlertmanagerRefreshInterval), - Type: "SRV", - Port: 0, // Ignored, because of SRV. - } - sdConfig = sd_config.ServiceDiscoveryConfig{ - DNSSDConfigs: []*dns.SDConfig{&dnsSDConfig}, + + if url.String() == "" { + continue } - } else { - sdConfig = sd_config.ServiceDiscoveryConfig{ - StaticConfigs: []*targetgroup.Group{ - { - Targets: []model.LabelSet{ - { - model.AddressLabel: model.LabelValue(u.Host), - }, - }, - }, - }, + + // Given we only support SRV lookups as part of service discovery, we need to ensure + // hosts provided follow this specification: _service._proto.name + // e.g. 
_http._tcp.alertmanager.com + if rulerConfig.AlertmanagerDiscovery && !srvDNSregexp.MatchString(url.Host) { + return nil, fmt.Errorf("when alertmanager-discovery is on, host name must be of the form _portname._tcp.service.fqdn (is %q)", url.Host) } + + validURLs = append(validURLs, url) } - amConfig := &config.AlertmanagerConfig{ - APIVersion: config.AlertmanagerAPIVersionV1, - Scheme: u.Scheme, - PathPrefix: u.Path, - Timeout: model.Duration(rulerConfig.NotificationTimeout), - ServiceDiscoveryConfig: sdConfig, + if len(validURLs) == 0 { + return &config.Config{}, nil } + apiVersion := config.AlertmanagerAPIVersionV1 if rulerConfig.AlertmanangerEnableV2API { - amConfig.APIVersion = config.AlertmanagerAPIVersionV2 + apiVersion = config.AlertmanagerAPIVersionV2 + } + + amConfigs := make([]*config.AlertmanagerConfig, 0, len(validURLs)) + for _, url := range validURLs { + amConfigs = append(amConfigs, amConfigFromURL(rulerConfig, url, apiVersion)) } promConfig := &config.Config{ AlertingConfig: config.AlertingConfig{ - AlertmanagerConfigs: []*config.AlertmanagerConfig{amConfig}, + AlertmanagerConfigs: amConfigs, }, } - if u.User != nil { + return promConfig, nil +} + +func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.AlertmanagerAPIVersion) *config.AlertmanagerConfig { + var sdConfig sd_config.ServiceDiscoveryConfig + if rulerConfig.AlertmanagerDiscovery { + sdConfig.DNSSDConfigs = []*dns.SDConfig{{ + Names: []string{url.Host}, + RefreshInterval: model.Duration(rulerConfig.AlertmanagerRefreshInterval), + Type: "SRV", + Port: 0, // Ignored, because of SRV. 
+ }} + } else { + sdConfig.StaticConfigs = []*targetgroup.Group{{ + Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(url.Host)}}, + }} + } + + amConfig := &config.AlertmanagerConfig{ + APIVersion: apiVersion, + Scheme: url.Scheme, + PathPrefix: url.Path, + Timeout: model.Duration(rulerConfig.NotificationTimeout), + ServiceDiscoveryConfig: sdConfig, + } + + if url.User != nil { amConfig.HTTPClientConfig = config_util.HTTPClientConfig{ BasicAuth: &config_util.BasicAuth{ - Username: u.User.Username(), + Username: url.User.Username(), }, } - if password, isSet := u.User.Password(); isSet { + if password, isSet := url.User.Password(); isSet { amConfig.HTTPClientConfig.BasicAuth.Password = config_util.Secret(password) } } - return promConfig, nil + return amConfig } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go index fb317e57ed50c..5df91741c30bd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -50,6 +50,11 @@ var ( Name: "ruler_config_updates_total", Help: "Total number of config updates triggered by a user", }, []string{"user"}) + configUpdateFailuresTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "ruler_config_update_failures_total", + Help: "Total number of config update failures triggered by a user", + }, []string{"user", "reason"}) managersTotal = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "cortex", Name: "ruler_managers_total", @@ -76,12 +81,12 @@ type Config struct { RulePath string `yaml:"rule_path"` // URL of the Alertmanager to send notifications to. - AlertmanagerURL flagext.URLValue `yaml:"alertmanager_url"` - // Whether to use DNS SRV records to discover alertmanagers. + AlertmanagerURL flagext.StringSlice `yaml:"alertmanager_url"` + // Whether to use DNS SRV records to discover Alertmanager. 
AlertmanagerDiscovery bool `yaml:"enable_alertmanager_discovery"` - // How long to wait between refreshing the list of alertmanagers based on DNS service discovery. + // How long to wait between refreshing the list of Alertmanager based on DNS service discovery. AlertmanagerRefreshInterval time.Duration `yaml:"alertmanager_refresh_interval"` - // Enables the ruler notifier to use the alertmananger V2 API. + // Enables the ruler notifier to use the Alertmananger V2 API. AlertmanangerEnableV2API bool `yaml:"enable_alertmanager_v2"` // Capacity of the queue for notifications to be sent to the Alertmanager. NotificationQueueCapacity int `yaml:"notification_queue_capacity"` @@ -128,12 +133,14 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.EvaluationInterval, "ruler.evaluation-interval", 1*time.Minute, "How frequently to evaluate rules") f.DurationVar(&cfg.EvaluationDelay, "ruler.evaluation-delay-duration", 0, "Duration to delay the evaluation of rules to ensure they underlying metrics have been pushed to cortex.") f.DurationVar(&cfg.PollInterval, "ruler.poll-interval", 1*time.Minute, "How frequently to poll for rule changes") - f.Var(&cfg.AlertmanagerURL, "ruler.alertmanager-url", "URL of the Alertmanager to send notifications to.") - f.BoolVar(&cfg.AlertmanagerDiscovery, "ruler.alertmanager-discovery", false, "Use DNS SRV records to discover alertmanager hosts.") - f.DurationVar(&cfg.AlertmanagerRefreshInterval, "ruler.alertmanager-refresh-interval", 1*time.Minute, "How long to wait between refreshing alertmanager hosts.") - f.BoolVar(&cfg.AlertmanangerEnableV2API, "ruler.alertmanager-use-v2", false, "If enabled requests to alertmanager will utilize the V2 API.") + + f.Var(&cfg.AlertmanagerURL, "ruler.alertmanager-url", "Space-separated list of URL(s) of the Alertmanager(s) to send notifications to. Each Alertmanager URL is treated as a separate group in the configuration. 
Multiple Alertmanagers in HA per group can be supported by using DNS resolution via -ruler.alertmanager-discovery.") + f.BoolVar(&cfg.AlertmanagerDiscovery, "ruler.alertmanager-discovery", false, "Use DNS SRV records to discover Alertmanager hosts.") + f.DurationVar(&cfg.AlertmanagerRefreshInterval, "ruler.alertmanager-refresh-interval", 1*time.Minute, "How long to wait between refreshing DNS resolutions of Alertmanager hosts.") + f.BoolVar(&cfg.AlertmanangerEnableV2API, "ruler.alertmanager-use-v2", false, "If enabled requests to Alertmanager will utilize the V2 API.") f.IntVar(&cfg.NotificationQueueCapacity, "ruler.notification-queue-capacity", 10000, "Capacity of the queue for notifications to be sent to the Alertmanager.") f.DurationVar(&cfg.NotificationTimeout, "ruler.notification-timeout", 10*time.Second, "HTTP timeout duration when sending notifications to the Alertmanager.") + f.DurationVar(&cfg.SearchPendingFor, "ruler.search-pending-for", 5*time.Minute, "Time to spend searching for a pending ruler when shutting down.") f.BoolVar(&cfg.EnableSharding, "ruler.enable-sharding", false, "Distribute rule evaluation using ring backend") f.DurationVar(&cfg.FlushCheckPeriod, "ruler.flush-period", 1*time.Minute, "Period with which to attempt to flush rule groups.") @@ -173,17 +180,12 @@ type Ruler struct { } // NewRuler creates a new ruler from a distributor and chunk store. 
-func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable, pusher Pusher, reg prometheus.Registerer, logger log.Logger) (*Ruler, error) { +func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable, pusher Pusher, reg prometheus.Registerer, logger log.Logger, ruleStore rules.RuleStore) (*Ruler, error) { ncfg, err := buildNotifierConfig(&cfg) if err != nil { return nil, err } - ruleStore, err := NewRuleStorage(cfg.StoreConfig) - if err != nil { - return nil, err - } - ruler := &Ruler{ cfg: cfg, engine: engine, @@ -503,14 +505,18 @@ func (r *Ruler) syncManager(ctx context.Context, user string, groups store.RuleG if !exists { manager, err = r.newManager(ctx, user) if err != nil { + configUpdateFailuresTotal.WithLabelValues(user, "rule-manager-creation-failure").Inc() level.Error(r.logger).Log("msg", "unable to create rule manager", "user", user, "err", err) return } - manager.Run() + // manager.Run() starts running the manager and blocks until Stop() is called. + // Hence run it as another goroutine. 
+ go manager.Run() r.userManagers[user] = manager } err = manager.Update(r.cfg.EvaluationInterval, files, nil) if err != nil { + configUpdateFailuresTotal.WithLabelValues(user, "rules-update-failure").Inc() level.Error(r.logger).Log("msg", "unable to update rule manager", "user", user, "err", err) return } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go index fed3c549feea4..dbd9a26ddc5bf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go @@ -3,16 +3,16 @@ package rules import ( time "time" - "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/rulefmt" + "gopkg.in/yaml.v3" - legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" + "github.com/cortexproject/cortex/pkg/ingester/client" ) // ToProto transforms a formatted prometheus rulegroup to a rule group protobuf -func ToProto(user string, namespace string, rl legacy_rulefmt.RuleGroup) *RuleGroupDesc { +func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc { rg := RuleGroupDesc{ Name: rl.Name, Namespace: namespace, @@ -23,13 +23,13 @@ func ToProto(user string, namespace string, rl legacy_rulefmt.RuleGroup) *RuleGr return &rg } -func formattedRuleToProto(rls []legacy_rulefmt.Rule) []*RuleDesc { +func formattedRuleToProto(rls []rulefmt.RuleNode) []*RuleDesc { rules := make([]*RuleDesc, len(rls)) for i := range rls { rules[i] = &RuleDesc{ - Expr: rls[i].Expr, - Record: rls[i].Record, - Alert: rls[i].Alert, + Expr: rls[i].Expr.Value, + Record: rls[i].Record.Value, + Alert: rls[i].Alert.Value, For: time.Duration(rls[i].For), Labels: client.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Labels)), Annotations: 
client.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Annotations)), @@ -40,18 +40,24 @@ func formattedRuleToProto(rls []legacy_rulefmt.Rule) []*RuleDesc { } // FromProto generates a rulefmt RuleGroup -func FromProto(rg *RuleGroupDesc) legacy_rulefmt.RuleGroup { - formattedRuleGroup := legacy_rulefmt.RuleGroup{ +func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup { + formattedRuleGroup := rulefmt.RuleGroup{ Name: rg.GetName(), Interval: model.Duration(rg.Interval), - Rules: make([]legacy_rulefmt.Rule, len(rg.GetRules())), + Rules: make([]rulefmt.RuleNode, len(rg.GetRules())), } for i, rl := range rg.GetRules() { - newRule := legacy_rulefmt.Rule{ - Record: rl.GetRecord(), - Alert: rl.GetAlert(), - Expr: rl.GetExpr(), + recordNode := yaml.Node{} + recordNode.SetString(rl.GetRecord()) + alertNode := yaml.Node{} + alertNode.SetString(rl.GetAlert()) + exprNode := yaml.Node{} + exprNode.SetString(rl.GetExpr()) + newRule := rulefmt.RuleNode{ + Record: recordNode, + Alert: alertNode, + Expr: exprNode, Labels: client.FromLabelAdaptersToLabels(rl.Labels).Map(), Annotations: client.FromLabelAdaptersToLabels(rl.Annotations).Map(), For: model.Duration(rl.GetFor()), diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go new file mode 100644 index 0000000000000..7c18ce8169fc8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/local/local.go @@ -0,0 +1,131 @@ +package local + +import ( + "context" + "flag" + "io/ioutil" + "path/filepath" + + "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/rulefmt" + + "github.com/cortexproject/cortex/pkg/ruler/rules" +) + +type Config struct { + Directory string `yaml:"directory"` +} + +// RegisterFlags registers flags. 
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Directory, prefix+"local.directory", "", "Directory to scan for rules") +} + +// Client expects to load already existing rules located at: +// cfg.Directory / userID / namespace +type Client struct { + cfg Config +} + +func NewLocalRulesClient(cfg Config) (*Client, error) { + if cfg.Directory == "" { + return nil, errors.New("directory required for local rules config") + } + + return &Client{ + cfg: cfg, + }, nil +} + +// ListAllRuleGroups implements RuleStore +func (l *Client) ListAllRuleGroups(ctx context.Context) (map[string]rules.RuleGroupList, error) { + lists := make(map[string]rules.RuleGroupList) + + root := l.cfg.Directory + infos, err := ioutil.ReadDir(root) + if err != nil { + return nil, errors.Wrapf(err, "unable to read dir %s", root) + } + + for _, info := range infos { + if !info.IsDir() { + continue + } + + list, err := l.listAllRulesGroupsForUser(ctx, info.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to list rule groups for user %s", info.Name()) + } + + lists[info.Name()] = list + } + + return lists, nil +} + +// ListRuleGroups implements RuleStore +func (l *Client) ListRuleGroups(ctx context.Context, userID string, namespace string) (rules.RuleGroupList, error) { + if namespace != "" { + return l.listAllRulesGroupsForUserAndNamespace(ctx, userID, namespace) + } + + return l.listAllRulesGroupsForUser(ctx, userID) +} + +// GetRuleGroup implements RuleStore +func (l *Client) GetRuleGroup(ctx context.Context, userID, namespace, group string) (*rules.RuleGroupDesc, error) { + return nil, errors.New("GetRuleGroup unsupported in rule local store") +} + +// SetRuleGroup implements RuleStore +func (l *Client) SetRuleGroup(ctx context.Context, userID, namespace string, group *rules.RuleGroupDesc) error { + return errors.New("SetRuleGroup unsupported in rule local store") +} + +// DeleteRuleGroup implements RuleStore +func (l *Client) 
DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error { + return errors.New("DeleteRuleGroup unsupported in rule local store") +} + +func (l *Client) listAllRulesGroupsForUser(ctx context.Context, userID string) (rules.RuleGroupList, error) { + var allLists rules.RuleGroupList + + root := filepath.Join(l.cfg.Directory, userID) + infos, err := ioutil.ReadDir(root) + if err != nil { + return nil, errors.Wrapf(err, "unable to read dir %s", root) + } + + for _, info := range infos { + if info.IsDir() { + continue + } + + list, err := l.listAllRulesGroupsForUserAndNamespace(ctx, userID, info.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to list rule group for user %s and namespace %s", userID, info.Name()) + } + + allLists = append(allLists, list...) + } + + return allLists, nil +} + +func (l *Client) listAllRulesGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rules.RuleGroupList, error) { + filename := filepath.Join(l.cfg.Directory, userID, namespace) + + rulegroups, allErrors := rulefmt.ParseFile(filename) + if len(allErrors) > 0 { + return nil, errors.Wrapf(allErrors[0], "error parsing %s", filename) + } + + var list rules.RuleGroupList + + for _, group := range rulegroups.Groups { + desc := rules.ToProto(userID, namespace, group) + list = append(list, desc) + } + + return list, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go index d48777c377197..884e282d10ea2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go @@ -4,11 +4,10 @@ import ( "context" "errors" - "github.com/cortexproject/cortex/pkg/configs/userconfig" + "github.com/prometheus/prometheus/pkg/rulefmt" "github.com/cortexproject/cortex/pkg/configs/client" - - legacy_rulefmt 
"github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" + "github.com/cortexproject/cortex/pkg/configs/userconfig" ) var ( @@ -34,11 +33,11 @@ type RuleGroupList []*RuleGroupDesc // Formatted returns the rule group list as a set of formatted rule groups mapped // by namespace -func (l RuleGroupList) Formatted() map[string][]legacy_rulefmt.RuleGroup { - ruleMap := map[string][]legacy_rulefmt.RuleGroup{} +func (l RuleGroupList) Formatted() map[string][]rulefmt.RuleGroup { + ruleMap := map[string][]rulefmt.RuleGroup{} for _, g := range l { if _, exists := ruleMap[g.Namespace]; !exists { - ruleMap[g.Namespace] = []legacy_rulefmt.RuleGroup{FromProto(g)} + ruleMap[g.Namespace] = []rulefmt.RuleGroup{FromProto(g)} continue } ruleMap[g.Namespace] = append(ruleMap[g.Namespace], FromProto(g)) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go index 8ab6657b7c36e..ffde865e02e8d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go @@ -14,6 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/openstack" "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/ruler/rules/local" "github.com/cortexproject/cortex/pkg/ruler/rules/objectclient" ) @@ -27,6 +28,7 @@ type RuleStoreConfig struct { GCS gcp.GCSConfig `yaml:"gcs"` S3 aws.S3Config `yaml:"s3"` Swift openstack.SwiftConfig `yaml:"swift"` + Local local.Config `yaml:"local"` mock rules.RuleStore `yaml:"-"` } @@ -38,7 +40,9 @@ func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f) - f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule 
storage (configdb, azure, gcs, s3)") + cfg.Local.RegisterFlagsWithPrefix("ruler.storage.", f) + + f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local)") } // Validate config and returns error on failure @@ -49,6 +53,11 @@ func (cfg *RuleStoreConfig) Validate() error { return nil } +// IsDefaults returns true if the storage options have not been set +func (cfg *RuleStoreConfig) IsDefaults() bool { + return cfg.Type == "configdb" && cfg.ConfigDB.ConfigsAPIURL.URL == nil +} + // NewRuleStorage returns a new rule storage backend poller and store func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) { if cfg.mock != nil { @@ -72,8 +81,10 @@ func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) { return newObjRuleStore(aws.NewS3ObjectClient(cfg.S3, "")) case "swift": return newObjRuleStore(openstack.NewSwiftObjectClient(cfg.Swift, "")) + case "local": + return local.NewLocalRulesClient(cfg.Local) default: - return nil, fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs, s3, swift, azure", cfg.Type) + return nil, fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs, s3, swift, azure, local", cfg.Type) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go index fe67bde4a0e1f..ddd93f3f7f793 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go @@ -25,6 +25,6 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "S3 access key ID") f.Var(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "S3 secret access key") f.StringVar(&cfg.BucketName, prefix+"s3.bucket-name", "", "S3 bucket name") - 
f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "S3 endpoint without schema") + f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.") f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 75315a71c72b9..6049cf54e4fa9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -80,6 +80,10 @@ type Config struct { GCS gcs.Config `yaml:"gcs"` Azure azure.Config `yaml:"azure"` Filesystem filesystem.Config `yaml:"filesystem"` + + // If true, user TSDBs are not closed on shutdown. Only for testing. + // If false (default), user TSDBs are closed to make sure all resources are released and closed properly. 
+ KeepUserTSDBOpenOnShutdown bool `yaml:"-"` } // DurationList is the block ranges for a tsdb @@ -180,7 +184,6 @@ type BucketStoreConfig struct { SyncDir string `yaml:"sync_dir"` SyncInterval time.Duration `yaml:"sync_interval"` MaxChunkPoolBytes uint64 `yaml:"max_chunk_pool_bytes"` - MaxSampleCount uint64 `yaml:"max_sample_count"` MaxConcurrent int `yaml:"max_concurrent"` TenantSyncConcurrency int `yaml:"tenant_sync_concurrency"` BlockSyncConcurrency int `yaml:"block_sync_concurrency"` @@ -208,14 +211,13 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.SyncDir, "experimental.tsdb.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") f.DurationVar(&cfg.SyncInterval, "experimental.tsdb.bucket-store.sync-interval", 5*time.Minute, "How frequently scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.tsdb.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") - f.Uint64Var(&cfg.MaxSampleCount, "experimental.tsdb.bucket-store.max-sample-count", 0, "Max number of samples per query when loading series from the long-term storage. 0 disables the limit.") f.IntVar(&cfg.MaxConcurrent, "experimental.tsdb.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. 
The limit is shared across all tenants.") f.IntVar(&cfg.TenantSyncConcurrency, "experimental.tsdb.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants synching blocks.") f.IntVar(&cfg.BlockSyncConcurrency, "experimental.tsdb.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks synching per tenant.") f.IntVar(&cfg.MetaSyncConcurrency, "experimental.tsdb.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.") f.DurationVar(&cfg.ConsistencyDelay, "experimental.tsdb.bucket-store.consistency-delay", 0, "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.") f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.tsdb.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ - "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet."+ + "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. 
"+ "Default is 6h, half of the default value for -compactor.deletion-delay.") f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.tsdb.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go index b480c41a70a2d..381434ca8af3d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go @@ -283,7 +283,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro u.indexCache, u.queryGate, u.cfg.BucketStore.MaxChunkPoolBytes, - u.cfg.BucketStore.MaxSampleCount, + 0, // No max samples limit (it's flawed in Thanos) u.logLevel.String() == "debug", // Turn on debug logging, if the log level is set to debug u.cfg.BucketStore.BlockSyncConcurrency, nil, // Do not limit timerange. diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go b/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go new file mode 100644 index 0000000000000..42557e15240df --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/fakeauth/fake_auth.go @@ -0,0 +1,70 @@ +// Package fakeauth provides middlewares thats injects a fake userID, so the rest of the code +// can continue to be multitenant. +package fakeauth + +import ( + "context" + "net/http" + + "github.com/weaveworks/common/middleware" + "github.com/weaveworks/common/server" + "github.com/weaveworks/common/user" + "google.golang.org/grpc" +) + +// SetupAuthMiddleware for the given server config. 
+func SetupAuthMiddleware(config *server.Config, enabled bool, noGRPCAuthOn []string) middleware.Interface { + if enabled { + config.GRPCMiddleware = append(config.GRPCMiddleware, + middleware.ServerUserHeaderInterceptor, + ) + config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, + func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + for _, path := range noGRPCAuthOn { + if info.FullMethod == path { + return handler(srv, ss) + } + } + return middleware.StreamServerUserHeaderInterceptor(srv, ss, info, handler) + }, + ) + return middleware.AuthenticateUser + } + + config.GRPCMiddleware = append(config.GRPCMiddleware, + fakeGRPCAuthUniaryMiddleware, + ) + config.GRPCStreamMiddleware = append(config.GRPCStreamMiddleware, + fakeGRPCAuthStreamMiddleware, + ) + return fakeHTTPAuthMiddleware +} + +var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := user.InjectOrgID(r.Context(), "fake") + next.ServeHTTP(w, r.WithContext(ctx)) + }) +}) + +var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + ctx = user.InjectOrgID(ctx, "fake") + return handler(ctx, req) +} + +var fakeGRPCAuthStreamMiddleware = func(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + ctx := user.InjectOrgID(ss.Context(), "fake") + return handler(srv, serverStream{ + ctx: ctx, + ServerStream: ss, + }) +} + +type serverStream struct { + ctx context.Context + grpc.ServerStream +} + +func (ss serverStream) Context() context.Context { + return ss.ctx +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go index ae681fd59b868..38fe33676616b 100644 --- 
a/vendor/github.com/cortexproject/cortex/pkg/util/log.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log.go @@ -39,7 +39,7 @@ func init() { // InitLogger initialises the global gokit logger (util.Logger) and overrides the // default logger for the server. func InitLogger(cfg *server.Config) { - l, err := NewPrometheusLogger(cfg.LogLevel) + l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat) if err != nil { panic(err) } @@ -61,8 +61,11 @@ type PrometheusLogger struct { // NewPrometheusLogger creates a new instance of PrometheusLogger which exposes // Prometheus counters for various log levels. -func NewPrometheusLogger(l logging.Level) (log.Logger, error) { +func NewPrometheusLogger(l logging.Level, format logging.Format) (log.Logger, error) { logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + if format.String() == "json" { + logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) + } logger = level.NewFilter(logger, l.Gokit) // Initialise counters for all supported levels: diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go index 953361804f155..ca4f34c974e57 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go @@ -1,27 +1,36 @@ package runtimeconfig import ( + "bytes" "context" + "crypto/sha256" + "errors" "flag" + "fmt" + "io" + "io/ioutil" "sync" "time" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) // Loader loads the configuration from file. 
-type Loader func(filename string) (interface{}, error) +type Loader func(r io.Reader) (interface{}, error) // ManagerConfig holds the config for an Manager instance. // It holds config related to loading per-tenant config. type ManagerConfig struct { ReloadPeriod time.Duration `yaml:"period"` - LoadPath string `yaml:"file"` - Loader Loader `yaml:"-"` + // LoadPath contains the path to the runtime config file, requires an + // non-empty value + LoadPath string `yaml:"file"` + Loader Loader `yaml:"-"` } // RegisterFlags registers flags. @@ -44,20 +53,25 @@ type Manager struct { config interface{} configLoadSuccess prometheus.Gauge + configHash *prometheus.GaugeVec } // NewRuntimeConfigManager creates an instance of Manager and starts reload config loop based on config func NewRuntimeConfigManager(cfg ManagerConfig, registerer prometheus.Registerer) (*Manager, error) { + if cfg.LoadPath == "" { + return nil, errors.New("LoadPath is empty") + } + mgr := Manager{ cfg: cfg, - configLoadSuccess: prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "cortex_overrides_last_reload_successful", - Help: "Whether the last config reload attempt was successful.", + configLoadSuccess: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_runtime_config_last_reload_successful", + Help: "Whether the last runtime-config reload attempt was successful.", }), - } - - if registerer != nil { - registerer.MustRegister(mgr.configLoadSuccess) + configHash: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_runtime_config_hash", + Help: "Hash of the currently active runtime config file.", + }, []string{"sha256"}), } mgr.Service = services.NewBasicService(mgr.start, mgr.loop, mgr.stop) @@ -131,7 +145,14 @@ func (om *Manager) loop(ctx context.Context) error { // loadConfig loads configuration using the loader function, and if successful, // stores it as current configuration and notifies listeners. 
func (om *Manager) loadConfig() error { - cfg, err := om.cfg.Loader(om.cfg.LoadPath) + buf, err := ioutil.ReadFile(om.cfg.LoadPath) + if err != nil { + om.configLoadSuccess.Set(0) + return err + } + hash := sha256.Sum256(buf) + + cfg, err := om.cfg.Loader(bytes.NewReader(buf)) if err != nil { om.configLoadSuccess.Set(0) return err @@ -141,6 +162,10 @@ func (om *Manager) loadConfig() error { om.setConfig(cfg) om.callListeners(cfg) + // expose hash of runtime config + om.configHash.Reset() + om.configHash.WithLabelValues(fmt.Sprintf("%x", hash[:])).Set(1) + return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/services/services.go b/vendor/github.com/cortexproject/cortex/pkg/util/services/services.go index 397869555288d..18b849bdbc141 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/services/services.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/services/services.go @@ -7,7 +7,7 @@ import ( // Initializes basic service as an "idle" service -- it doesn't do anything in its Running state, // but still supports all state transitions. -func NewIdleService(up StartingFn, down StoppingFn) Service { +func NewIdleService(up StartingFn, down StoppingFn) *BasicService { run := func(ctx context.Context) error { <-ctx.Done() return nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index be984e0834dd6..62a9029989189 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -100,7 +100,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. 
Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") - f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is ignored when running the Cortex blocks storage.") + f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage.") f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.") f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.") f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. 
This limit is ignored when running the Cortex blocks storage.") diff --git a/vendor/github.com/digitalocean/godo/.whitesource b/vendor/github.com/digitalocean/godo/.whitesource index e0aaa3e9ebaae..6b6a735e8e4d0 100644 --- a/vendor/github.com/digitalocean/godo/.whitesource +++ b/vendor/github.com/digitalocean/godo/.whitesource @@ -5,4 +5,4 @@ "issueSettings": { "minSeverityLevel": "LOW" } -} \ No newline at end of file +} diff --git a/vendor/github.com/digitalocean/godo/1-click.go b/vendor/github.com/digitalocean/godo/1-click.go index fab04fe510f4f..d1ba001f1ccc8 100644 --- a/vendor/github.com/digitalocean/godo/1-click.go +++ b/vendor/github.com/digitalocean/godo/1-click.go @@ -13,6 +13,7 @@ const oneClickBasePath = "v2/1-clicks" // See: https://developers.digitalocean.com/documentation/v2/#1-click-applications type OneClickService interface { List(context.Context, string) ([]*OneClick, *Response, error) + InstallKubernetes(context.Context, *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error) } var _ OneClickService = &OneClickServiceOp{} @@ -33,6 +34,17 @@ type OneClicksRoot struct { List []*OneClick `json:"1_clicks"` } +// InstallKubernetesAppsRequest represents a request required to install 1-click kubernetes apps +type InstallKubernetesAppsRequest struct { + Slugs []string `json:"addon_slugs"` + ClusterUUID string `json:"cluster_uuid"` +} + +// InstallKubernetesAppsResponse is the response of a kubernetes 1-click install request +type InstallKubernetesAppsResponse struct { + Message string `json:"message"` +} + // List returns a list of the available 1-click applications. 
func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([]*OneClick, *Response, error) { path := fmt.Sprintf(`%s?type=%s`, oneClickBasePath, oneClickType) @@ -50,3 +62,20 @@ func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([] return root.List, resp, nil } + +// InstallKubernetes installs an addon on a kubernetes cluster +func (ocs *OneClickServiceOp) InstallKubernetes(ctx context.Context, install *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error) { + path := fmt.Sprintf(oneClickBasePath + "/kubernetes") + + req, err := ocs.client.NewRequest(ctx, http.MethodPost, path, install) + if err != nil { + return nil, nil, err + } + + responseMessage := new(InstallKubernetesAppsResponse) + resp, err := ocs.client.Do(ctx, req, responseMessage) + if err != nil { + return nil, resp, err + } + return responseMessage, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index c826205fac109..729e7bb9df718 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,28 @@ # Change Log +## [v1.42.0] - 2020-07-22 + +- #357 invoices: add category to InvoiceItem - @rbutler +- #358 apps: add support for following logs - @nanzhong + +## [v1.41.0] - 2020-07-17 + +- #355 kubernetes: Add support for surge upgrades - @varshavaradarajan + +## [v1.40.0] - 2020-07-16 + +- #347 Make Rate limits thread safe - @roidelapluie +- #353 Reuse TCP connection - @itsksaurabh + +## [v1.39.0] - 2020-07-14 + +- #345, #346 Add app platform support [beta] - @nanzhong + +## [v1.38.0] - 2020-06-18 + +- #341 Install 1-click applications on a Kubernetes cluster - @keladhruv +- #340 Add RecordsByType, RecordsByName and RecordsByTypeAndName to the DomainsService - @viola + ## [v1.37.0] - 2020-06-01 - #336 registry: URL encode repository names when building URLs. 
@adamwg diff --git a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md index 33f03132a8187..d6f453baa7959 100644 --- a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md +++ b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md @@ -25,7 +25,7 @@ go test -mod=vendor . Godo follows [semver](https://www.semver.org) versioning semantics. New functionality should be accompanied by increment to the minor -version number. Any code merged to master is subject to release. +version number. Any code merged to main is subject to release. ## Releasing diff --git a/vendor/github.com/digitalocean/godo/README.md b/vendor/github.com/digitalocean/godo/README.md index cadeb69edbb9b..cd72d110e640a 100644 --- a/vendor/github.com/digitalocean/godo/README.md +++ b/vendor/github.com/digitalocean/godo/README.md @@ -98,9 +98,7 @@ func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, erro } // append the current page's droplets to our list - for _, d := range droplets { - list = append(list, d) - } + list = append(list, droplets...) // if we are at the last page, break out the for loop if resp.Links == nil || resp.Links.IsLastPage() { diff --git a/vendor/github.com/digitalocean/godo/apps.gen.go b/vendor/github.com/digitalocean/godo/apps.gen.go new file mode 100644 index 0000000000000..8eb73ce719ada --- /dev/null +++ b/vendor/github.com/digitalocean/godo/apps.gen.go @@ -0,0 +1,136 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. 
+// $ bundle -pkg godo -prefix ./dev/dist/godo + +package godo + +import () + +// AppDatabaseSpec struct for AppDatabaseSpec +type AppDatabaseSpec struct { + Name string `json:"name"` + Engine AppDatabaseSpecEngine `json:"engine,omitempty"` + Version string `json:"version,omitempty"` + Size string `json:"size,omitempty"` + NumNodes int64 `json:"num_nodes,omitempty"` +} + +// AppDatabaseSpecEngine the model 'AppDatabaseSpecEngine' +type AppDatabaseSpecEngine string + +// List of AppDatabaseSpecEngine +const ( + APPDATABASESPECENGINE_UNSET AppDatabaseSpecEngine = "UNSET" + APPDATABASESPECENGINE_MYSQL AppDatabaseSpecEngine = "MYSQL" + APPDATABASESPECENGINE_PG AppDatabaseSpecEngine = "PG" + APPDATABASESPECENGINE_REDIS AppDatabaseSpecEngine = "REDIS" +) + +// AppDomainSpec struct for AppDomainSpec +type AppDomainSpec struct { + Domain string `json:"domain"` +} + +// AppRouteSpec struct for AppRouteSpec +type AppRouteSpec struct { + Path string `json:"path,omitempty"` +} + +// AppServiceSpec struct for AppServiceSpec +type AppServiceSpec struct { + Name string `json:"name"` + RunCommand string `json:"run_command,omitempty"` + BuildCommand string `json:"build_command,omitempty"` + HTTPPort int64 `json:"http_port,omitempty"` + DockerfilePath string `json:"dockerfile_path,omitempty"` + Git GitSourceSpec `json:"git,omitempty"` + GitHub GitHubSourceSpec `json:"github,omitempty"` + Envs []AppVariableDefinition `json:"envs,omitempty"` + InstanceSizeSlug string `json:"instance_size_slug,omitempty"` + InstanceCount int64 `json:"instance_count,omitempty"` + Routes []AppRouteSpec `json:"routes,omitempty"` + SourceDir string `json:"source_dir,omitempty"` + EnvironmentSlug string `json:"environment_slug,omitempty"` +} + +// AppSpec struct for AppSpec +type AppSpec struct { + Services []AppServiceSpec `json:"services,omitempty"` + StaticSites []AppStaticSiteSpec `json:"static_sites,omitempty"` + Databases []AppDatabaseSpec `json:"databases,omitempty"` + Workers []AppWorkerSpec 
`json:"workers,omitempty"` + Region string `json:"region,omitempty"` + Name string `json:"name"` + Domains []AppDomainSpec `json:"domains,omitempty"` +} + +// AppStaticSiteSpec struct for AppStaticSiteSpec +type AppStaticSiteSpec struct { + Name string `json:"name"` + BuildCommand string `json:"build_command,omitempty"` + Git GitSourceSpec `json:"git,omitempty"` + GitHub GitHubSourceSpec `json:"github,omitempty"` + Envs []AppVariableDefinition `json:"envs,omitempty"` + Routes []AppRouteSpec `json:"routes,omitempty"` + SourceDir string `json:"source_dir,omitempty"` + EnvironmentSlug string `json:"environment_slug,omitempty"` +} + +// AppVariableDefinition struct for AppVariableDefinition +type AppVariableDefinition struct { + Value string `json:"value,omitempty"` + Scope VariableScope `json:"scope,omitempty"` + // POSIX allows a broader env var definition, but we restrict to what is allowed by bash. http://git.savannah.gnu.org/cgit/bash.git/tree/general.h?h=bash-5.0#n124 Based on the POSIX spec and some casting to unsigned char in bash code I think this is restricted to ASCII (not unicode). 
+ Key string `json:"key"` + Type VariableType `json:"type,omitempty"` + EncryptedValue string `json:"encrypted_value,omitempty"` +} + +// AppWorkerSpec struct for AppWorkerSpec +type AppWorkerSpec struct { + Name string `json:"name"` + RunCommand string `json:"run_command,omitempty"` + BuildCommand string `json:"build_command,omitempty"` + DockerfilePath string `json:"dockerfile_path,omitempty"` + Git GitSourceSpec `json:"git,omitempty"` + GitHub GitHubSourceSpec `json:"github,omitempty"` + Envs []AppVariableDefinition `json:"envs,omitempty"` + InstanceSizeSlug string `json:"instance_size_slug,omitempty"` + InstanceCount int64 `json:"instance_count,omitempty"` + SourceDir string `json:"source_dir,omitempty"` + EnvironmentSlug string `json:"environment_slug,omitempty"` +} + +// GitHubSourceSpec struct for GitHubSourceSpec +type GitHubSourceSpec struct { + Repo string `json:"repo"` + Branch string `json:"branch"` + DeployOnPush bool `json:"deploy_on_push,omitempty"` +} + +// GitSourceSpec struct for GitSourceSpec +type GitSourceSpec struct { + Repo string `json:"repo,omitempty"` + RequiresAuth bool `json:"requires_auth,omitempty"` + Branch string `json:"branch,omitempty"` + RepoCloneURL string `json:"repo_clone_url,omitempty"` +} + +// VariableScope the model 'VariableScope' +type VariableScope string + +// List of VariableScope +const ( + VARIABLESCOPE_UNSET VariableScope = "UNSET" + VARIABLESCOPE_RUN_TIME VariableScope = "RUN_TIME" + VARIABLESCOPE_BUILD_TIME VariableScope = "BUILD_TIME" + VARIABLESCOPE_RUN_AND_BUILD_TIME VariableScope = "RUN_AND_BUILD_TIME" +) + +// VariableType the model 'VariableType' +type VariableType string + +// List of VariableType +const ( + VARIABLETYPE_GENERAL VariableType = "GENERAL" + VARIABLETYPE_SECRET VariableType = "SECRET" +) diff --git a/vendor/github.com/digitalocean/godo/apps.go b/vendor/github.com/digitalocean/godo/apps.go new file mode 100644 index 0000000000000..2459716035e50 --- /dev/null +++ 
b/vendor/github.com/digitalocean/godo/apps.go @@ -0,0 +1,278 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const ( + appsBasePath = "/v2/apps" +) + +// AppLogType is the type of app logs. +type AppLogType string + +const ( + // AppLogTypeBuild represents build logs. + AppLogTypeBuild AppLogType = "BUILD" + // AppLogTypeDeploy represents deploy logs. + AppLogTypeDeploy AppLogType = "DEPLOY" + // AppLogTypeRun represents run logs. + AppLogTypeRun AppLogType = "RUN" +) + +// AppsService is an interface for interfacing with the App Platform endpoints +// of the DigitalOcean API. +type AppsService interface { + Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error) + Get(ctx context.Context, appID string) (*App, *Response, error) + List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error) + Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error) + Delete(ctx context.Context, appID string) (*Response, error) + + GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) + ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) + CreateDeployment(ctx context.Context, appID string) (*Deployment, *Response, error) + + GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool) (*AppLogs, *Response, error) +} + +// App represents an app. +type App struct { + ID string `json:"id"` + Spec *AppSpec `json:"spec"` + DefaultIngress string `json:"default_ingress"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + ActiveDeployment *Deployment `json:"active_deployment,omitempty"` + InProgressDeployment *Deployment `json:"in_progress_deployment,omitempty"` +} + +// Deployment represents a deployment for an app. 
+type Deployment struct { + ID string `json:"id"` + Spec *AppSpec `json:"spec"` + Services []*DeploymentService `json:"services,omitempty"` + Workers []*DeploymentWorker `json:"workers,omitempty"` + StaticSites []*DeploymentStaticSite `json:"static_sites,omitempty"` + + Cause string `json:"cause"` + Progress *DeploymentProgress `json:"progress"` + + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// DeploymentService represents a service component in a deployment. +type DeploymentService struct { + Name string `json:"name,omitempty"` + SourceCommitHash string `json:"source_commit_hash"` +} + +// DeploymentWorker represents a worker component in a deployment. +type DeploymentWorker struct { + Name string `json:"name,omitempty"` + SourceCommitHash string `json:"source_commit_hash"` +} + +// DeploymentStaticSite represents a static site component in a deployment. +type DeploymentStaticSite struct { + Name string `json:"name,omitempty"` + SourceCommitHash string `json:"source_commit_hash"` +} + +// DeploymentProgress represents the total progress of a deployment. +type DeploymentProgress struct { + PendingSteps int `json:"pending_steps"` + RunningSteps int `json:"running_steps"` + SuccessSteps int `json:"success_steps"` + ErrorSteps int `json:"error_steps"` + TotalSteps int `json:"total_steps"` + + Steps []*DeploymentProgressStep `json:"steps"` +} + +// DeploymentProgressStep represents the progress of a deployment step. +type DeploymentProgressStep struct { + Name string `json:"name"` + Status string `json:"status"` + Steps []*DeploymentProgressStep `json:"steps,omitempty"` + Attempts uint32 `json:"attempts"` + StartedAt time.Time `json:"started_at,omitempty"` + EndedAt time.Time `json:"ended_at,omitempty"` +} + +// AppLogs represent app logs. +type AppLogs struct { + LiveURL string `json:"live_url"` + HistoricURLs []string `json:"historic_urls"` +} + +// AppCreateRequest represents a request to create an app. 
+type AppCreateRequest struct { + Spec *AppSpec `json:"spec"` +} + +// AppUpdateRequest represents a request to update an app. +type AppUpdateRequest struct { + Spec *AppSpec `json:"spec"` +} + +type appRoot struct { + App *App `json:"app"` +} + +type appsRoot struct { + Apps []*App `json:"apps"` +} + +type deploymentRoot struct { + Deployment *Deployment `json:"deployment"` +} + +type deploymentsRoot struct { + Deployments []*Deployment `json:"deployments"` +} + +// AppsServiceOp handles communication with Apps methods of the DigitalOcean API. +type AppsServiceOp struct { + client *Client +} + +// Creates an app. +func (s *AppsServiceOp) Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error) { + path := appsBasePath + req, err := s.client.NewRequest(ctx, http.MethodPost, path, create) + if err != nil { + return nil, nil, err + } + + root := new(appRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.App, resp, nil +} + +// Get an app. +func (s *AppsServiceOp) Get(ctx context.Context, appID string) (*App, *Response, error) { + path := fmt.Sprintf("%s/%s", appsBasePath, appID) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(appRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.App, resp, nil +} + +// List apps. +func (s *AppsServiceOp) List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error) { + path := appsBasePath + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(appsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Apps, resp, nil +} + +// Update an app. 
+func (s *AppsServiceOp) Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error) { + path := fmt.Sprintf("%s/%s", appsBasePath, appID) + req, err := s.client.NewRequest(ctx, http.MethodPut, path, update) + if err != nil { + return nil, nil, err + } + + root := new(appRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.App, resp, nil +} + +// Delete an app. +func (s *AppsServiceOp) Delete(ctx context.Context, appID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", appsBasePath, appID) + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// GetDeployment gets an app deployment. +func (s *AppsServiceOp) GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) { + path := fmt.Sprintf("%s/%s/deployments/%s", appsBasePath, appID, deploymentID) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(deploymentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Deployment, resp, nil +} + +// ListDeployments lists an app deployments. +func (s *AppsServiceOp) ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) { + path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(deploymentsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Deployments, resp, nil +} + +// CreateDeployment creates an app deployment. 
+func (s *AppsServiceOp) CreateDeployment(ctx context.Context, appID string) (*Deployment, *Response, error) { + path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, nil) + if err != nil { + return nil, nil, err + } + root := new(deploymentRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Deployment, resp, nil +} + +// GetLogs retrieves app logs. +func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool) (*AppLogs, *Response, error) { + url := fmt.Sprintf("%s/%s/deployments/%s/components/%s/logs?type=%s&follow=%t", appsBasePath, appID, deploymentID, component, logType, follow) + req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, nil, err + } + logs := new(AppLogs) + resp, err := s.client.Do(ctx, req, logs) + if err != nil { + return nil, resp, err + } + return logs, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/domains.go b/vendor/github.com/digitalocean/godo/domains.go index 43c04244738c8..5037013925039 100644 --- a/vendor/github.com/digitalocean/godo/domains.go +++ b/vendor/github.com/digitalocean/godo/domains.go @@ -18,6 +18,9 @@ type DomainsService interface { Delete(context.Context, string) (*Response, error) Records(context.Context, string, *ListOptions) ([]DomainRecord, *Response, error) + RecordsByType(context.Context, string, string, *ListOptions) ([]DomainRecord, *Response, error) + RecordsByName(context.Context, string, string, *ListOptions) ([]DomainRecord, *Response, error) + RecordsByTypeAndName(context.Context, string, string, string, *ListOptions) ([]DomainRecord, *Response, error) Record(context.Context, string, int) (*DomainRecord, *Response, error) DeleteRecord(context.Context, string, int) (*Response, error) EditRecord(context.Context, string, int, *DomainRecordEditRequest) 
(*DomainRecord, *Response, error) @@ -201,7 +204,7 @@ func (d DomainRecordEditRequest) String() string { return Stringify(d) } -// Records returns a slice of DomainRecords for a domain +// Records returns a slice of DomainRecord for a domain. func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *ListOptions) ([]DomainRecord, *Response, error) { if len(domain) < 1 { return nil, nil, NewArgError("domain", "cannot be an empty string") @@ -213,21 +216,68 @@ func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *List return nil, nil, err } - req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + return s.records(ctx, path) +} + +// RecordsByType returns a slice of DomainRecord for a domain matched by record type. +func (s *DomainsServiceOp) RecordsByType(ctx context.Context, domain, ofType string, opt *ListOptions) ([]DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if len(ofType) < 1 { + return nil, nil, NewArgError("type", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s/records?type=%s", domainsBasePath, domain, ofType) + path, err := addOptions(path, opt) if err != nil { return nil, nil, err } - root := new(domainRecordsRoot) - resp, err := s.client.Do(ctx, req, root) + return s.records(ctx, path) +} + +// RecordsByName returns a slice of DomainRecord for a domain matched by record name. 
+func (s *DomainsServiceOp) RecordsByName(ctx context.Context, domain, name string, opt *ListOptions) ([]DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if len(name) < 1 { + return nil, nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s/records?name=%s", domainsBasePath, domain, name) + path, err := addOptions(path, opt) if err != nil { - return nil, resp, err + return nil, nil, err } - if l := root.Links; l != nil { - resp.Links = l + + return s.records(ctx, path) +} + +// RecordsByTypeAndName returns a slice of DomainRecord for a domain matched by record type and name. +func (s *DomainsServiceOp) RecordsByTypeAndName(ctx context.Context, domain, ofType, name string, opt *ListOptions) ([]DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") } - return root.DomainRecords, resp, err + if len(ofType) < 1 { + return nil, nil, NewArgError("type", "cannot be an empty string") + } + + if len(name) < 1 { + return nil, nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s/records?type=%s&name=%s", domainsBasePath, domain, ofType, name) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.records(ctx, path) } // Record returns the record id from a domain @@ -339,3 +389,22 @@ func (s *DomainsServiceOp) CreateRecord(ctx context.Context, return d.DomainRecord, resp, err } + +// Performs a domain records request given a path. 
+func (s *DomainsServiceOp) records(ctx context.Context, path string) ([]DomainRecord, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainRecordsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.DomainRecords, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/go.sum b/vendor/github.com/digitalocean/godo/go.sum index ccd0f08af3ce0..9679010753e90 100644 --- a/vendor/github.com/digitalocean/godo/go.sum +++ b/vendor/github.com/digitalocean/godo/go.sum @@ -31,6 +31,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index c6dde8e21b3a6..c118533c7ddbb 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -11,6 +11,7 @@ import ( "net/url" "reflect" "strconv" + "sync" "time" "github.com/google/go-querystring/query" @@ -18,7 +19,7 @@ import ( ) const ( - libraryVersion = "1.37.0" + libraryVersion = "1.42.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion 
mediaType = "application/json" @@ -40,12 +41,14 @@ type Client struct { UserAgent string // Rate contains the current rate limit for the client as determined by the most recent - // API call. - Rate Rate + // API call. It is not thread-safe. Please consider using GetRate() instead. + Rate Rate + ratemtx sync.Mutex // Services used for communicating with the API Account AccountService Actions ActionsService + Apps AppsService Balance BalanceService BillingHistory BillingHistoryService CDNs CDNService @@ -186,6 +189,7 @@ func NewClient(httpClient *http.Client) *Client { c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent} c.Account = &AccountServiceOp{client: c} c.Actions = &ActionsServiceOp{client: c} + c.Apps = &AppsServiceOp{client: c} c.Balance = &BalanceServiceOp{client: c} c.BillingHistory = &BillingHistoryServiceOp{client: c} c.CDNs = &CDNServiceOp{client: c} @@ -286,6 +290,14 @@ func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) { c.onRequestCompleted = rc } +// GetRate returns the current rate limit for the client as determined by the most recent +// API call. It is thread-safe. +func (c *Client) GetRate() Rate { + c.ratemtx.Lock() + defer c.ratemtx.Unlock() + return c.Rate +} + // newResponse creates a new Response for the provided http.Response func newResponse(r *http.Response) *Response { response := Response{Response: r} @@ -322,13 +334,26 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res } defer func() { + // Ensure the response body is fully read and closed + // before we reconnect, so that we reuse the same TCPconnection. + // Close the previous response's body. But read at least some of + // the body so if it's small the underlying TCP connection will be + // re-used. No need to check for errors: if it fails, the Transport + // won't reuse it anyway. 
+ const maxBodySlurpSize = 2 << 10 + if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { + io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize) + } + if rerr := resp.Body.Close(); err == nil { err = rerr } }() response := newResponse(resp) + c.ratemtx.Lock() c.Rate = response.Rate + c.ratemtx.Unlock() err = CheckResponse(resp) if err != nil { diff --git a/vendor/github.com/digitalocean/godo/invoices.go b/vendor/github.com/digitalocean/godo/invoices.go index cc111f871ea81..abc9d2de603ba 100644 --- a/vendor/github.com/digitalocean/godo/invoices.go +++ b/vendor/github.com/digitalocean/godo/invoices.go @@ -49,6 +49,7 @@ type InvoiceItem struct { StartTime time.Time `json:"start_time"` EndTime time.Time `json:"end_time"` ProjectName string `json:"project_name"` + Category string `json:"category"` } // InvoiceList contains a paginated list of all of a customer's invoices. diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go index 9b80fefb6dc7b..6e40e8250147b 100644 --- a/vendor/github.com/digitalocean/godo/kubernetes.go +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -66,6 +66,7 @@ type KubernetesClusterCreateRequest struct { MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"` AutoUpgrade bool `json:"auto_upgrade"` + SurgeUpgrade bool `json:"surge_upgrade"` } // KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster. @@ -74,6 +75,7 @@ type KubernetesClusterUpdateRequest struct { Tags []string `json:"tags,omitempty"` MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` AutoUpgrade *bool `json:"auto_upgrade,omitempty"` + SurgeUpgrade bool `json:"surge_upgrade,omitempty"` } // KubernetesClusterUpgradeRequest represents a request to upgrade a Kubernetes cluster. 
@@ -143,6 +145,7 @@ type KubernetesCluster struct { MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"` AutoUpgrade bool `json:"auto_upgrade,omitempty"` + SurgeUpgrade bool `json:"surge_upgrade,omitempty"` Status *KubernetesClusterStatus `json:"status,omitempty"` CreatedAt time.Time `json:"created_at,omitempty"` diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index 043720d7d8ea5..1f30e7f8b1fc0 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -200,11 +200,11 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { return nil } +const rootBase = "root" // baseForRoot loads in the cache the root document and produces a fake "root" base path entry // for further $ref resolution func baseForRoot(root interface{}, cache ResolutionCache) string { // cache the root document to resolve $ref's - const rootBase = "root" if root != nil { base, _ := absPath(rootBase) normalizedBase := normalizeAbsPath(base) diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index 961d477571a00..02d9966c1ad30 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -149,7 +149,15 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) toFetch := *refURL toFetch.Fragment = "" - normalized := normalizeAbsPath(toFetch.String()) + var err error + path := toFetch.String() + if path == rootBase { + path, err = absPath(rootBase) + if err != nil { + return nil, url.URL{}, false, err + } + } + normalized := normalizeAbsPath(path) data, fromCache := r.cache.Get(normalized) if !fromCache { diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml index b6649a3d6b14e..f3c4015e074c4 100644 --- 
a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -36,6 +36,7 @@ parent: gophercloud-acceptance-test description: | Run gophercloud acceptance test on rocky branch + nodeset: ubuntu-xenial vars: global_env: OS_BRANCH: stable/rocky @@ -45,6 +46,7 @@ parent: gophercloud-acceptance-test description: | Run gophercloud acceptance test on queens branch + nodeset: ubuntu-xenial vars: global_env: OS_BRANCH: stable/queens @@ -54,6 +56,7 @@ parent: gophercloud-acceptance-test description: | Run gophercloud acceptance test on pike branch + nodeset: ubuntu-xenial vars: global_env: OS_BRANCH: stable/pike @@ -63,6 +66,7 @@ parent: gophercloud-acceptance-test description: | Run gophercloud acceptance test on ocata branch + nodeset: ubuntu-xenial vars: global_env: OS_BRANCH: stable/ocata @@ -72,6 +76,7 @@ parent: gophercloud-acceptance-test description: | Run gophercloud acceptance test on newton branch + nodeset: ubuntu-xenial vars: global_env: OS_BRANCH: stable/newton diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md index 23af89a3f7dc7..719aa6ecf915a 100644 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md @@ -1,4 +1,25 @@ -## 0.12.0 (Unreleased) +## 0.13.0 (Unlreleased) + +## 0.12.0 (June 25, 2020) + +UPGRADE NOTES + +* The URL used in the `compute/v2/extensions/bootfromvolume` package has been changed from `os-volumes_boot` to `servers`. 
+ +IMPROVEMENTS + +* The URL used in the `compute/v2/extensions/bootfromvolume` package has been changed from `os-volumes_boot` to `servers` [GH-1973](https://github.com/gophercloud/gophercloud/pull/1973) +* Modify `baremetal/v1/nodes.LogicalDisk.PhysicalDisks` type to support physical disks hints [GH-1982](https://github.com/gophercloud/gophercloud/pull/1982) +* Added `baremetalintrospection/httpbasic` which provides an HTTP Basic Auth client [GH-1986](https://github.com/gophercloud/gophercloud/pull/1986) +* Added `baremetal/httpbasic` which provides an HTTP Basic Auth client [GH-1983](https://github.com/gophercloud/gophercloud/pull/1983) +* Added `containerinfra/v1/clusters.CreateOpts.MergeLabels` [GH-1985](https://github.com/gophercloud/gophercloud/pull/1985) + +BUG FIXES + +* Changed `containerinfra/v1/clusters.Cluster.HealthStatusReason` from `string` to `map[string]interface{}` [GH-1968](https://github.com/gophercloud/gophercloud/pull/1968) +* Fixed marshalling of `blockstorage/extensions/backups.ImportBackup.Metadata` [GH-1967](https://github.com/gophercloud/gophercloud/pull/1967) +* Fixed typo of "OAUth" to "OAuth" in `identity/v3/extensions/oauth1` [GH-1969](https://github.com/gophercloud/gophercloud/pull/1969) +* Fixed goroutine leak during reauthentication [GH-1978](https://github.com/gophercloud/gophercloud/pull/1978) ## 0.11.0 (May 14, 2020) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go index 45623369e18e9..348dd20839679 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go @@ -8,7 +8,7 @@ for more information. 
Example to List Tenants - listOpts := tenants.ListOpts{ + listOpts := &tenants.ListOpts{ Limit: 2, } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go index bb109e98e315c..a67f9381d62a1 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go @@ -96,8 +96,8 @@ type Token struct { OAuthToken string `q:"oauth_token"` // OAuthTokenSecret is the secret value associated with the OAuth Token. OAuthTokenSecret string `q:"oauth_token_secret"` - // OAUthExpiresAt is the date and time when an OAuth token expires. - OAUthExpiresAt *time.Time `q:"-"` + // OAuthExpiresAt is the date and time when an OAuth token expires. + OAuthExpiresAt *time.Time `q:"-"` } // TokenResult is a struct to handle @@ -127,7 +127,7 @@ func (r TokenResult) Extract() (*Token, error) { if t, err := time.Parse(gophercloud.RFC3339Milli, v); err != nil { return nil, err } else { - token.OAUthExpiresAt = &t + token.OAuthExpiresAt = &t } } diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index 43574402e9921..53b3ecf27f516 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -94,10 +94,32 @@ type ProviderClient struct { // reauthlock represents a set of attributes used to help in the reauthentication process. type reauthlock struct { sync.RWMutex - // This channel is non-nil during reauthentication. It can be used to ask the - // goroutine doing Reauthenticate() for its result. Look at the implementation - // of Reauthenticate() for details. 
- ongoing chan<- (chan<- error) + ongoing *reauthFuture +} + +// reauthFuture represents future result of the reauthentication process. +// while done channel is not closed, reauthentication is in progress. +// when done channel is closed, err contains the result of reauthentication. +type reauthFuture struct { + done chan struct{} + err error +} + +func newReauthFuture() *reauthFuture { + return &reauthFuture{ + make(chan struct{}), + nil, + } +} + +func (f *reauthFuture) Set(err error) { + f.err = err + close(f.done) +} + +func (f *reauthFuture) Get() error { + <-f.done + return f.err } // AuthenticatedHeaders returns a map of HTTP headers that are common for all @@ -112,9 +134,7 @@ func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { ongoing := client.reauthmut.ongoing client.reauthmut.Unlock() if ongoing != nil { - responseChannel := make(chan error) - ongoing <- responseChannel - _ = <-responseChannel + _ = ongoing.Get() } } t := client.Token() @@ -237,21 +257,19 @@ func (client *ProviderClient) Reauthenticate(previousToken string) error { return client.ReauthFunc() } - messages := make(chan (chan<- error)) + future := newReauthFuture() // Check if a Reauthenticate is in progress, or start one if not. client.reauthmut.Lock() ongoing := client.reauthmut.ongoing if ongoing == nil { - client.reauthmut.ongoing = messages + client.reauthmut.ongoing = future } client.reauthmut.Unlock() // If Reauthenticate is running elsewhere, wait for its result. if ongoing != nil { - responseChannel := make(chan error) - ongoing <- responseChannel - return <-responseChannel + return ongoing.Get() } // Perform the actual reauthentication. @@ -264,22 +282,10 @@ func (client *ProviderClient) Reauthenticate(previousToken string) error { // Mark Reauthenticate as finished. client.reauthmut.Lock() + client.reauthmut.ongoing.Set(err) client.reauthmut.ongoing = nil client.reauthmut.Unlock() - // Report result to all other interested goroutines. 
- // - // This happens in a separate goroutine because another goroutine might have - // acquired a copy of `client.reauthmut.ongoing` before we cleared it, but not - // have come around to sending its request. By answering in a goroutine, we - // can have that goroutine linger until all responseChannels have been sent. - // When GC has collected all sendings ends of the channel, our receiving end - // will be closed and the goroutine will end. - go func() { - for responseChannel := range messages { - responseChannel <- err - } - }() return err } diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 4057344d89f0c..618a49d6b4741 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -1,6 +1,7 @@ package api import ( + "encoding/json" "fmt" "io" "io/ioutil" @@ -37,6 +38,7 @@ type ACLToken struct { Roles []*ACLTokenRoleLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` Local bool + AuthMethod string `json:",omitempty"` ExpirationTTL time.Duration `json:",omitempty"` ExpirationTime *time.Time `json:",omitempty"` CreateTime time.Time `json:",omitempty"` @@ -60,6 +62,7 @@ type ACLTokenListEntry struct { Roles []*ACLTokenRoleLink `json:",omitempty"` ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` Local bool + AuthMethod string `json:",omitempty"` ExpirationTime *time.Time `json:",omitempty"` CreateTime time.Time Hash []byte @@ -180,7 +183,13 @@ type ACLBindingRule struct { type ACLAuthMethod struct { Name string Type string - Description string + DisplayName string `json:",omitempty"` + Description string `json:",omitempty"` + MaxTokenTTL time.Duration `json:",omitempty"` + + // TokenLocality defines the kind of token that this auth method produces. + // This can be either 'local' or 'global'. If empty 'local' is assumed. 
+ TokenLocality string `json:",omitempty"` // Configuration is arbitrary configuration for the auth method. This // should only contain primitive values and containers (such as lists and @@ -190,15 +199,69 @@ type ACLAuthMethod struct { CreateIndex uint64 ModifyIndex uint64 + // NamespaceRules apply only on auth methods defined in the default namespace. + // Namespacing is a Consul Enterprise feature. + NamespaceRules []*ACLAuthMethodNamespaceRule `json:",omitempty"` + // Namespace is the namespace the ACLAuthMethod is associated with. // Namespacing is a Consul Enterprise feature. Namespace string `json:",omitempty"` } +func (m *ACLAuthMethod) MarshalJSON() ([]byte, error) { + type Alias ACLAuthMethod + exported := &struct { + MaxTokenTTL string `json:",omitempty"` + *Alias + }{ + MaxTokenTTL: m.MaxTokenTTL.String(), + Alias: (*Alias)(m), + } + if m.MaxTokenTTL == 0 { + exported.MaxTokenTTL = "" + } + + return json.Marshal(exported) +} + +func (m *ACLAuthMethod) UnmarshalJSON(data []byte) error { + type Alias ACLAuthMethod + aux := &struct { + MaxTokenTTL string + *Alias + }{ + Alias: (*Alias)(m), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.MaxTokenTTL != "" { + if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil { + return err + } + } + + return nil +} + +type ACLAuthMethodNamespaceRule struct { + // Selector is an expression that matches against verified identity + // attributes returned from the auth method during login. + Selector string `json:",omitempty"` + + // BindNamespace is the target namespace of the binding. Can be lightly + // templated using HIL ${foo} syntax from available field names. + // + // If empty it's created in the same namespace as the auth method. 
+ BindNamespace string `json:",omitempty"` +} + type ACLAuthMethodListEntry struct { Name string Type string - Description string + DisplayName string `json:",omitempty"` + Description string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -246,12 +309,73 @@ func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { } } +// OIDCAuthMethodConfig is the config for the built-in Consul auth method for +// OIDC and JWT. +type OIDCAuthMethodConfig struct { + // common for type=oidc and type=jwt + JWTSupportedAlgs []string `json:",omitempty"` + BoundAudiences []string `json:",omitempty"` + ClaimMappings map[string]string `json:",omitempty"` + ListClaimMappings map[string]string `json:",omitempty"` + OIDCDiscoveryURL string `json:",omitempty"` + OIDCDiscoveryCACert string `json:",omitempty"` + // just for type=oidc + OIDCClientID string `json:",omitempty"` + OIDCClientSecret string `json:",omitempty"` + OIDCScopes []string `json:",omitempty"` + AllowedRedirectURIs []string `json:",omitempty"` + VerboseOIDCLogging bool `json:",omitempty"` + // just for type=jwt + JWKSURL string `json:",omitempty"` + JWKSCACert string `json:",omitempty"` + JWTValidationPubKeys []string `json:",omitempty"` + BoundIssuer string `json:",omitempty"` + ExpirationLeeway time.Duration `json:",omitempty"` + NotBeforeLeeway time.Duration `json:",omitempty"` + ClockSkewLeeway time.Duration `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. 
+func (c *OIDCAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + // common for type=oidc and type=jwt + "JWTSupportedAlgs": c.JWTSupportedAlgs, + "BoundAudiences": c.BoundAudiences, + "ClaimMappings": c.ClaimMappings, + "ListClaimMappings": c.ListClaimMappings, + "OIDCDiscoveryURL": c.OIDCDiscoveryURL, + "OIDCDiscoveryCACert": c.OIDCDiscoveryCACert, + // just for type=oidc + "OIDCClientID": c.OIDCClientID, + "OIDCClientSecret": c.OIDCClientSecret, + "OIDCScopes": c.OIDCScopes, + "AllowedRedirectURIs": c.AllowedRedirectURIs, + "VerboseOIDCLogging": c.VerboseOIDCLogging, + // just for type=jwt + "JWKSURL": c.JWKSURL, + "JWKSCACert": c.JWKSCACert, + "JWTValidationPubKeys": c.JWTValidationPubKeys, + "BoundIssuer": c.BoundIssuer, + "ExpirationLeeway": c.ExpirationLeeway, + "NotBeforeLeeway": c.NotBeforeLeeway, + "ClockSkewLeeway": c.ClockSkewLeeway, + } +} + type ACLLoginParams struct { AuthMethod string BearerToken string Meta map[string]string `json:",omitempty"` } +type ACLOIDCAuthURLParams struct { + AuthMethod string + RedirectURI string + ClientNonce string + Meta map[string]string `json:",omitempty"` +} + // ACL can be used to query the ACL endpoints type ACL struct { c *Client @@ -666,6 +790,32 @@ func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMe return &out, qm, nil } +// PolicyReadByName retrieves the policy details including the rule set with name. 
+func (a *ACL) PolicyReadByName(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policy/name/"+url.QueryEscape(policyName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + // PolicyList retrieves a listing of all policies. The listing does not include the // rules for any policy as those should be retrieved by subsequent calls to PolicyRead. func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) { @@ -1142,3 +1292,62 @@ func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { wm := &WriteMeta{RequestTime: rtt} return wm, nil } + +// OIDCAuthURL requests an authorization URL to start an OIDC login flow. +func (a *ACL) OIDCAuthURL(auth *ACLOIDCAuthURLParams, q *WriteOptions) (string, *WriteMeta, error) { + if auth.AuthMethod == "" { + return "", nil, fmt.Errorf("Must specify an auth method name") + } + + r := a.c.newRequest("POST", "/v1/acl/oidc/auth-url") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out aclOIDCAuthURLResponse + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.AuthURL, wm, nil +} + +type aclOIDCAuthURLResponse struct { + AuthURL string +} + +type ACLOIDCCallbackParams struct { + AuthMethod string + State string + Code string + ClientNonce string +} + +// OIDCCallback is the callback endpoint to complete an OIDC login. 
+func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if auth.AuthMethod == "" { + return nil, nil, fmt.Errorf("Must specify an auth method name") + } + + r := a.c.newRequest("POST", "/v1/acl/oidc/callback") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 929d3ccd34fe2..717e6f5ae89bf 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -28,6 +28,15 @@ const ( // service will proxy connections based off the SNI header set by other // connect proxies ServiceKindMeshGateway ServiceKind = "mesh-gateway" + + // ServiceKindTerminatingGateway is a Terminating Gateway for the Connect + // feature. This service will proxy connections to services outside the mesh. + ServiceKindTerminatingGateway ServiceKind = "terminating-gateway" + + // ServiceKindIngressGateway is an Ingress Gateway for the Connect feature. + // This service will ingress connections based of configuration defined in + // the ingress-gateway config entry. + ServiceKindIngressGateway ServiceKind = "ingress-gateway" ) // UpstreamDestType is the type of upstream discovery mechanism. 
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index a42a110bcec56..7b00be967adbe 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -551,11 +551,11 @@ func NewClient(config *Config) (*Client, error) { // bootstrap the config defConfig := DefaultConfig() - if len(config.Address) == 0 { + if config.Address == "" { config.Address = defConfig.Address } - if len(config.Scheme) == 0 { + if config.Scheme == "" { config.Scheme = defConfig.Scheme } @@ -599,7 +599,7 @@ func NewClient(config *Config) (*Client, error) { if len(parts) == 2 { switch parts[0] { case "http": - config.Scheme = "http" + // Never revert to http if TLS was explicitly requested. case "https": config.Scheme = "https" case "unix": diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index ae0d42797ed82..dc31d6110f956 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -12,11 +12,13 @@ import ( ) const ( - ServiceDefaults string = "service-defaults" - ProxyDefaults string = "proxy-defaults" - ServiceRouter string = "service-router" - ServiceSplitter string = "service-splitter" - ServiceResolver string = "service-resolver" + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ServiceRouter string = "service-router" + ServiceSplitter string = "service-splitter" + ServiceResolver string = "service-resolver" + IngressGateway string = "ingress-gateway" + TerminatingGateway string = "terminating-gateway" ProxyConfigGlobal string = "global" ) @@ -69,13 +71,13 @@ type ExposeConfig struct { type ExposePath struct { // ListenerPort defines the port of the proxy's listener for exposed paths. 
- ListenerPort int `json:",omitempty"` + ListenerPort int `json:",omitempty" alias:"listener_port"` // Path is the path to expose through the proxy, ie. "/metrics." Path string `json:",omitempty"` // LocalPathPort is the port that the service is listening on for the given path. - LocalPathPort int `json:",omitempty"` + LocalPathPort int `json:",omitempty" alias:"local_path_port"` // Protocol describes the upstream's service protocol. // Valid values are "http" and "http2", defaults to "http" @@ -90,9 +92,9 @@ type ServiceConfigEntry struct { Name string Namespace string `json:",omitempty"` Protocol string `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` - ExternalSNI string `json:",omitempty"` + ExternalSNI string `json:",omitempty" alias:"external_sni"` CreateIndex uint64 ModifyIndex uint64 } @@ -118,7 +120,7 @@ type ProxyConfigEntry struct { Name string Namespace string `json:",omitempty"` Config map[string]interface{} `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` Expose ExposeConfig `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -140,11 +142,6 @@ func (p *ProxyConfigEntry) GetModifyIndex() uint64 { return p.ModifyIndex } -type rawEntryListResponse struct { - kind string - Entries []map[string]interface{} -} - func makeConfigEntry(kind, name string) (ConfigEntry, error) { switch kind { case ServiceDefaults: @@ -157,6 +154,10 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil case ServiceResolver: return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil + case IngressGateway: + return &IngressGatewayConfigEntry{Kind: kind, Name: name}, nil + case TerminatingGateway: + return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil 
default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index 885b78dc9115c..f3994f0dd9b58 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -31,12 +31,12 @@ type ServiceRouteMatch struct { } type ServiceRouteHTTPMatch struct { - PathExact string `json:",omitempty"` - PathPrefix string `json:",omitempty"` - PathRegex string `json:",omitempty"` + PathExact string `json:",omitempty" alias:"path_exact"` + PathPrefix string `json:",omitempty" alias:"path_prefix"` + PathRegex string `json:",omitempty" alias:"path_regex"` Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` - QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"` + QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty" alias:"query_param"` Methods []string `json:",omitempty"` } @@ -59,13 +59,13 @@ type ServiceRouteHTTPMatchQueryParam struct { type ServiceRouteDestination struct { Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` - PrefixRewrite string `json:",omitempty"` - RequestTimeout time.Duration `json:",omitempty"` - NumRetries uint32 `json:",omitempty"` - RetryOnConnectFailure bool `json:",omitempty"` - RetryOnStatusCodes []uint32 `json:",omitempty"` + PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"` + RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"` + NumRetries uint32 `json:",omitempty" alias:"num_retries"` + RetryOnConnectFailure bool `json:",omitempty" alias:"retry_on_connect_failure"` + RetryOnStatusCodes []uint32 `json:",omitempty" alias:"retry_on_status_codes"` } func (e *ServiceRouteDestination) MarshalJSON() 
([]byte, error) { @@ -123,7 +123,7 @@ func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIn type ServiceSplit struct { Weight float32 Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` } @@ -132,11 +132,11 @@ type ServiceResolverConfigEntry struct { Name string Namespace string `json:",omitempty"` - DefaultSubset string `json:",omitempty"` + DefaultSubset string `json:",omitempty" alias:"default_subset"` Subsets map[string]ServiceResolverSubset `json:",omitempty"` Redirect *ServiceResolverRedirect `json:",omitempty"` Failover map[string]ServiceResolverFailover `json:",omitempty"` - ConnectTimeout time.Duration `json:",omitempty"` + ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` CreateIndex uint64 ModifyIndex uint64 @@ -185,19 +185,19 @@ func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIn type ServiceResolverSubset struct { Filter string `json:",omitempty"` - OnlyPassing bool `json:",omitempty"` + OnlyPassing bool `json:",omitempty" alias:"only_passing"` } type ServiceResolverRedirect struct { Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` Datacenter string `json:",omitempty"` } type ServiceResolverFailover struct { Service string `json:",omitempty"` - ServiceSubset string `json:",omitempty"` + ServiceSubset string `json:",omitempty" alias:"service_subset"` Namespace string `json:",omitempty"` Datacenters []string `json:",omitempty"` } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go new file mode 100644 index 0000000000000..13a5ec7072f21 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ 
-0,0 +1,170 @@ +package api + +// IngressGatewayConfigEntry manages the configuration for an ingress service +// with the given name. +type IngressGatewayConfigEntry struct { + // Kind of the config entry. This should be set to api.IngressGateway. + Kind string + + // Name is used to match the config entry with its associated ingress gateway + // service. This should match the name provided in the service definition. + Name string + + // Namespace is the namespace the IngressGateway is associated with + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` + + // TLS holds the TLS configuration for this gateway. + TLS GatewayTLSConfig + + // Listeners declares what ports the ingress gateway should listen on, and + // what services to associated to those ports. + Listeners []IngressListener + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 +} + +type GatewayTLSConfig struct { + // Indicates that TLS should be enabled for this gateway service + Enabled bool +} + +// IngressListener manages the configuration for a listener on a specific port. +type IngressListener struct { + // Port declares the port on which the ingress gateway should listen for traffic. + Port int + + // Protocol declares what type of traffic this listener is expected to + // receive. Depending on the protocol, a listener might support multiplexing + // services over a single port, or additional discovery chain features. The + // current supported values are: (tcp | http). + Protocol string + + // Services declares the set of services to which the listener forwards + // traffic. + // + // For "tcp" protocol listeners, only a single service is allowed. + // For "http" listeners, multiple services can be declared. 
+ Services []IngressService +} + +// IngressService manages configuration for services that are exposed to +// ingress traffic. +type IngressService struct { + // Name declares the service to which traffic should be forwarded. + // + // This can either be a specific service, or the wildcard specifier, + // "*". If the wildcard specifier is provided, the listener must be of "http" + // protocol and means that the listener will forward traffic to all services. + // + // A name can be specified on multiple listeners, and will be exposed on both + // of the listeners + Name string + + // Hosts is a list of hostnames which should be associated to this service on + // the defined listener. Only allowed on layer 7 protocols, this will be used + // to route traffic to the service by matching the Host header of the HTTP + // request. + // + // If a host is provided for a service that also has a wildcard specifier + // defined, the host will override the wildcard-specifier-provided + // ".*" domain for that listener. + // + // This cannot be specified when using the wildcard specifier, "*", or when + // using a "tcp" listener. + Hosts []string + + // Namespace is the namespace where the service is located. + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +func (i *IngressGatewayConfigEntry) GetKind() string { + return i.Kind +} + +func (i *IngressGatewayConfigEntry) GetName() string { + return i.Name +} + +func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 { + return i.CreateIndex +} + +func (i *IngressGatewayConfigEntry) GetModifyIndex() uint64 { + return i.ModifyIndex +} + +// TerminatingGatewayConfigEntry manages the configuration for a terminating gateway +// with the given name. +type TerminatingGatewayConfigEntry struct { + // Kind of the config entry. This should be set to api.TerminatingGateway. + Kind string + + // Name is used to match the config entry with its associated terminating gateway + // service. 
This should match the name provided in the service definition. + Name string + + // Services is a list of service names represented by the terminating gateway. + Services []LinkedService `json:",omitempty"` + + // CreateIndex is the Raft index this entry was created at. This is a + // read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // Namespace is the namespace the config entry is associated with + // Namespacing is a Consul Enterprise feature. + Namespace string `json:",omitempty"` +} + +// A LinkedService is a service represented by a terminating gateway +type LinkedService struct { + // The namespace the service is registered in + Namespace string `json:",omitempty"` + + // Name is the name of the service, as defined in Consul's catalog + Name string `json:",omitempty"` + + // CAFile is the optional path to a CA certificate to use for TLS connections + // from the gateway to the linked service + CAFile string `json:",omitempty" alias:"ca_file"` + + // CertFile is the optional path to a client certificate to use for TLS connections + // from the gateway to the linked service + CertFile string `json:",omitempty" alias:"cert_file"` + + // KeyFile is the optional path to a private key to use for TLS connections + // from the gateway to the linked service + KeyFile string `json:",omitempty" alias:"key_file"` + + // SNI is the optional name to specify during the TLS handshake with a linked service + SNI string `json:",omitempty"` +} + +func (g *TerminatingGatewayConfigEntry) GetKind() string { + return g.Kind +} + +func (g *TerminatingGatewayConfigEntry) GetName() string { + return g.Name +} + +func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 { + return g.CreateIndex +} + +func (g *TerminatingGatewayConfigEntry) GetModifyIndex() uint64 { + return g.ModifyIndex +} diff 
--git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go index d25cb844fb8b6..3db177c7b49ae 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -33,7 +33,7 @@ type Intention struct { // SourceType is the type of the value for the source. SourceType IntentionSourceType - // Action is whether this is a whitelist or blacklist intention. + // Action is whether this is an allowlist or denylist intention. Action IntentionAction // DefaultAddr, DefaultPort of the local listening proxy (if any) to @@ -99,7 +99,7 @@ func (i *Intention) partString(ns, n string) string { const IntentionDefaultNamespace = "default" // IntentionAction is the action that the intention represents. This -// can be "allow" or "deny" to whitelist or blacklist intentions. +// can be "allow" or "deny" to allowlist or denylist intentions. type IntentionAction string const ( diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod index 7770aae90de96..8f2531554b95c 100644 --- a/vendor/github.com/hashicorp/consul/api/go.mod +++ b/vendor/github.com/hashicorp/consul/api/go.mod @@ -5,12 +5,12 @@ go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk require ( - github.com/hashicorp/consul/sdk v0.4.0 + github.com/hashicorp/consul/sdk v0.5.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-uuid v1.0.1 - github.com/hashicorp/serf v0.8.2 + github.com/hashicorp/serf v0.9.0 github.com/mitchellh/mapstructure v1.1.2 github.com/stretchr/testify v1.4.0 ) diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum index 40f6fdc743989..c31fd7eb874b7 100644 --- 
a/vendor/github.com/hashicorp/consul/api/go.sum +++ b/vendor/github.com/hashicorp/consul/api/go.sum @@ -12,8 +12,8 @@ github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/hashicorp/consul/sdk v0.4.0 h1:zBtCfKJZcJDBvSCkQJch4ulp59m1rATFLKwNo/LYY30= -github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u0zDAs= +github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= @@ -34,15 +34,14 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/memberlist v0.2.0 h1:WeeNspppWi5s1OFefTviPQueC/Bq8dONfvNjPhiEQKE= +github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.0 h1:+Zd/16AJ9lxk9RzfTDyv/TLhZ8UerqYS0/+JGCIDaa0= +github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -60,13 +59,13 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0 
h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -88,21 +87,36 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 
h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 2f4894ae6e75d..99b9ac2574937 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -17,6 +17,12 @@ const ( HealthMaint = "maintenance" ) +const ( + serviceHealth = "service" + connectHealth = "connect" + ingressHealth = "ingress" +) + const ( // NodeMaint is the special key set by a node in maintenance mode. 
NodeMaint = "_node_maintenance" @@ -170,7 +176,7 @@ type HealthChecks []*HealthCheck func (c HealthChecks) AggregatedStatus() string { var passing, warning, critical, maintenance bool for _, check := range c { - id := string(check.CheckID) + id := check.CheckID if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { maintenance = true continue @@ -269,11 +275,11 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) if tag != "" { tags = []string{tag} } - return h.service(service, tags, passingOnly, q, false) + return h.service(service, tags, passingOnly, q, serviceHealth) } func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tags, passingOnly, q, false) + return h.service(service, tags, passingOnly, q, serviceHealth) } // Connect is equivalent to Service except that it will only return services @@ -286,18 +292,31 @@ func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) if tag != "" { tags = []string{tag} } - return h.service(service, tags, passingOnly, q, true) + return h.service(service, tags, passingOnly, q, connectHealth) } func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tags, passingOnly, q, true) + return h.service(service, tags, passingOnly, q, connectHealth) +} + +// Ingress is equivalent to Connect except that it will only return associated +// ingress gateways for the requested service. 
+func (h *Health) Ingress(service string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + var tags []string + return h.service(service, tags, passingOnly, q, ingressHealth) } -func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) { - path := "/v1/health/service/" + service - if connect { +func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, healthType string) ([]*ServiceEntry, *QueryMeta, error) { + var path string + switch healthType { + case connectHealth: path = "/v1/health/connect/" + service + case ingressHealth: + path = "/v1/health/ingress/" + service + default: + path = "/v1/health/service/" + service } + r := h.c.newRequest("GET", path) r.setQueryOptions(q) if len(tags) > 0 { diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go index 25aa702e8ade1..51b64cef483e2 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_license.go +++ b/vendor/github.com/hashicorp/consul/api/operator_license.go @@ -36,6 +36,9 @@ type License struct { // License Specific Flags Flags map[string]interface{} `json:"flags"` + // Modules is a list of the licensed enterprise modules + Modules []string `json:"modules"` + // List of features enabled by the license Features []string `json:"features"` } diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index a9844df2dd398..c6d7165d4de10 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -60,7 +60,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") r.setWriteOptions(q) - r.params.Set("address", string(address)) + 
r.params.Set("address", address) _, resp, err := requireOK(op.c.doRequest(r)) if err != nil { @@ -77,7 +77,7 @@ func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") r.setWriteOptions(q) - r.params.Set("id", string(id)) + r.params.Set("id", id) _, resp, err := requireOK(op.c.doRequest(r)) if err != nil { diff --git a/vendor/github.com/hashicorp/memberlist/config.go b/vendor/github.com/hashicorp/memberlist/config.go index 01abcd49acccf..31099e75f4423 100644 --- a/vendor/github.com/hashicorp/memberlist/config.go +++ b/vendor/github.com/hashicorp/memberlist/config.go @@ -1,10 +1,15 @@ package memberlist import ( + "fmt" "io" "log" + "net" "os" + "strings" "time" + + multierror "github.com/hashicorp/go-multierror" ) type Config struct { @@ -228,6 +233,35 @@ type Config struct { // RequireNodeNames controls if the name of a node is required when sending // a message to that node. RequireNodeNames bool + // CIDRsAllowed If nil, allow any connection (default), otherwise specify all networks + // allowed to connect (you must specify IPv6/IPv4 separately) + // Using [] will block all connections. + CIDRsAllowed []net.IPNet +} + +// ParseCIDRs return a possible empty list of all Network that have been parsed +// In case of error, it returns succesfully parsed CIDRs and the last error found +func ParseCIDRs(v []string) ([]net.IPNet, error) { + nets := make([]net.IPNet, 0) + if v == nil { + return nets, nil + } + var errs error + hasErrors := false + for _, p := range v { + _, net, err := net.ParseCIDR(strings.TrimSpace(p)) + if err != nil { + err = fmt.Errorf("invalid cidr: %s", p) + errs = multierror.Append(errs, err) + hasErrors = true + } else { + nets = append(nets, *net) + } + } + if !hasErrors { + errs = nil + } + return nets, errs } // DefaultLANConfig returns a sane set of configurations for Memberlist. 
@@ -271,6 +305,7 @@ func DefaultLANConfig() *Config { HandoffQueueDepth: 1024, UDPBufferSize: 1400, + CIDRsAllowed: nil, // same as allow all } } @@ -290,6 +325,24 @@ func DefaultWANConfig() *Config { return conf } +// IPMustBeChecked return true if IPAllowed must be called +func (c *Config) IPMustBeChecked() bool { + return len(c.CIDRsAllowed) > 0 +} + +// IPAllowed return an error if access to memberlist is denied +func (c *Config) IPAllowed(ip net.IP) error { + if !c.IPMustBeChecked() { + return nil + } + for _, n := range c.CIDRsAllowed { + if n.Contains(ip) { + return nil + } + } + return fmt.Errorf("%s is not allowed", ip) +} + // DefaultLocalConfig works like DefaultConfig, however it returns a configuration // that is optimized for a local loopback environments. The default configuration is // still very conservative and errs on the side of caution. diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go index f46d466ea1778..7ee04009191e8 100644 --- a/vendor/github.com/hashicorp/memberlist/memberlist.go +++ b/vendor/github.com/hashicorp/memberlist/memberlist.go @@ -568,7 +568,7 @@ func (m *Memberlist) SendBestEffort(to *Node, msg []byte) error { buf = append(buf, msg...) 
// Send the message - a := Address{Addr: to.String(), Name: to.Name} + a := Address{Addr: to.Address(), Name: to.Name} return m.rawSendMsgPacket(a, to, buf) } @@ -736,7 +736,7 @@ func (m *Memberlist) hasLeft() bool { return atomic.LoadInt32(&m.leave) == 1 } -func (m *Memberlist) getNodeState(addr string) nodeStateType { +func (m *Memberlist) getNodeState(addr string) NodeStateType { m.nodeLock.RLock() defer m.nodeLock.RUnlock() diff --git a/vendor/github.com/hashicorp/memberlist/net.go b/vendor/github.com/hashicorp/memberlist/net.go index 11c2f63cd7a59..8d1d7271e92f2 100644 --- a/vendor/github.com/hashicorp/memberlist/net.go +++ b/vendor/github.com/hashicorp/memberlist/net.go @@ -176,7 +176,7 @@ type pushNodeState struct { Port uint16 Meta []byte Incarnation uint32 - State nodeStateType + State NodeStateType Vsn []uint8 // Protocol versions } @@ -220,9 +220,9 @@ func (m *Memberlist) streamListen() { // handleConn handles a single incoming stream connection from the transport. 
func (m *Memberlist) handleConn(conn net.Conn) { + defer conn.Close() m.logger.Printf("[DEBUG] memberlist: Stream connection %s", LogConn(conn)) - defer conn.Close() metrics.IncrCounter([]string{"memberlist", "tcp", "accept"}, 1) conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) @@ -622,12 +622,47 @@ func (m *Memberlist) handleSuspect(buf []byte, from net.Addr) { m.suspectNode(&sus) } +// ensureCanConnect return the IP from a RemoteAddress +// return error if this client must not connect +func (m *Memberlist) ensureCanConnect(from net.Addr) error { + if !m.config.IPMustBeChecked() { + return nil + } + source := from.String() + if source == "pipe" { + return nil + } + host, _, err := net.SplitHostPort(source) + if err != nil { + return err + } + + ip := net.ParseIP(host) + if ip == nil { + return fmt.Errorf("Cannot parse IP from %s", host) + } + return m.config.IPAllowed(ip) +} + func (m *Memberlist) handleAlive(buf []byte, from net.Addr) { + if err := m.ensureCanConnect(from); err != nil { + m.logger.Printf("[DEBUG] memberlist: Blocked alive message: %s %s", err, LogAddress(from)) + return + } var live alive if err := decode(buf, &live); err != nil { m.logger.Printf("[ERR] memberlist: Failed to decode alive message: %s %s", err, LogAddress(from)) return } + if m.config.IPMustBeChecked() { + innerIP := net.IP(live.Addr) + if innerIP != nil { + if err := m.config.IPAllowed(innerIP); err != nil { + m.logger.Printf("[DEBUG] memberlist: Blocked alive.Addr=%s message from: %s %s", innerIP.String(), err, LogAddress(from)) + return + } + } + } // For proto versions < 2, there is no port provided. 
Mask old // behavior by using the configured port @@ -1113,16 +1148,17 @@ func (m *Memberlist) mergeRemoteState(join bool, remoteNodes []pushNodeState, us nodes := make([]*Node, len(remoteNodes)) for idx, n := range remoteNodes { nodes[idx] = &Node{ - Name: n.Name, - Addr: n.Addr, - Port: n.Port, - Meta: n.Meta, - PMin: n.Vsn[0], - PMax: n.Vsn[1], - PCur: n.Vsn[2], - DMin: n.Vsn[3], - DMax: n.Vsn[4], - DCur: n.Vsn[5], + Name: n.Name, + Addr: n.Addr, + Port: n.Port, + Meta: n.Meta, + State: n.State, + PMin: n.Vsn[0], + PMax: n.Vsn[1], + PCur: n.Vsn[2], + DMin: n.Vsn[3], + DMax: n.Vsn[4], + DCur: n.Vsn[5], } } if err := m.config.Merge.NotifyMerge(nodes); err != nil { diff --git a/vendor/github.com/hashicorp/memberlist/security.go b/vendor/github.com/hashicorp/memberlist/security.go index d90114eb0c4f3..4cb4da36f05ba 100644 --- a/vendor/github.com/hashicorp/memberlist/security.go +++ b/vendor/github.com/hashicorp/memberlist/security.go @@ -106,7 +106,10 @@ func encryptPayload(vsn encryptionVersion, key []byte, msg []byte, data []byte, dst.WriteByte(byte(vsn)) // Add a random nonce - io.CopyN(dst, rand.Reader, nonceSize) + _, err = io.CopyN(dst, rand.Reader, nonceSize) + if err != nil { + return err + } afterNonce := dst.Len() // Ensure we are correctly padded (only version 0) diff --git a/vendor/github.com/hashicorp/memberlist/state.go b/vendor/github.com/hashicorp/memberlist/state.go index 83d61c93a40a2..7044f293c9d2c 100644 --- a/vendor/github.com/hashicorp/memberlist/state.go +++ b/vendor/github.com/hashicorp/memberlist/state.go @@ -13,27 +13,28 @@ import ( metrics "github.com/armon/go-metrics" ) -type nodeStateType int +type NodeStateType int const ( - stateAlive nodeStateType = iota - stateSuspect - stateDead - stateLeft + StateAlive NodeStateType = iota + StateSuspect + StateDead + StateLeft ) // Node represents a node in the cluster. 
type Node struct { - Name string - Addr net.IP - Port uint16 - Meta []byte // Metadata from the delegate for this node. - PMin uint8 // Minimum protocol version this understands - PMax uint8 // Maximum protocol version this understands - PCur uint8 // Current version node is speaking - DMin uint8 // Min protocol version for the delegate to understand - DMax uint8 // Max protocol version for the delegate to understand - DCur uint8 // Current version delegate is speaking + Name string + Addr net.IP + Port uint16 + Meta []byte // Metadata from the delegate for this node. + State NodeStateType // State of the node. + PMin uint8 // Minimum protocol version this understands + PMax uint8 // Maximum protocol version this understands + PCur uint8 // Current version node is speaking + DMin uint8 // Min protocol version for the delegate to understand + DMax uint8 // Max protocol version for the delegate to understand + DCur uint8 // Current version delegate is speaking } // Address returns the host:port form of a node's address, suitable for use @@ -60,7 +61,7 @@ func (n *Node) String() string { type nodeState struct { Node Incarnation uint32 // Last known incarnation number - State nodeStateType // Current state + State NodeStateType // Current state StateChange time.Time // Time last state change happened } @@ -77,7 +78,7 @@ func (n *nodeState) FullAddress() Address { } func (n *nodeState) DeadOrLeft() bool { - return n.State == stateDead || n.State == stateLeft + return n.State == StateDead || n.State == StateLeft } // ackHandler is used to register handlers for incoming acks and nacks. 
@@ -321,7 +322,7 @@ func (m *Memberlist) probeNode(node *nodeState) { defer func() { m.awareness.ApplyDelta(awarenessDelta) }() - if node.State == stateAlive { + if node.State == StateAlive { if err := m.encodeAndSendMsg(node.FullAddress(), pingMsg, &ping); err != nil { m.logger.Printf("[ERR] memberlist: Failed to send ping: %s", err) if failedRemote(err) { @@ -396,7 +397,7 @@ HANDLE_REMOTE_FAILURE: kNodes := kRandomNodes(m.config.IndirectChecks, m.nodes, func(n *nodeState) bool { return n.Name == m.config.Name || n.Name == node.Name || - n.State != stateAlive + n.State != StateAlive }) m.nodeLock.RUnlock() @@ -573,10 +574,10 @@ func (m *Memberlist) gossip() { } switch n.State { - case stateAlive, stateSuspect: + case StateAlive, StateSuspect: return false - case stateDead: + case StateDead: return time.Since(n.StateChange) > m.config.GossipToTheDeadTime default: @@ -623,7 +624,7 @@ func (m *Memberlist) pushPull() { m.nodeLock.RLock() nodes := kRandomNodes(1, m.nodes, func(n *nodeState) bool { return n.Name == m.config.Name || - n.State != stateAlive + n.State != StateAlive }) m.nodeLock.RUnlock() @@ -681,7 +682,7 @@ func (m *Memberlist) verifyProtocol(remote []pushNodeState) error { for _, rn := range remote { // If the node isn't alive, then skip it - if rn.State != stateAlive { + if rn.State != StateAlive { continue } @@ -710,7 +711,7 @@ func (m *Memberlist) verifyProtocol(remote []pushNodeState) error { for _, n := range m.nodes { // Ignore non-alive nodes - if n.State != stateAlive { + if n.State != StateAlive { continue } @@ -969,6 +970,11 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { // store this node in our node map. 
var updatesNode bool if !ok { + errCon := m.config.IPAllowed(a.Addr) + if errCon != nil { + m.logger.Printf("[WARN] memberlist: Rejected node %s (%v): %s", a.Node, net.IP(a.Addr), errCon) + return + } state = &nodeState{ Node: Node{ Name: a.Node, @@ -976,7 +982,7 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { Port: a.Port, Meta: a.Meta, }, - State: stateDead, + State: StateDead, } if len(a.Vsn) > 5 { state.PMin = a.Vsn[0] @@ -1006,12 +1012,17 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { } else { // Check if this address is different than the existing node unless the old node is dead. if !bytes.Equal([]byte(state.Addr), a.Addr) || state.Port != a.Port { + errCon := m.config.IPAllowed(a.Addr) + if errCon != nil { + m.logger.Printf("[WARN] memberlist: Rejected IP update from %v to %v for node %s: %s", a.Node, state.Addr, net.IP(a.Addr), errCon) + return + } // If DeadNodeReclaimTime is configured, check if enough time has elapsed since the node died. canReclaim := (m.config.DeadNodeReclaimTime > 0 && time.Since(state.StateChange) > m.config.DeadNodeReclaimTime) // Allow the address to be updated if a dead node is being replaced. 
- if state.State == stateLeft || (state.State == stateDead && canReclaim) { + if state.State == StateLeft || (state.State == StateDead && canReclaim) { m.logger.Printf("[INFO] memberlist: Updating address for left or failed node %s from %v:%d to %v:%d", state.Name, state.Addr, state.Port, net.IP(a.Addr), a.Port) updatesNode = true @@ -1096,8 +1107,8 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { state.Meta = a.Meta state.Addr = a.Addr state.Port = a.Port - if state.State != stateAlive { - state.State = stateAlive + if state.State != StateAlive { + state.State = StateAlive state.StateChange = time.Now() } } @@ -1107,7 +1118,7 @@ func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) { // Notify the delegate of any relevant updates if m.config.Events != nil { - if oldState == stateDead || oldState == stateLeft { + if oldState == StateDead || oldState == StateLeft { // if Dead/Left -> Alive, notify of join m.config.Events.NotifyJoin(&state.Node) @@ -1147,7 +1158,7 @@ func (m *Memberlist) suspectNode(s *suspect) { } // Ignore non-alive nodes - if state.State != stateAlive { + if state.State != StateAlive { return } @@ -1165,7 +1176,7 @@ func (m *Memberlist) suspectNode(s *suspect) { // Update the state state.Incarnation = s.Incarnation - state.State = stateSuspect + state.State = StateSuspect changeTime := time.Now() state.StateChange = changeTime @@ -1189,7 +1200,7 @@ func (m *Memberlist) suspectNode(s *suspect) { fn := func(numConfirmations int) { m.nodeLock.Lock() state, ok := m.nodeMap[s.Node] - timeout := ok && state.State == stateSuspect && state.StateChange == changeTime + timeout := ok && state.State == StateSuspect && state.StateChange == changeTime m.nodeLock.Unlock() if timeout { @@ -1255,9 +1266,9 @@ func (m *Memberlist) deadNode(d *dead) { // If the dead message was send by the node itself, mark it is left // instead of dead. 
if d.Node == d.From { - state.State = stateLeft + state.State = StateLeft } else { - state.State = stateDead + state.State = StateDead } state.StateChange = time.Now() @@ -1272,7 +1283,7 @@ func (m *Memberlist) deadNode(d *dead) { func (m *Memberlist) mergeState(remote []pushNodeState) { for _, r := range remote { switch r.State { - case stateAlive: + case StateAlive: a := alive{ Incarnation: r.Incarnation, Node: r.Name, @@ -1283,14 +1294,14 @@ func (m *Memberlist) mergeState(remote []pushNodeState) { } m.aliveNode(&a, nil, false) - case stateLeft: + case StateLeft: d := dead{Incarnation: r.Incarnation, Node: r.Name, From: r.Name} m.deadNode(&d) - case stateDead: + case StateDead: // If the remote node believes a node is dead, we prefer to // suspect that node instead of declaring it dead instantly fallthrough - case stateSuspect: + case StateSuspect: s := suspect{Incarnation: r.Incarnation, Node: r.Name, From: m.config.Name} m.suspectNode(&s) } diff --git a/vendor/github.com/hashicorp/memberlist/util.go b/vendor/github.com/hashicorp/memberlist/util.go index 1e582a8a1b0e0..22bf6b440b39e 100644 --- a/vendor/github.com/hashicorp/memberlist/util.go +++ b/vendor/github.com/hashicorp/memberlist/util.go @@ -102,7 +102,7 @@ func moveDeadNodes(nodes []*nodeState, gossipToTheDeadTime time.Duration) int { numDead := 0 n := len(nodes) for i := 0; i < n-numDead; i++ { - if nodes[i].State != stateDead { + if nodes[i].State != StateDead { continue } diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 126fe62cdb5f4..c324bc05d8cab 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -28,6 +28,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/coredns/coredns * https://cloudflare.com * https://github.com/abh/geodns +* https://github.com/baidu/bfe * http://www.statdns.com/ * http://www.dnsinspect.com/ * 
https://github.com/chuangbo/jianbing-dictionary-dns diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index db2761d45bc13..bb8667fd68bed 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -124,15 +124,38 @@ func (c *Client) Dial(address string) (conn *Conn, err error) { // of 512 bytes // To specify a local address or a timeout, the caller has to set the `Client.Dialer` // attribute appropriately + func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { + co, err := c.Dial(address) + + if err != nil { + return nil, 0, err + } + defer co.Close() + return c.ExchangeWithConn(m, co) +} + +// ExchangeWithConn has the same behavior as Exchange, just with a predetermined connection +// that will be used instead of creating a new one. +// Usage pattern with a *dns.Client: +// c := new(dns.Client) +// // connection management logic goes here +// +// conn := c.Dial(address) +// in, rtt, err := c.ExchangeWithConn(message, conn) +// +// This allows users of the library to implement their own connection management, +// as opposed to Exchange, which will always use new connections and incur the added overhead +// that entails when using "tcp" and especially "tcp-tls" clients. 
+func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { if !c.SingleInflight { - return c.exchange(m, address) + return c.exchange(m, conn) } q := m.Question[0] key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { - return c.exchange(m, address) + return c.exchange(m, conn) }) if r != nil && shared { r = r.Copy() @@ -141,15 +164,7 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er return r, rtt, err } -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - - co, err = c.Dial(a) - - if err != nil { - return nil, 0, err - } - defer co.Close() +func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) { opt := m.IsEdns0() // If EDNS0 is used use that for size. diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go index f7e91a23f7b49..f713074a181c0 100644 --- a/vendor/github.com/miekg/dns/generate.go +++ b/vendor/github.com/miekg/dns/generate.go @@ -20,13 +20,13 @@ import ( // of $ after that are interpreted. 
func (zp *ZoneParser) generate(l lex) (RR, bool) { token := l.token - step := 1 + step := int64(1) if i := strings.IndexByte(token, '/'); i >= 0 { if i+1 == len(token) { return zp.setParseError("bad step in $GENERATE range", l) } - s, err := strconv.Atoi(token[i+1:]) + s, err := strconv.ParseInt(token[i+1:], 10, 64) if err != nil || s <= 0 { return zp.setParseError("bad step in $GENERATE range", l) } @@ -40,12 +40,12 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) { return zp.setParseError("bad start-stop in $GENERATE range", l) } - start, err := strconv.Atoi(sx[0]) + start, err := strconv.ParseInt(sx[0], 10, 64) if err != nil { return zp.setParseError("bad start in $GENERATE range", l) } - end, err := strconv.Atoi(sx[1]) + end, err := strconv.ParseInt(sx[1], 10, 64) if err != nil { return zp.setParseError("bad stop in $GENERATE range", l) } @@ -75,10 +75,10 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) { r := &generateReader{ s: s, - cur: start, - start: start, - end: end, - step: step, + cur: int(start), + start: int(start), + end: int(end), + step: int(step), file: zp.file, lex: &l, @@ -188,7 +188,7 @@ func (r *generateReader) ReadByte() (byte, error) { if errMsg != "" { return 0, r.parseError(errMsg, si+3+sep) } - if r.start+offset < 0 || r.end+offset > 1<<31-1 { + if r.start+offset < 0 || int64(r.end) + int64(offset) > 1<<31-1 { return 0, r.parseError("bad offset in $GENERATE", si+3+sep) } @@ -229,19 +229,19 @@ func modToPrintf(s string) (string, int, string) { return "", 0, "bad base in $GENERATE" } - offset, err := strconv.Atoi(offStr) + offset, err := strconv.ParseInt(offStr, 10, 64) if err != nil { return "", 0, "bad offset in $GENERATE" } - width, err := strconv.Atoi(widthStr) + width, err := strconv.ParseInt(widthStr, 10, 64) if err != nil || width < 0 || width > 255 { return "", 0, "bad width in $GENERATE" } if width == 0 { - return "%" + base, offset, "" + return "%" + base, int(offset), "" } - return "%0" + widthStr + base, offset, "" + 
return "%0" + widthStr + base, int(offset), "" } diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 63656873312a1..7001f6da79c88 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -398,17 +398,12 @@ Loop: return "", lenmsg, ErrLongDomain } for _, b := range msg[off : off+c] { - switch b { - case '.', '(', ')', ';', ' ', '@': - fallthrough - case '"', '\\': + if isDomainNameLabelSpecial(b) { s = append(s, '\\', b) - default: - if b < ' ' || b > '~' { // unprintable, use \DDD - s = append(s, escapeByte(b)...) - } else { - s = append(s, b) - } + } else if b < ' ' || b > '~' { + s = append(s, escapeByte(b)...) + } else { + s = append(s, b) } } s = append(s, '.') diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 98fadc3192306..cbcab57bcd7f8 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -423,92 +423,47 @@ Option: if off+int(optlen) > len(msg) { return nil, len(msg), &Error{err: "overflow unpacking opt"} } + e := makeDataOpt(code) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func makeDataOpt(code uint16) EDNS0 { switch code { case EDNS0NSID: - e := new(EDNS0_NSID) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_NSID) case EDNS0SUBNET: - e := new(EDNS0_SUBNET) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_SUBNET) case EDNS0COOKIE: - e := new(EDNS0_COOKIE) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = 
append(edns, e) - off += int(optlen) + return new(EDNS0_COOKIE) case EDNS0EXPIRE: - e := new(EDNS0_EXPIRE) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_EXPIRE) case EDNS0UL: - e := new(EDNS0_UL) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_UL) case EDNS0LLQ: - e := new(EDNS0_LLQ) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_LLQ) case EDNS0DAU: - e := new(EDNS0_DAU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_DAU) case EDNS0DHU: - e := new(EDNS0_DHU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_DHU) case EDNS0N3U: - e := new(EDNS0_N3U) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_N3U) case EDNS0PADDING: - e := new(EDNS0_PADDING) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) + return new(EDNS0_PADDING) default: e := new(EDNS0_LOCAL) e.Code = code - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - } - - if off < len(msg) { - goto Option + return e } - - return edns, off, nil } func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { @@ -521,9 +476,7 @@ func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length off += 4 
if off+len(b) > len(msg) { - copy(msg[off:], b) - off = len(msg) - continue + return len(msg), &Error{err: "overflow packing opt"} } // Actual data copy(msg[off:off+len(b)], b) @@ -783,28 +736,31 @@ func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) { if int(prefix) > 8*len(ip) { return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"} } - afdlen := int(nlen & 0x7f) - if (int(prefix)+7)/8 != afdlen { - return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"} + if afdlen > len(ip) { + return APLPrefix{}, len(msg), &Error{err: "APL length too long"} } if off+afdlen > len(msg) { return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"} } off += copy(ip, msg[off:off+afdlen]) - if prefix%8 > 0 { + if afdlen > 0 { last := ip[afdlen-1] - zero := uint8(0xff) >> (prefix % 8) - if last&zero > 0 { + if last == 0 { return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"} } } + ipnet := net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(prefix), 8*len(ip)), + } + network := ipnet.IP.Mask(ipnet.Mask) + if !network.Equal(ipnet.IP) { + return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"} + } return APLPrefix{ Negation: (nlen & 0x80) != 0, - Network: net.IPNet{ - IP: ip, - Mask: net.CIDRMask(int(prefix), 8*len(ip)), - }, + Network: ipnet, }, off, nil } diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index e52f43c924586..e18566fc8752a 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -87,16 +87,6 @@ type lex struct { column int // column in the file } -// Token holds the token that are returned when a zone file is parsed. -type Token struct { - // The scanned resource record when error is not nil. - RR - // When an error occurred, this has the error specifics. - Error *ParseError - // A potential comment positioned after the RR and on the same line. 
- Comment string -} - // ttlState describes the state necessary to fill in an omitted RR TTL type ttlState struct { ttl uint32 // ttl is the current default TTL @@ -130,70 +120,6 @@ func ReadRR(r io.Reader, file string) (RR, error) { return rr, zp.Err() } -// ParseZone reads a RFC 1035 style zonefile from r. It returns -// Tokens on the returned channel, each consisting of either a -// parsed RR and optional comment or a nil RR and an error. The -// channel is closed by ParseZone when the end of r is reached. -// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. The string origin is used as the initial -// origin, as if the file would start with an $ORIGIN directive. -// -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all -// supported. Note that $GENERATE's range support up to a maximum of -// of 65535 steps. -// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// for x := range dns.ParseZone(strings.NewReader(z), "", "") { -// if x.Error != nil { -// // log.Println(x.Error) -// } else { -// // Do something with x.RR -// } -// } -// -// Comments specified after an RR (and on the same line!) are -// returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned in Token.Comment. -// Comments inside the RR are returned concatenated along with the -// RR. Comments on a line by themselves are discarded. -// -// To prevent memory leaks it is important to always fully drain the -// returned channel. If an error occurs, it will always be the last -// Token sent on the channel. -// -// Deprecated: New users should prefer the ZoneParser API. 
-func ParseZone(r io.Reader, origin, file string) chan *Token { - t := make(chan *Token, 10000) - go parseZone(r, origin, file, t) - return t -} - -func parseZone(r io.Reader, origin, file string, t chan *Token) { - defer close(t) - - zp := NewZoneParser(r, origin, file) - zp.SetIncludeAllowed(true) - - for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { - t <- &Token{RR: rr, Comment: zp.Comment()} - } - - if err := zp.Err(); err != nil { - pe, ok := err.(*ParseError) - if !ok { - pe = &ParseError{file: file, err: err.Error()} - } - - t <- &Token{Error: pe} - } -} - // ZoneParser is a parser for an RFC 1035 style zonefile. // // Each parsed RR in the zone is returned sequentially from Next. An diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 3fe8a82abb0bf..11b08ad1d12e5 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -1,6 +1,7 @@ package dns import ( + "bytes" "encoding/base64" "net" "strconv" @@ -10,15 +11,15 @@ import ( // A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) // or an error func endingToString(c *zlexer, errstr string) (string, *ParseError) { - var s string + var buffer bytes.Buffer l, _ := c.Next() // zString for l.value != zNewline && l.value != zEOF { if l.err { - return s, &ParseError{"", errstr, l} + return buffer.String(), &ParseError{"", errstr, l} } switch l.value { case zString: - s += l.token + buffer.WriteString(l.token) case zBlank: // Ok default: return "", &ParseError{"", errstr, l} @@ -26,7 +27,7 @@ func endingToString(c *zlexer, errstr string) (string, *ParseError) { l, _ = c.Next() } - return s, nil + return buffer.String(), nil } // A remainder of the rdata with embedded spaces, split on unquoted whitespace diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index b8c75557bc895..7776b4f066697 100644 --- 
a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -209,8 +209,11 @@ var CertTypeToString = map[uint16]string{ //go:generate go run types_generate.go -// Question holds a DNS question. There can be multiple questions in the -// question section of a message. Usually there is just one. +// Question holds a DNS question. Usually there is just one. While the +// original DNS RFCs allow multiple questions in the question section of a +// message, in practice it never works. Because most DNS servers see multiple +// questions as an error, it is recommended to only have one question per +// message. type Question struct { Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) Qtype uint16 @@ -442,45 +445,38 @@ func sprintName(s string) string { var dst strings.Builder for i := 0; i < len(s); { - if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { + if s[i] == '.' { if dst.Len() != 0 { - dst.WriteString(s[i : i+2]) + dst.WriteByte('.') } - i += 2 + i++ continue } b, n := nextByte(s, i) if n == 0 { - i++ - continue - } - if b == '.' { - if dst.Len() != 0 { - dst.WriteByte('.') + // Drop "dangling" incomplete escapes. 
+ if dst.Len() == 0 { + return s[:i] } - i += n - continue + break } - switch b { - case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape + if isDomainNameLabelSpecial(b) { if dst.Len() == 0 { dst.Grow(len(s) * 2) dst.WriteString(s[:i]) } dst.WriteByte('\\') dst.WriteByte(b) - default: - if ' ' <= b && b <= '~' { - if dst.Len() != 0 { - dst.WriteByte(b) - } - } else { - if dst.Len() == 0 { - dst.Grow(len(s) * 2) - dst.WriteString(s[:i]) - } - dst.WriteString(escapeByte(b)) + } else if b < ' ' || b > '~' { // unprintable, use \DDD + if dst.Len() == 0 { + dst.Grow(len(s) * 2) + dst.WriteString(s[:i]) + } + dst.WriteString(escapeByte(b)) + } else { + if dst.Len() != 0 { + dst.WriteByte(b) } } i += n @@ -503,15 +499,10 @@ func sprintTxtOctet(s string) string { } b, n := nextByte(s, i) - switch { - case n == 0: + if n == 0 { i++ // dangling back slash - case b == '.': - dst.WriteByte('.') - case b < ' ' || b > '~': - dst.WriteString(escapeByte(b)) - default: - dst.WriteByte(b) + } else { + writeTXTStringByte(&dst, b) } i += n } @@ -587,6 +578,17 @@ func escapeByte(b byte) string { return escapedByteLarge[int(b)*4 : int(b)*4+4] } +// isDomainNameLabelSpecial returns true if +// a domain name label byte should be prefixed +// with an escaping backslash. 
+func isDomainNameLabelSpecial(b byte) bool { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')', '"', '\\': + return true + } + return false +} + func nextByte(s string, offset int) (byte, int) { if offset >= len(s) { return 0, 0 @@ -1118,6 +1120,7 @@ type URI struct { Target string `dns:"octet"` } +// rr.Target to be parsed as a sequence of character encoded octets according to RFC 3986 func (rr *URI) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) @@ -1279,6 +1282,7 @@ type CAA struct { Value string `dns:"octet"` } +// rr.Value Is the character-string encoding of the value field as specified in RFC 1035, Section 5.1. func (rr *CAA) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index d2c8f05fe1f6f..7dd9bbc09e5fa 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 29} +var Version = v{1, 1, 30} // v holds the version of this library. 
type v struct { diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index a58a8c0c06229..d7ec2d974370f 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -104,6 +104,48 @@ func (r1 *CAA) isDuplicate(_r2 RR) bool { return true } +func (r1 *CDNSKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CDNSKEY) + if !ok { + return false + } + _ = r2 + if r1.Flags != r2.Flags { + return false + } + if r1.Protocol != r2.Protocol { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *CDS) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*CDS) + if !ok { + return false + } + _ = r2 + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.DigestType != r2.DigestType { + return false + } + if r1.Digest != r2.Digest { + return false + } + return true +} + func (r1 *CERT) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*CERT) if !ok { @@ -172,6 +214,27 @@ func (r1 *DHCID) isDuplicate(_r2 RR) bool { return true } +func (r1 *DLV) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*DLV) + if !ok { + return false + } + _ = r2 + if r1.KeyTag != r2.KeyTag { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.DigestType != r2.DigestType { + return false + } + if r1.Digest != r2.Digest { + return false + } + return true +} + func (r1 *DNAME) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*DNAME) if !ok { @@ -339,6 +402,27 @@ func (r1 *HIP) isDuplicate(_r2 RR) bool { return true } +func (r1 *KEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*KEY) + if !ok { + return false + } + _ = r2 + if r1.Flags != r2.Flags { + return false + } + if r1.Protocol != r2.Protocol { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + func 
(r1 *KX) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*KX) if !ok { @@ -849,6 +933,42 @@ func (r1 *RT) isDuplicate(_r2 RR) bool { return true } +func (r1 *SIG) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*SIG) + if !ok { + return false + } + _ = r2 + if r1.TypeCovered != r2.TypeCovered { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + if r1.Labels != r2.Labels { + return false + } + if r1.OrigTtl != r2.OrigTtl { + return false + } + if r1.Expiration != r2.Expiration { + return false + } + if r1.Inception != r2.Inception { + return false + } + if r1.KeyTag != r2.KeyTag { + return false + } + if !isDuplicateName(r1.SignerName, r2.SignerName) { + return false + } + if r1.Signature != r2.Signature { + return false + } + return true +} + func (r1 *SMIMEA) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*SMIMEA) if !ok { diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 1cbd6d3fe53f0..5bb59fa601195 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -685,8 +685,8 @@ func (rr *ANY) copy() RR { } func (rr *APL) copy() RR { Prefixes := make([]APLPrefix, len(rr.Prefixes)) - for i := range rr.Prefixes { - Prefixes[i] = rr.Prefixes[i].copy() + for i, e := range rr.Prefixes { + Prefixes[i] = e.copy() } return &APL{rr.Hdr, Prefixes} } @@ -698,6 +698,12 @@ func (rr *AVC) copy() RR { func (rr *CAA) copy() RR { return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} } +func (rr *CDNSKEY) copy() RR { + return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)} +} +func (rr *CDS) copy() RR { + return &CDS{*rr.DS.copy().(*DS)} +} func (rr *CERT) copy() RR { return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} } @@ -712,6 +718,9 @@ func (rr *CSYNC) copy() RR { func (rr *DHCID) copy() RR { return &DHCID{rr.Hdr, rr.Digest} } +func (rr *DLV) copy() RR { + return &DLV{*rr.DS.copy().(*DS)} +} func (rr *DNAME) copy() RR { return &DNAME{rr.Hdr, rr.Target} } @@ -744,6 +753,9 
@@ func (rr *HIP) copy() RR { copy(RendezvousServers, rr.RendezvousServers) return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} } +func (rr *KEY) copy() RR { + return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} +} func (rr *KX) copy() RR { return &KX{rr.Hdr, rr.Preference, rr.Exchanger} } @@ -847,6 +859,9 @@ func (rr *RRSIG) copy() RR { func (rr *RT) copy() RR { return &RT{rr.Hdr, rr.Preference, rr.Host} } +func (rr *SIG) copy() RR { + return &SIG{*rr.RRSIG.copy().(*RRSIG)} +} func (rr *SMIMEA) copy() RR { return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} } diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go index 375105d27b17c..bfb305ffa463a 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "net/http/httptrace" + "net/url" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" @@ -33,6 +34,7 @@ type Transport struct { type clientOptions struct { operationName string componentName string + urlTagFunc func(u *url.URL) string disableClientTrace bool disableInjectSpanContext bool spanObserver func(span opentracing.Span, r *http.Request) @@ -49,6 +51,15 @@ func OperationName(operationName string) ClientOption { } } +// URLTagFunc returns a ClientOption that uses given function f +// to set the span's http.url tag. Can be used to change the default +// http.url tag, eg to redact sensitive information. +func URLTagFunc(f func(u *url.URL) string) ClientOption { + return func(options *clientOptions) { + options.urlTagFunc = f + } +} + // ComponentName returns a ClientOption that sets the component // name for the client-side span. 
func ComponentName(componentName string) ClientOption { @@ -109,6 +120,9 @@ func ClientSpanObserver(f func(span opentracing.Span, r *http.Request)) ClientOp // } func TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) { opts := &clientOptions{ + urlTagFunc: func(u *url.URL) string { + return u.String() + }, spanObserver: func(_ opentracing.Span, _ *http.Request) {}, } for _, opt := range options { @@ -159,7 +173,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { tracer.start(req) ext.HTTPMethod.Set(tracer.sp, req.Method) - ext.HTTPUrl.Set(tracer.sp, req.URL.String()) + ext.HTTPUrl.Set(tracer.sp, tracer.opts.urlTagFunc(req.URL)) tracer.opts.spanObserver(tracer.sp, req) if !tracer.opts.disableInjectSpanContext { diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go index e98f2b36deaac..db2df6620412b 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go @@ -108,6 +108,12 @@ func MiddlewareFunc(tr opentracing.Tracer, h http.HandlerFunc, options ...MWOpti for _, opt := range options { opt(&opts) } + // set component name, use "net/http" if caller does not specify + componentName := opts.componentName + if componentName == "" { + componentName = defaultComponentName + } + fn := func(w http.ResponseWriter, r *http.Request) { if !opts.spanFilter(r) { h(w, r) @@ -117,24 +123,32 @@ func MiddlewareFunc(tr opentracing.Tracer, h http.HandlerFunc, options ...MWOpti sp := tr.StartSpan(opts.opNameFunc(r), ext.RPCServerOption(ctx)) ext.HTTPMethod.Set(sp, r.Method) ext.HTTPUrl.Set(sp, opts.urlTagFunc(r.URL)) - opts.spanObserver(sp, r) - - // set component name, use "net/http" if caller does not specify - componentName := opts.componentName - if componentName == "" { - componentName = 
defaultComponentName - } ext.Component.Set(sp, componentName) + opts.spanObserver(sp, r) sct := &statusCodeTracker{ResponseWriter: w} r = r.WithContext(opentracing.ContextWithSpan(r.Context(), sp)) defer func() { - ext.HTTPStatusCode.Set(sp, uint16(sct.status)) - if sct.status >= http.StatusInternalServerError || !sct.wroteheader { + panicErr := recover() + didPanic := panicErr != nil + + if sct.status == 0 && !didPanic { + // Standard behavior of http.Server is to assume status code 200 if one was not written by a handler that returned successfully. + // https://github.com/golang/go/blob/fca286bed3ed0e12336532cc711875ae5b3cb02a/src/net/http/server.go#L120 + sct.status = 200 + } + if sct.status > 0 { + ext.HTTPStatusCode.Set(sp, uint16(sct.status)) + } + if sct.status >= http.StatusInternalServerError || didPanic { ext.Error.Set(sp, true) } sp.Finish() + + if didPanic { + panic(panicErr) + } }() h(sct.wrappedResponseWriter(), r) diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker-old.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker-old.go deleted file mode 100644 index 8704e5169b06b..0000000000000 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker-old.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build go1.7,!go1.8 - -package nethttp - -import ( - "io" - "net/http" -) - -type statusCodeTracker struct { - http.ResponseWriter - status int - wroteheader bool -} - -func (w *statusCodeTracker) WriteHeader(status int) { - w.status = status - w.wroteheader = true - w.ResponseWriter.WriteHeader(status) -} - -func (w *statusCodeTracker) Write(b []byte) (int, error) { - if !w.wroteheader { - w.wroteheader = true - w.status = 200 - } - return w.ResponseWriter.Write(b) -} - -// wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional -// interfaces as the original. 
This implementation is based on -// https://github.com/felixge/httpsnoop. -func (w *statusCodeTracker) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = w.ResponseWriter.(http.Hijacker) - cn, i1 = w.ResponseWriter.(http.CloseNotifier) - fl, i3 = w.ResponseWriter.(http.Flusher) - rf, i4 = w.ResponseWriter.(io.ReaderFrom) - ) - i2 := false - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{w} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{w, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{w, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{w, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{w, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{w, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{w, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{w, cn, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{w, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{w, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{w, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{w, hj, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{w, hj, cn} - case i0 && i1 && !i2 && !i3 && i4: - return struct { - 
http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{w, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{w, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{w, hj, cn, fl, rf} - default: - return struct { - http.ResponseWriter - }{w} - } -} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go index 2b2753140b28b..80a5ce08645b3 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go @@ -9,21 +9,15 @@ import ( type statusCodeTracker struct { http.ResponseWriter - status int - wroteheader bool + status int } func (w *statusCodeTracker) WriteHeader(status int) { w.status = status - w.wroteheader = true w.ResponseWriter.WriteHeader(status) } func (w *statusCodeTracker) Write(b []byte) (int, error) { - if !w.wroteheader { - w.wroteheader = true - w.status = 200 - } return w.ResponseWriter.Write(b) } diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml index 8d5b75e41e751..b950e42965f00 100644 --- a/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -2,8 +2,8 @@ language: go matrix: include: - - go: "1.11.x" - - go: "1.12.x" + - go: "1.13.x" + - go: "1.14.x" - go: "tip" env: - LINT=true diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md index 7c14febe1099d..d3bfcf623594d 100644 --- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md +++ 
b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -1,6 +1,23 @@ Changes by Version ================== + +1.2.0 (2020-07-01) +------------------- + +* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro +* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed +* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena +* Add Go module support (#215) -- Zaba505 +* Make SetTag helper types in ext public (#229) -- Blake Edwards +* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov +* Improve noop impementation (#223) -- chanxuehong +* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak +* Fix typo in comments (#222) -- meteorlxy +* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅 +* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad + + 1.1.0 (2019-03-23) ------------------- diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod index ae57bfbde25c3..bf48bb5d73f74 100644 --- a/vendor/github.com/opentracing/opentracing-go/go.mod +++ b/vendor/github.com/opentracing/opentracing-go/go.mod @@ -1,5 +1,5 @@ module github.com/opentracing/opentracing-go -go 1.12 +go 1.14 require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go index 8865e7547ed27..1831bc9b26371 100644 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -7,10 +7,12 @@ type contextKey struct{} var activeSpanKey = contextKey{} // ContextWithSpan returns a new `context.Context` that holds a reference to -// `span`'s SpanContext. +// the span. 
If span is nil, a new context without an active span is returned. func ContextWithSpan(ctx context.Context, span Span) context.Context { - if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { - ctx = tracerWithHook.ContextWithSpanHook(ctx, span) + if span != nil { + if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { + ctx = tracerWithHook.ContextWithSpanHook(ctx, span) + } } return context.WithValue(ctx, activeSpanKey, span) } diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go index 5e92cc8512a87..f222ded797c17 100644 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -122,10 +122,10 @@ func Float64(key string, val float64) Field { } } -// Error adds an error with the key "error" to a Span.LogFields() record +// Error adds an error with the key "error.object" to a Span.LogFields() record func Error(err error) Field { return Field{ - key: "error", + key: "error.object", fieldType: errorType, interfaceVal: err, } diff --git a/vendor/github.com/prometheus/alertmanager/api/metrics/metrics.go b/vendor/github.com/prometheus/alertmanager/api/metrics/metrics.go index 0f1fb9a708a79..483569ab9d7dd 100644 --- a/vendor/github.com/prometheus/alertmanager/api/metrics/metrics.go +++ b/vendor/github.com/prometheus/alertmanager/api/metrics/metrics.go @@ -51,4 +51,4 @@ func (a *Alerts) Firing() prometheus.Counter { return a.firing } func (a *Alerts) Resolved() prometheus.Counter { return a.resolved } // Invalid returns a counter of invalid alerts. 
-func (a *Alerts) Invalid() prometheus.Counter { return a.firing } +func (a *Alerts) Invalid() prometheus.Counter { return a.invalid } diff --git a/vendor/github.com/prometheus/alertmanager/api/v1/api.go b/vendor/github.com/prometheus/alertmanager/api/v1/api.go index 25c18a57e04af..3a1466439cf57 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v1/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v1/api.go @@ -747,7 +747,7 @@ func (api *API) respond(w http.ResponseWriter, data interface{}) { Data: data, }) if err != nil { - level.Error(api.logger).Log("msg", "Error marshalling JSON", "err", err) + level.Error(api.logger).Log("msg", "Error marshaling JSON", "err", err) return } diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/api.go b/vendor/github.com/prometheus/alertmanager/api/v2/api.go index c147a23a6f473..6cde1972fddb3 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/api.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/api.go @@ -98,13 +98,13 @@ func NewAPI( uptime: time.Now(), } - // load embedded swagger file + // Load embedded swagger file. swaggerSpec, err := loads.Analyzed(restapi.SwaggerJSON, "") if err != nil { return nil, fmt.Errorf("failed to load embedded swagger file: %v", err.Error()) } - // create new service API + // Create new service API. openAPI := operations.NewAlertmanagerAPI(swaggerSpec) // Skip the redoc middleware, only serving the OpenAPI specification and @@ -124,14 +124,16 @@ func NewAPI( openAPI.SilenceGetSilencesHandler = silence_ops.GetSilencesHandlerFunc(api.getSilencesHandler) openAPI.SilencePostSilencesHandler = silence_ops.PostSilencesHandlerFunc(api.postSilencesHandler) - openAPI.Logger = func(s string, i ...interface{}) { level.Error(api.logger).Log(i...) 
} - handleCORS := cors.Default().Handler api.Handler = handleCORS(openAPI.Serve(nil)) return &api, nil } +func (api *API) requestLogger(req *http.Request) log.Logger { + return log.With(api.logger, "path", req.URL.Path, "method", req.Method) +} + // Update sets the API struct members that may change between reloads of alertmanager. func (api *API) Update(cfg *config.Config, setAlertStatus setAlertStatusFn) { api.mtx.Lock() @@ -166,6 +168,7 @@ func (api *API) getStatusHandler(params general_ops.GetStatusParams) middleware. }, Cluster: &open_api_models.ClusterStatus{ Status: &status, + Peers: []*open_api_models.PeerStatus{}, }, } @@ -215,18 +218,20 @@ func (api *API) getAlertsHandler(params alert_ops.GetAlertsParams) middleware.Re // are no alerts present res = open_api_models.GettableAlerts{} ctx = params.HTTPRequest.Context() + + logger = api.requestLogger(params.HTTPRequest) ) matchers, err := parseFilter(params.Filter) if err != nil { - level.Error(api.logger).Log("msg", "failed to parse matchers", "err", err) + level.Error(logger).Log("msg", "Failed to parse matchers", "err", err) return alertgroup_ops.NewGetAlertGroupsBadRequest().WithPayload(err.Error()) } if params.Receiver != nil { receiverFilter, err = regexp.Compile("^(?:" + *params.Receiver + ")$") if err != nil { - level.Error(api.logger).Log("msg", "failed to compile receiver regex", "err", err) + level.Error(logger).Log("msg", "Failed to compile receiver regex", "err", err) return alert_ops. NewGetAlertsBadRequest(). 
WithPayload( @@ -271,7 +276,7 @@ func (api *API) getAlertsHandler(params alert_ops.GetAlertsParams) middleware.Re api.mtx.RUnlock() if err != nil { - level.Error(api.logger).Log("msg", "failed to get alerts", "err", err) + level.Error(logger).Log("msg", "Failed to get alerts", "err", err) return alert_ops.NewGetAlertsInternalServerError().WithPayload(err.Error()) } sort.Slice(res, func(i, j int) bool { @@ -282,6 +287,8 @@ func (api *API) getAlertsHandler(params alert_ops.GetAlertsParams) middleware.Re } func (api *API) postAlertsHandler(params alert_ops.PostAlertsParams) middleware.Responder { + logger := api.requestLogger(params.HTTPRequest) + alerts := openAPIAlertsToAlerts(params.Alerts) now := time.Now() @@ -329,12 +336,12 @@ func (api *API) postAlertsHandler(params alert_ops.PostAlertsParams) middleware. validAlerts = append(validAlerts, a) } if err := api.alerts.Put(validAlerts...); err != nil { - level.Error(api.logger).Log("msg", "failed to create alerts", "err", err) + level.Error(logger).Log("msg", "Failed to create alerts", "err", err) return alert_ops.NewPostAlertsInternalServerError().WithPayload(err.Error()) } if validationErrs.Len() > 0 { - level.Error(api.logger).Log("msg", "failed to validate alerts", "err", validationErrs.Error()) + level.Error(logger).Log("msg", "Failed to validate alerts", "err", validationErrs.Error()) return alert_ops.NewPostAlertsBadRequest().WithPayload(validationErrs.Error()) } @@ -342,18 +349,19 @@ func (api *API) postAlertsHandler(params alert_ops.PostAlertsParams) middleware. 
} func (api *API) getAlertGroupsHandler(params alertgroup_ops.GetAlertGroupsParams) middleware.Responder { - var receiverFilter *regexp.Regexp + logger := api.requestLogger(params.HTTPRequest) matchers, err := parseFilter(params.Filter) if err != nil { - level.Error(api.logger).Log("msg", "failed to parse matchers", "err", err) + level.Error(logger).Log("msg", "Failed to parse matchers", "err", err) return alertgroup_ops.NewGetAlertGroupsBadRequest().WithPayload(err.Error()) } + var receiverFilter *regexp.Regexp if params.Receiver != nil { receiverFilter, err = regexp.Compile("^(?:" + *params.Receiver + ")$") if err != nil { - level.Error(api.logger).Log("msg", "failed to compile receiver regex", "err", err) + level.Error(logger).Log("msg", "Failed to compile receiver regex", "err", err) return alertgroup_ops. NewGetAlertGroupsBadRequest(). WithPayload( @@ -553,12 +561,14 @@ func matchFilterLabels(matchers []*labels.Matcher, sms map[string]string) bool { } func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middleware.Responder { + logger := api.requestLogger(params.HTTPRequest) + matchers := []*labels.Matcher{} if params.Filter != nil { for _, matcherString := range params.Filter { matcher, err := labels.ParseMatcher(matcherString) if err != nil { - level.Error(api.logger).Log("msg", "failed to parse matchers", "err", err) + level.Error(logger).Log("msg", "Failed to parse matchers", "err", err) return alert_ops.NewGetAlertsBadRequest().WithPayload(err.Error()) } @@ -568,7 +578,7 @@ func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middlew psils, _, err := api.silences.Query() if err != nil { - level.Error(api.logger).Log("msg", "failed to get silences", "err", err) + level.Error(logger).Log("msg", "Failed to get silences", "err", err) return silence_ops.NewGetSilencesInternalServerError().WithPayload(err.Error()) } @@ -576,7 +586,7 @@ func (api *API) getSilencesHandler(params silence_ops.GetSilencesParams) middlew for _, 
ps := range psils { silence, err := gettableSilenceFromProto(ps) if err != nil { - level.Error(api.logger).Log("msg", "failed to unmarshal silence from proto", "err", err) + level.Error(logger).Log("msg", "Failed to unmarshal silence from proto", "err", err) return silence_ops.NewGetSilencesInternalServerError().WithPayload(err.Error()) } if !gettableSilenceMatchesFilterLabels(silence, matchers) { @@ -638,20 +648,22 @@ func gettableSilenceMatchesFilterLabels(s open_api_models.GettableSilence, match } func (api *API) getSilenceHandler(params silence_ops.GetSilenceParams) middleware.Responder { + logger := api.requestLogger(params.HTTPRequest) + sils, _, err := api.silences.Query(silence.QIDs(params.SilenceID.String())) if err != nil { - level.Error(api.logger).Log("msg", "failed to get silence by id", "err", err) + level.Error(logger).Log("msg", "Failed to get silence by id", "err", err, "id", params.SilenceID.String()) return silence_ops.NewGetSilenceInternalServerError().WithPayload(err.Error()) } if len(sils) == 0 { - level.Error(api.logger).Log("msg", "failed to find silence", "err", err) + level.Error(logger).Log("msg", "Failed to find silence", "err", err, "id", params.SilenceID.String()) return silence_ops.NewGetSilenceNotFound() } sil, err := gettableSilenceFromProto(sils[0]) if err != nil { - level.Error(api.logger).Log("msg", "failed to convert unmarshal from proto", "err", err) + level.Error(logger).Log("msg", "Failed to convert unmarshal from proto", "err", err) return silence_ops.NewGetSilenceInternalServerError().WithPayload(err.Error()) } @@ -659,10 +671,11 @@ func (api *API) getSilenceHandler(params silence_ops.GetSilenceParams) middlewar } func (api *API) deleteSilenceHandler(params silence_ops.DeleteSilenceParams) middleware.Responder { - sid := params.SilenceID.String() + logger := api.requestLogger(params.HTTPRequest) + sid := params.SilenceID.String() if err := api.silences.Expire(sid); err != nil { - level.Error(api.logger).Log("msg", "failed 
to expire silence", "err", err) + level.Error(logger).Log("msg", "Failed to expire silence", "err", err) return silence_ops.NewDeleteSilenceInternalServerError().WithPayload(err.Error()) } return silence_ops.NewDeleteSilenceOK() @@ -713,30 +726,31 @@ func gettableSilenceFromProto(s *silencepb.Silence) (open_api_models.GettableSil } func (api *API) postSilencesHandler(params silence_ops.PostSilencesParams) middleware.Responder { + logger := api.requestLogger(params.HTTPRequest) sil, err := postableSilenceToProto(params.Silence) if err != nil { - level.Error(api.logger).Log("msg", "failed to marshal silence to proto", "err", err) + level.Error(logger).Log("msg", "Failed to marshal silence to proto", "err", err) return silence_ops.NewPostSilencesBadRequest().WithPayload( fmt.Sprintf("failed to convert API silence to internal silence: %v", err.Error()), ) } if sil.StartsAt.After(sil.EndsAt) || sil.StartsAt.Equal(sil.EndsAt) { - msg := "failed to create silence: start time must be equal or after end time" - level.Error(api.logger).Log("msg", msg, "err", err) + msg := "Failed to create silence: start time must be before end time" + level.Error(logger).Log("msg", msg, "starts_at", sil.StartsAt, "ends_at", sil.EndsAt) return silence_ops.NewPostSilencesBadRequest().WithPayload(msg) } if sil.EndsAt.Before(time.Now()) { - msg := "failed to create silence: end time can't be in the past" - level.Error(api.logger).Log("msg", msg, "err", err) + msg := "Failed to create silence: end time can't be in the past" + level.Error(logger).Log("msg", msg, "ends_at", sil.EndsAt) return silence_ops.NewPostSilencesBadRequest().WithPayload(msg) } sid, err := api.silences.Set(sil) if err != nil { - level.Error(api.logger).Log("msg", "failed to create silence", "err", err) + level.Error(logger).Log("msg", "Failed to create silence", "err", err) if err == silence.ErrNotFound { return silence_ops.NewPostSilencesNotFound().WithPayload(err.Error()) } diff --git 
a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go index 9db4491d82000..063caac4311a9 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/cluster_status.go @@ -38,7 +38,6 @@ type ClusterStatus struct { Name string `json:"name,omitempty"` // peers - // Minimum: 0 Peers []*PeerStatus `json:"peers"` // status diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/openapi.yaml b/vendor/github.com/prometheus/alertmanager/api/v2/openapi.yaml index 7d8f77dea7b59..84e8297a581fb 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/openapi.yaml +++ b/vendor/github.com/prometheus/alertmanager/api/v2/openapi.yaml @@ -279,7 +279,6 @@ definitions: enum: ["ready", "settling", "disabled"] peers: type: array - minimum: 0 items: $ref: '#/definitions/peerStatus' required: diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/embedded_spec.go b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/embedded_spec.go index c13f93ff5375c..77168bf683d9a 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/restapi/embedded_spec.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/restapi/embedded_spec.go @@ -1306,7 +1306,6 @@ func init() { }, "peers": { "type": "array", - "minimum": 0, "items": { "$ref": "#/definitions/peerStatus" } diff --git a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go index bcc6f6e7ff8bc..b06dfd01439ba 100644 --- a/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go +++ b/vendor/github.com/prometheus/alertmanager/asset/assets_vfsdata.go @@ -136,9 +136,9 @@ var Assets = func() http.FileSystem { "/static/script.js": &vfsgen۰CompressedFileInfo{ name: "script.js", modTime: 
time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 98980, + uncompressedSize: 98977, - compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\xfd\x09\x7b\xdb\xbc\xb2\x20\x0c\xfe\x15\x9b\xad\xab\x03\x44\x65\x46\xf2\x96\x84\x32\x8e\xc6\x49\xec\xec\x76\xe2\xec\xf1\xeb\xce\x07\x52\x90\x4d\x4b\x06\x15\x08\xf4\x12\x8b\xfd\xdb\xe7\x41\x81\xab\x44\x2a\x39\xdd\xb7\x67\xbe\x7b\xdf\x13\x8b\x04\x50\x28\x00\x85\xda\x50\x28\xae\x8f\x62\x19\xe8\x30\x92\x44\xd2\x7b\x27\x9e\x89\xb5\x99\x56\x61\xa0\x9d\x7e\x56\xb0\xa6\x88\x04\x05\x9a\xde\x2b\xa1\x63\x25\xd7\xb4\xcb\x99\x04\xed\x8e\x98\x02\x9d\xe4\xd5\x66\xa4\xa8\xa2\xc8\x26\x68\xc8\x41\xab\xbc\xa0\xdc\x5b\x06\x8e\x28\x90\x34\x49\x68\x01\x4a\x10\x51\x02\xb5\x05\xa2\x00\xa5\x97\x41\xad\x84\x2e\x88\x06\x0b\xbf\xdc\x81\x26\x71\xa9\x83\x6d\x88\x8b\x0e\xc4\x32\xb4\xff\xb4\xcf\x98\x08\xc8\x7b\x2d\x77\x1b\x12\x5e\xea\x76\x07\x78\xd1\x6d\xbc\x0c\xf0\xbf\x01\x13\x4e\x62\x28\xe3\x52\x46\x26\x26\x41\x09\x99\x5d\x08\x0a\x64\xf8\x32\xcc\xff\x3b\xf8\x05\x84\xc3\x02\x86\x65\x14\x39\x09\x4b\x28\x3e\x82\xb0\x40\x31\x58\x06\xfb\xff\x33\xac\x43\x12\xc0\x32\xde\x65\xc4\x03\x12\x95\x10\x7f\x0c\x51\x81\x78\xb8\x0c\xf9\xff\x9f\x63\x89\x48\x08\xb5\xa3\x29\x0f\x27\x22\xa3\xd2\x70\x9e\xc0\xa8\x18\x4e\xb4\x0c\xfc\xff\x65\x23\x1c\x91\x08\x9a\xc6\x58\x1e\xe4\x78\x81\xcd\x6d\x32\xc6\xa4\xcb\x07\xd2\x1d\x11\xf3\xde\x33\xfd\x10\x5d\x6a\x71\x6b\x5b\x40\x81\xe4\x56\xb5\x0d\x88\xbc\x15\x11\xa5\x86\xfb\x59\x43\x28\x86\xbc\xbd\xd8\x14\xe2\x72\x63\x12\x97\xda\xdf\x14\xed\xa1\x98\xc7\x9d\x65\x08\xc0\x17\x60\x10\x5e\x02\x33\x29\x83\x81\x62\x8d\x76\xeb\x00\x41\xb0\x0c\x8a\x04\x25\x68\x7e\x15\x1a\x14\x64\xf0\xa8\x1e\x1e\x84\xb5\x10\x49\x48\x93\x6b\xae\xd6\x46\x4c\x90\x62\x39\xed\xd2\x8c\x22\x45\x4c\x99\x60\xfb\x4a\xf1\x3b\x22\x29\xc4\xac\xdb\x8f\xf7\x64\x3f\xee\x74\xa8\x38\x8d\xcf\x98\x26\xaa\x13\xd3\x7e\xc6\xff\x13\x0a\xd7\x6c\x56\x81\x54\xc0\xd1\x05\x1c\xc1\xba\x7d\xb1\x27\xdb\x6d\xe5\xfa\x7d\xd1\xe9\x50\x7d\x2a\xce\x98\x72\x39\x28\x66\x5e\xe5
\xc2\x6f\x22\xe4\xb9\xbe\x60\x02\x8e\x8d\x68\xa1\x09\x85\x21\x23\x8b\x1d\x64\x9b\xe5\x54\x9e\x25\x14\x56\x0d\x24\x03\x08\x71\x8a\x8b\xa0\xc0\x59\xb7\xcf\xf7\x44\x9f\x77\x3a\x34\x3e\xe5\x67\x4c\x9f\xf2\xb3\x0c\x83\xf8\x54\x9e\x31\x05\x71\x42\xa1\x79\x58\x2a\x83\x9a\xcd\x94\xee\xf4\xb2\xb9\xd2\xc5\x5c\xa9\xd3\x38\x87\x2b\x4e\xf5\x19\x93\x20\xfe\x1e\x5f\x03\x4c\x20\x30\xc5\xcc\xee\xd1\xa7\xf1\x19\xa8\x7c\xea\xd5\x5f\x42\xda\xe8\xf5\xbb\x7b\x4c\xf4\xc5\xc6\x46\x0e\x48\x2c\x00\xa2\xd0\x5a\x9e\xe4\x55\xc3\xad\x19\xac\x24\x66\xb8\x15\xca\xf8\x3f\x5e\x18\x64\x1a\x1d\x0e\x66\x81\x72\xc8\x71\x2d\xe4\x9c\x80\x66\x93\x30\x10\x38\x82\xda\x7a\xb6\x77\x55\xf4\x2e\x37\x44\x3f\x43\x66\x2f\x6e\xb7\x49\x9c\xe3\x46\xfb\x19\xbe\x3c\x43\xb2\x13\x53\x08\x58\xb7\x1f\xec\x89\x7e\xd0\xe9\x50\x7e\x1a\x98\x75\x0e\xce\xb0\xaa\x2d\x89\xb3\x92\x8e\x30\xb4\x15\xe4\x34\xc0\x13\x0a\xf7\x2d\xaf\x9b\xd0\x42\xfd\xbb\x2a\x53\xf4\x7d\xcb\xeb\x01\xf7\x24\xf8\x9e\x4a\x70\x9b\x5e\xb0\x19\xb9\x2a\x55\x3f\x30\xac\x37\xc3\x4a\xb1\x16\x68\x26\x53\x6c\xfb\x7a\x63\xa3\x4f\x15\xbb\x22\xf2\x54\x57\x17\x38\x6f\x3e\xad\x36\x3f\x3d\xeb\x4b\xd7\xef\x4b\x26\x5d\x9f\x2a\x77\x1a\xcf\x2e\x88\x74\x79\xa9\xa5\xa9\x77\xbe\x92\x55\x9c\x9e\xf5\x95\xeb\xb7\xdb\xda\xec\x62\xb3\x97\x41\x33\xed\xfa\x54\x58\x70\xb8\x84\x2e\x07\xed\x72\x9a\xc3\x3d\x30\xfc\x9a\xc2\x1d\x23\x7a\x01\xb2\xe1\xf6\x19\xec\xb8\x0c\xbb\xdd\x16\xd5\x0e\x40\x30\xe1\xfa\x34\xb6\xdd\xdc\x16\xdd\x80\xa8\x76\x15\x9b\xae\xc2\xa5\x7e\x8c\x70\x28\x96\x77\xb1\xa7\x76\x3b\xae\xeb\x0e\x62\x16\xbb\x3e\xe5\xb6\xd3\xfd\x6a\xa7\x10\x57\x3b\xe6\xa6\xe3\xb8\xa6\x63\x23\x55\xb2\xae\x83\xfa\xae\xdb\x6d\xde\xdc\x3f\x70\xc6\x5d\x9f\x06\x16\x8b\x9b\x65\x2c\x80\x57\x31\x09\x68\x95\x9d\x69\x28\x04\xf8\x01\x31\x44\xe1\xce\x22\xa5\xeb\xb9\xec\x2b\xa2\x0d\x07\xd7\x44\x51\x9a\xe0\x7f\xfd\x12\x28\xf1\x57\xa0\x2c\x1f\x19\x9b\xda\x25\xc2\xd4\x8c\xb1\x1b\x3d\xe8\x7a\xe6\xc7\x9d\x1e\x6c\xf4\xbc\x1e\xf6\xb0\xc4\x7a\x4b\x3c\xaf\xa1\x28\x88\xe4\x2c\x9a\x08\x77\x12\x9d\x13\xd9\x71\xbc\xb5\xbd\x50
\x6a\xa1\x24\x9f\xcc\xfe\xed\x50\x50\xe5\x3d\xf7\xcc\xec\x02\x7d\xa1\xa2\x9b\xb5\x03\xa5\x22\x45\x9c\x0b\xad\xa7\x33\xef\xe1\xc3\xf3\x50\x5f\xc4\xbe\x1b\x44\x57\x0f\xc5\xe4\xea\x61\x10\x29\xf1\xd0\x9f\x44\xfe\xc3\x9e\xdb\x75\xbb\x0f\x2f\x42\xa9\x67\x0f\x9d\x8e\xec\x38\xee\xd5\xd0\x29\x89\xe7\xc3\x05\x9e\x09\x66\x63\x40\xcc\x8e\x70\xd5\xbb\x20\x68\xdf\xf0\x16\xcd\x84\x3b\x8d\xa6\x84\xd2\xbe\x29\xd3\xb8\x6c\x3e\x96\x97\xf8\x5b\x0e\xf5\xa8\xd8\x14\xe1\x88\xf4\xba\xdd\x3d\x4d\x33\xfe\x6a\x17\xff\x18\xfb\xa5\xb0\xde\xed\x87\x23\x22\x19\x63\x2a\xad\x61\xdf\x38\x91\x7f\x29\x02\xed\xac\x33\x7d\x37\x15\xd1\x68\x4d\xce\xe7\x32\x9e\x4c\x8c\xce\x90\xff\xca\x9a\x38\x59\xc7\x0e\xcb\xab\xb7\xdb\xcf\xc8\x0e\x85\xf5\x5e\xce\x0b\xe3\xb5\x50\xae\x49\xb7\xb5\xd7\x6d\xb7\x89\x64\x07\x48\x1d\xca\xfc\x35\x88\x48\x1a\x8e\xc8\xfa\x11\x91\x28\xad\xcc\x3f\xba\xd3\x33\xc3\x4b\xb1\xea\xf5\x33\xf4\x90\xb7\x5c\xb2\x19\x39\xa4\xf0\x71\x49\x87\x48\x6b\x1d\xa6\x9c\xbc\x58\xbc\x57\x19\x0b\xaa\x1d\x5d\x36\x3d\x38\x13\x83\xae\x27\xf7\x14\x92\x95\x99\x8b\x75\xe9\xb6\xd2\x72\xa2\xd9\x2b\xc3\xe9\xcc\xc6\xa1\x74\xa0\xbd\xf4\x85\x0f\xca\xf5\xf1\x85\x79\x0a\x40\xb9\x81\x15\x02\x86\x4f\xa2\xf2\xd2\x6e\xaf\x57\x1b\x5b\xee\x69\xd5\x98\x82\xb2\xe7\x73\x03\x6d\xd0\xf3\x94\xeb\x1b\x04\xba\x56\xeb\x7a\xdb\x30\x4e\x3b\x2a\xba\xd7\x4d\x28\xfc\x6c\xd4\x77\xd2\x4a\xbd\xe6\x5d\xd0\xdd\x7b\x95\x89\xbe\xc6\x1a\xac\xb1\x8a\xdd\xa5\xb6\x38\x1f\xc9\x5e\x77\x70\xa7\x3d\x3d\xb8\xd5\xde\x8d\x36\xaa\xc2\x6f\xd6\x2d\x56\xe3\xb8\x22\xbe\x72\xd1\x95\x97\x3f\xad\xca\xe7\xac\x06\x04\x9e\x2e\xd5\xfa\x54\x32\x28\x64\xf1\xfa\xa4\x8c\xd6\x7d\x92\x53\xa0\x40\x0a\xb4\x2a\xa4\x3c\x15\x67\xd5\x02\x95\xea\x96\xa6\x20\x1b\x04\x4e\xfe\x73\x36\x23\x2f\x4b\x94\xf4\xd2\x82\x37\x74\x34\xd3\x2a\x94\xe7\x25\xb2\xcf\xe9\xa8\xa3\x52\xd2\xf1\xb3\x57\xaa\x6f\x11\xba\xb2\x34\x90\x12\x40\xbf\xa4\xda\x94\x84\x2a\x72\xed\xa2\x6a\x05\x9f\x5f\x4d\xc4\x20\x3b\x86\x5d\xfd\x47\x85\x7f\xa7\xac\xd9\xb9\xe4\x4c\xb9\xc1\x05\x57\xcf\xa2\xa1\xd8\xd7\x24\xa6\x7d\xbe\xb7
\xb3\xb3\xf9\x64\x77\x3e\xdf\xd9\xdd\xea\x3d\xd9\xe3\x03\x92\xaa\x74\x9f\xac\x52\x47\xc1\xa8\x79\x5e\xf5\x6d\x47\x9d\xc6\x9d\x9e\x2d\x64\x9b\x34\xc9\x19\xd3\x65\x14\x4a\xe2\x38\x75\x14\x56\xa0\x79\x7a\x06\x15\x35\xcc\x6a\xbb\x39\x82\x86\x71\x04\x4b\x78\xc6\x9d\x0e\x04\x55\x5c\x83\xf9\x9c\xf0\x8e\x6d\x60\x90\x04\x83\x1e\xa7\xd4\xc8\x53\xe4\x90\x3c\xc7\x4c\x97\x30\xeb\xff\x47\x4a\x78\x86\x96\xb6\x68\xe9\xbf\x46\x4b\xe7\x68\x59\xd5\xdb\xa0\x66\xf6\x5e\x21\xcf\x90\x98\xde\xaf\xd4\xb0\x32\x74\x8c\x0a\x9f\xa2\x12\x33\x54\xe2\x79\x15\x15\x81\x2b\xb9\xbb\xb5\xd9\x9d\xcf\x77\x1e\x6d\x6d\x6f\xed\xf1\xf9\xdc\xe8\xb3\xa7\x1b\x1b\xe2\xcc\xa8\xae\x19\x16\xf1\x02\x16\xf0\xa5\x89\x16\x95\x3b\x9b\x4e\x42\xc3\xe3\x13\x0a\xaf\x9b\x6b\xe1\xd4\x62\xa5\x77\x35\x83\xa9\xd5\xce\xff\x82\x88\xad\x4e\x9b\x29\xed\x46\xad\x8d\xab\x44\xa1\xa9\xd9\x9e\x71\x75\xd4\xf1\x7c\x4e\x4c\xf5\x8d\x0d\x7d\xd6\x11\x96\x24\x04\xcd\x45\x50\x37\xc9\x64\x51\x3a\xfd\x2f\x56\x58\xb0\xff\xbd\x98\xac\x2f\xa0\xd2\x4b\x72\x9c\x28\x7c\x6e\x98\xdd\x8d\xde\x9e\x72\x43\x39\x14\xb7\xc7\x23\x3b\xc5\xdf\x9b\xd6\xa1\xcb\xd8\x42\xd5\x0f\x8d\x4c\x26\xb3\x79\xf2\x21\x1a\xf9\x36\xe1\x33\xfd\x2a\x6f\xcf\xf2\xb2\x8d\xac\x7a\x42\xe1\xeb\x12\x48\x3b\x55\xb9\x21\x12\x8e\x88\xde\xeb\x65\xfc\xb2\x55\x62\x8d\x5d\x40\xa5\x7e\xa3\xb7\x67\xa6\xa5\xc0\x14\xf5\x9f\x4c\x97\x17\x14\x44\x87\xe9\xaa\x06\x8f\x0b\xf5\x6d\x25\xc7\x84\x1f\x8d\x92\x53\x6e\xac\x50\x1d\xe5\x03\x34\x81\xdf\x34\xb7\x7e\xb8\xaa\xf5\x43\x35\xef\x62\xf1\x3b\xae\x2f\xdc\x69\x74\xd3\xac\xbe\xfe\x97\x6c\x94\xba\xea\xbf\x64\xbf\x58\x44\x26\x07\xcf\x48\xaf\x47\xbd\xee\x9e\x6e\xb7\xe5\x5e\x77\x3e\xd7\x46\xdf\xea\xee\xc9\x81\xee\x48\x4f\x5b\x2d\x1c\x7b\xe4\x9a\xcb\x4d\x3b\x3f\x52\x32\x7c\x15\x88\x70\x02\x2a\x7d\x18\x4d\xa2\x48\x81\x4e\x9f\x54\x14\xcb\x21\x88\xf4\x69\x12\x9d\x37\xca\x99\x76\x7b\xd5\xa8\xe7\xf3\x55\xa5\xeb\x8c\x65\xbc\x2d\x96\xec\x4f\xc6\x57\x46\x38\x86\xe3\x77\xf8\x1e\x8b\xb3\xe7\xd0\xd8\xd3\xed\x76\xb8\xc7\xd3\xad\x17\xb1\xb8\xbc\xe7\x14\xed\x07\x4c\x9e\x86\x9d\xce
\x19\x63\x2c\x3e\x55\x9d\xce\x59\xbb\x4d\x7a\x66\x06\xa3\x01\xd1\x9d\x0e\x08\xd6\x33\x82\xab\xd3\x01\xe4\xcf\x8c\x91\xdd\xad\xed\xc7\x8f\xdb\x11\x1d\x2c\x34\xf4\x7a\x34\x67\x89\x4f\x49\x30\x50\xde\x46\x0f\x75\xef\x84\x02\x97\xcd\x4c\x4d\xed\x65\x0c\x7a\x50\xed\x42\x57\x51\xa5\x03\xb3\xf9\xb5\x3b\x8b\xfd\x99\x56\x44\xc1\x26\xa5\x74\xa0\x3a\x9b\xde\x46\xcf\xc3\xa2\x53\x75\x46\xe9\xc0\xf9\xc7\xe8\xdf\xcc\x3c\x0d\x36\x36\x3d\xd5\xe9\x99\x0a\x1b\x46\xeb\x0b\x56\xa0\xb1\xd0\x9b\xa1\xa1\x84\x42\x24\x6b\x79\x5b\x5f\xee\xe5\x8c\x4d\x76\x3a\x39\x11\x96\x61\x48\x6a\x37\xf2\xf6\x63\xc3\xcd\x0a\xcb\x43\x26\xf9\x0f\x0a\xa3\x3a\x8c\x4a\xfb\xbd\x5f\xcc\x4e\x5f\x65\x1d\xc5\x6c\x01\xd9\x8d\xed\xc7\x96\x79\x76\xe7\x73\xb9\xc7\x62\xea\x2b\xc1\xc7\x7d\xc1\xe4\x03\xd1\x89\xb3\xfe\x8e\x89\xb2\xab\xe1\xd7\x0f\xca\x8e\xa1\x5b\x3b\x36\x51\x37\xb6\xed\xc7\xff\x16\xf3\xb9\xf8\xf7\xce\x23\x63\xb4\xec\xee\xd8\xa7\x47\x5d\xd4\x09\xc5\xde\x93\x47\xf3\x79\xaf\xbb\xb9\x27\x52\x74\x34\xeb\xed\x3e\xd0\x1d\xb1\xf1\xf8\x51\x22\x26\x33\xb1\x96\xbf\xd8\xd9\xe9\x57\x5f\x6c\x3f\x2e\x90\x96\xa0\x51\x15\x92\x8c\xfc\x69\x23\xc4\x25\x8e\xa8\x28\x04\x8c\xef\x75\x07\xd9\x6e\xf0\x78\x27\xe7\xb0\x6a\x2f\x48\x77\x44\xb8\xb0\x23\x3a\x1d\xda\x47\xfa\x0f\x07\x44\xb0\x1e\x68\xab\xb8\x2d\xd1\x7f\x48\xdb\x6d\x53\xb9\xa0\x78\x9e\x11\x7b\x3d\x83\x72\xce\x9d\xbe\x74\xf9\x96\xb1\x62\x3b\xcc\xb9\x72\x28\x48\x97\xdf\xa4\x8f\xa1\x43\xfb\x5a\xdd\x65\xc4\x78\xa4\xc9\x89\x38\x3f\xb8\x9d\xa2\x6f\x9f\x26\x01\xd7\xc1\x45\x49\xc9\xbf\xd4\x89\x61\xb9\x13\xd9\xcc\x73\xe3\xc9\xc4\x70\x11\xf7\x2a\x6d\xba\xda\x9f\x88\x72\x05\x38\xeb\x1a\xf5\x0c\x42\x56\x92\x64\x10\xb1\x8d\x5e\x9f\x77\x3a\x7b\xb2\xdd\x46\xb1\x23\x6e\x45\x40\x02\xa3\x23\x46\xeb\xe5\x9a\xfd\x02\xe0\x88\x89\xdc\x7f\x0a\x7e\xaa\x49\x8f\x68\xbf\xbb\x37\x4a\x27\x7e\xc6\xc4\xe9\xe8\xac\xef\x9f\x6e\x6c\x8c\xce\xd8\x6c\x70\xa4\xc9\x8c\x7a\x97\x3a\x89\x33\xe7\xd0\xf7\x18\xc4\x69\xf7\x0c\x84\x5d\x55\xe0\x70\x40\x7c\x4a\x29\x44\xe5\x4e\x73\x2d\xac\x78\xc5\x42\x48\xdd\x56\x25\x3f\x4a\x0c
\x12\x78\xb6\x1c\x81\xd9\x59\x59\x3b\x25\xa6\x13\x8e\x5a\x55\xf9\x7c\x26\x1c\x91\xa0\xd3\xf9\x37\x8b\xf3\x7d\xdb\x2f\x1c\x7f\x5c\x9d\xc7\x57\x42\xea\x59\x36\xc8\x2d\xc8\x9c\xf5\xca\x0c\x52\xe5\xaa\x4e\x5e\xf3\x54\x9d\xf5\x8d\x22\xa9\xce\x98\x30\x83\x15\x38\xd8\xcc\xb3\x69\x87\x2b\xa1\xa8\xbe\xd4\xc5\xe6\x19\x04\x70\x40\x34\xa5\x34\xa1\x66\xf5\xaf\x57\xf3\x0e\x9d\xad\x69\x79\x2d\x83\xca\x7a\xc9\x5c\x27\x0b\xb3\x65\x15\xb8\xb3\xd7\xc3\x74\xcf\x66\x2a\x45\xaa\x78\x72\x08\xed\x62\x50\x5a\x05\x9c\x0d\x64\xb1\x3e\xa5\x50\x5e\x98\x00\x4a\xda\xc8\x70\x99\x0d\x15\x6e\xdc\x2e\x0c\x33\x63\xb8\xc9\x94\x33\xd5\x7a\x20\x8a\x6a\xb9\x65\xda\x5a\xaa\xb7\x05\x23\x4f\xc2\x79\xe6\x16\xbe\x5a\xd5\xf5\x36\x5a\xd8\x17\x9e\x4c\x52\x44\x2f\x9a\x6a\xdb\x8e\x8c\xd8\x49\x28\x4c\x57\x08\x97\xb4\x1e\x68\xac\x79\x2e\xeb\x1d\xb7\x8b\x95\x41\x9c\x35\xfb\x5e\x97\x2b\x43\x7c\xb6\xca\x63\x5a\xd7\x00\x38\x36\xe1\xb5\x4d\x4a\x67\x6e\x0b\x8d\x20\xc0\x66\x41\x43\xb3\xd2\xe1\xda\x52\x43\x08\xb1\x69\xd4\xd8\x14\xa2\x55\x8d\x21\x3a\xab\x63\xb1\x25\xd6\x79\x67\x5e\xbd\xfe\x78\x7c\xe4\x4e\xb9\x9a\x09\xf4\xba\x2e\xb2\xcf\x8f\x31\x19\x93\xcf\x1c\x9c\x4f\x17\xe1\x6c\x2d\x9c\xad\xc9\x48\xaf\x5d\xf3\x49\x38\x5c\x33\x2d\xd7\xd7\x9c\x8e\x74\xaf\xc4\x6c\xc6\xcf\x05\x1c\x49\x03\x83\x22\xc3\xbd\x69\xa4\x04\xec\xf6\x52\x5a\x27\x6f\x41\x89\x77\x69\xad\xd9\x4d\x88\x28\xb8\x2d\x7a\x1f\xf0\x99\x58\xdb\xf2\x52\x9f\xa1\x1f\x45\x13\xc1\x4b\x2e\x43\x35\x78\x15\x13\x45\xbd\x7d\x49\x1c\xbe\xf6\xf4\xf8\xf8\xad\x03\x46\x51\x33\xad\x36\xb3\x56\x32\xbe\xf2\x85\x2a\x1c\x77\x6a\x80\xd5\xe5\xda\xab\xa3\x4f\xa6\xba\xb7\xb1\xd9\xdb\x7e\xb4\xfd\x78\x6b\x77\xfb\xd1\x9e\x6a\xb7\xd5\x5e\xf1\xdc\x6e\x93\xee\x1c\x35\x9c\xac\xab\xf5\x70\x76\x18\xca\x50\x9b\xd9\x9a\xcf\xd5\x7f\xf5\x16\xa1\x61\x35\x8b\xc2\xf6\x02\x0a\x0d\x78\x1f\xbe\x3d\xde\xff\x54\x20\xbe\x9b\xb5\x5a\xf4\x14\x65\xad\xd4\x5a\x28\x67\x9a\xcb\xc0\xbc\xfc\x88\x95\xb0\xa4\xe3\x38\x19\xc8\x8f\x9f\x4e\x5e\x1d\xbd\x28\x60\x3e\xf1\x4a\xb2\x2e\x1b\x8d\x74\x03\x5b\xdf\xbc\x2c\xea\xee\x64\x75
\x5f\xc5\xc4\x2e\xa8\x7d\xff\x28\x7b\x8f\xcc\xdb\x0d\x67\x19\x13\x1f\x8c\xa5\xf5\x67\xc2\x41\xd6\xff\xdb\x57\x1f\x4b\x23\x7a\xfc\xe7\x96\xb7\x32\x6d\x2a\xd7\xf6\x4f\x4e\xf6\xbf\x17\x8d\x7b\x5d\x2f\xb3\xf9\x86\xb5\x6e\x66\x55\x38\x97\xe7\xf3\x75\xa2\xad\x63\x2e\x13\x45\x29\xd0\xe3\xa7\xaf\x0f\x9e\x7d\x5a\xbb\x09\xf5\xc5\x1a\x5f\x1b\x85\x62\x32\x5c\x93\xfc\x4a\x0c\xd7\xfe\x1f\xa7\xa3\x3b\xce\xff\x83\x1d\x5a\x69\x70\x97\x22\x75\xaa\x8b\x93\xc0\x17\x9c\x08\x3a\x10\x1e\x6e\x87\xef\xa8\xbf\xa0\x47\xd6\xa2\xd8\xf3\xac\xae\x29\x5d\x81\x22\x61\x71\x9c\x0b\xc8\x14\x23\x0c\x47\x44\xe5\xc6\x71\x5c\xa9\xb6\xf6\xf6\xf8\xe8\xc5\xc1\xc9\x1a\x47\x58\x6b\x47\x42\x0c\xd7\x50\x9e\xac\x39\x9d\xb8\xe3\xac\xf9\xb1\x5e\x8b\xe4\xe4\x6e\x6d\x26\xc4\x9a\xd3\xc9\xc0\x74\x9c\x35\x21\xb5\x0a\xc5\x0c\x3b\x28\x8d\x26\x6e\x18\xcd\x07\x8c\xa7\x28\x8d\x66\xd3\xfb\xe3\x34\xff\x61\x80\x76\xb6\xf3\x29\xe5\xac\x30\xcc\x03\xbb\x3c\x38\xf0\x0b\x3e\x3b\xbe\x91\xef\x55\x34\x15\x4a\xdf\x19\x35\xe9\xbe\x84\x6f\x70\x66\xe5\x2b\x22\x4b\xcb\xec\xe8\xbb\xe1\x6f\x16\x63\xce\xae\xc8\x31\xb1\x4f\x50\xf8\xdf\x5e\xc5\xe4\x8b\x26\xc5\x90\xb6\xbc\xac\xff\x90\x49\x77\x04\x11\x93\xee\x39\x8c\x58\xb7\x3f\xda\x8b\x32\x4d\x77\x64\x34\x78\x44\x20\x3a\x1d\x9d\xa5\xcb\x53\xed\x5e\xf4\x43\x16\x12\xd3\x59\xa9\xa7\x30\xeb\x65\xdb\x2b\xd0\x5f\x98\x6b\x7c\x7d\x81\x2d\x0d\x93\x10\x69\x8b\x9d\x1c\x2f\x9f\xb5\x60\x66\xb0\xea\xcf\x5c\xbf\x3f\x63\x33\xd7\x4f\x91\x99\x59\x7f\x6e\x38\x22\x0b\xa8\xf8\xec\xca\x00\x04\x3f\x47\xe6\x63\x4c\xbe\x72\x33\x72\xa3\xfb\xa5\x5d\x78\x0b\x8c\x5c\xba\x3c\xe3\xd3\xb6\x46\xb7\xb4\xdf\xa5\x19\x59\x29\x92\xa6\x46\x57\x52\x7f\x38\x4d\x4f\x95\x46\xcb\xcd\xf1\x30\x3d\x9d\xc5\xa0\xba\x88\x1f\xb8\x11\x53\xb8\x88\x78\x00\x1f\xb8\xbc\x34\xa7\x9a\xc4\xb4\x1c\xa0\x53\x0e\x0d\x1a\x93\x77\x1c\x72\x34\x6a\x02\x85\x6c\xb0\x46\x29\x4a\xa7\x2a\x7f\x72\x91\x76\x70\x3b\x15\x81\x0e\xe5\xb9\x11\x62\xb9\xf0\x2a\x0e\xc0\x65\xee\xb5\x5f\x3e\xed\x92\x6e\x0b\x6d\x86\x56\x71\xda\xb4\x24\xb8\xba\x5e\x75\x09\xa4\xcb\x0d\x1c\x97\xf7\x53
\xb9\x96\x0a\xaa\x54\x58\xa4\xdc\xbf\xc2\x84\xd7\xbb\x8b\x0c\xdc\x0d\x10\x46\x90\x31\xe5\x94\xc3\x66\x5b\x37\xf3\x64\xc9\xfc\x88\x29\x67\xa3\x39\x84\x21\x42\x18\xb6\xdb\xcb\xb5\x4a\xb8\x0a\xac\x25\xea\x6a\x6d\x15\xb5\x46\x58\x6b\xd4\x6e\x3f\x33\xb5\xce\x41\xb9\xe7\xc5\x76\xc8\x6b\x5d\x60\xad\x8b\x3a\x58\xb9\xb8\x29\x01\x28\x91\xe0\xb3\x66\xbf\xdf\x7a\xe1\x33\x2c\x16\xa1\xec\x10\x10\x7b\x1a\x03\x80\x0c\x01\x9a\x8e\x31\x18\xe5\x54\x9c\x35\x9d\x10\x1e\x36\xaa\x2c\xa8\x24\x59\x79\x1c\x8e\xee\x08\x2a\x2f\x60\xd8\x21\x48\xda\x71\x9c\xb2\x1a\x73\x24\xeb\x4f\x98\x2e\xab\xef\x4d\x7f\x1f\x57\xf9\x58\x4e\xe5\x19\xb3\xfd\xe8\x54\xb7\x7e\x25\x99\x01\x1e\x4f\x26\xa5\xee\xde\x96\xc0\xde\xb7\xbc\x2e\x70\xa3\x8c\xe7\xc5\x3f\xab\xc5\xbd\x85\xe2\xdf\xd5\xe2\x4d\xf0\x3d\x09\x81\x67\xfa\xb0\xda\xff\xf1\x0a\xed\x7f\x0b\x6b\x0f\xd1\xa0\x80\xa7\x2b\x2a\x6e\x97\x2a\xe2\x48\x3e\xc9\xf2\xc1\xde\x09\x22\x61\xad\x45\x1c\x82\xf0\x3e\xc9\x4e\x27\x35\x43\x70\x96\x2f\xbc\xd3\xb3\x24\xe3\xa8\xaf\x71\x56\x4a\xa1\x26\xcf\xcb\xdc\xe1\xb7\x24\x65\x96\x20\xc9\x5b\x49\x4e\xd2\xad\x5d\xda\xdc\x2f\x53\x24\xa5\x7b\x61\xed\x30\x45\xe1\x35\x1e\x2b\xe0\x79\x59\x65\x34\xe5\x75\x59\x00\xff\xd2\x96\x02\x76\xf3\xdb\x9a\x9b\xf6\x9c\x45\xb2\xf5\x1e\x7c\x91\xec\xf4\xac\x18\xe9\xeb\xcc\x5c\xfe\x22\xd3\x08\x18\x0a\xeb\xef\xd3\x38\x19\xd3\xa2\xdb\x97\xec\x8b\x74\x67\x17\xe1\x48\x13\xda\xa7\xef\x4c\x83\x3e\xc2\x2a\xad\xda\x3b\x1c\x2f\x3a\xd7\x94\x9b\x39\x09\xa4\xd9\x86\x6e\xcb\xec\x8c\xae\x3d\x72\xef\x99\x3f\x79\xbd\xf3\x76\x5b\xb9\xe7\xc8\xb5\x64\x9f\x2a\xf7\x9c\x99\xc7\x10\xb9\xb3\xd9\x73\x76\x7c\x06\x20\x16\xf8\xc4\x80\x33\xd2\x2a\xab\x89\x8e\x28\x83\x3c\xc6\x53\x66\xbc\xfc\x3a\x0a\x87\x58\x35\xc0\xfe\xfd\xca\xec\x18\x60\x12\x70\xbd\x12\x8a\x82\x60\xc7\xe2\x94\x22\xa9\xdc\x8b\xea\x06\x4e\xbb\x1f\x61\xf7\x17\xd9\x3c\x50\xeb\x03\x33\x98\x18\xb2\x43\x9f\x75\xd7\xeb\x19\xdb\xd3\x54\x85\xd0\x53\xee\x79\x02\x59\xdb\x61\x62\x69\xf7\x45\x65\x11\xcb\x96\x60\x79\x11\x55\x36\x7d\x52\xdc\xac\x7d\x7b\xf7\xf6\xa5\xd6\xd3\x13\xf1\x2b
\x16\x33\xdd\x5f\xaf\x12\xb4\x99\xaa\x20\x2c\xd4\x9d\xbe\x74\xf9\x70\x78\x70\x2d\xa4\x7e\x1b\xce\xb4\x90\x42\x11\x67\xaa\xa2\x73\x25\x66\x33\xa7\x22\x99\x32\xc6\xf5\x2c\xba\x9a\xc6\x9a\xfb\x13\xd1\x6e\x1b\xaa\x74\x39\xb9\xf7\x3f\x7a\xd2\x9d\x44\x7c\x28\x86\xe0\x7f\xf2\xa4\xab\x23\xcd\x27\x18\x9d\x92\x10\x09\x31\x7a\xbf\x96\xfa\x11\x4a\x45\xaa\xd4\x09\xbd\x57\xe4\xa7\x24\xb3\x10\xa3\x5a\xea\x5a\xe8\xf0\x4a\x44\xb1\x5e\x6e\x33\x69\x6e\x63\xd0\x5a\x68\x50\xe7\xb1\x23\x82\x49\xb8\x0f\xde\x7b\xc2\x55\x62\x36\x8d\xe4\x4c\x7c\x3e\x79\x0b\xfe\x9d\x77\xef\x7f\xf5\x84\x3b\xd3\x5c\xc7\x33\x08\xa2\xfc\xf7\x27\x71\xab\x13\x08\x02\xaf\x3c\x4b\x96\x07\x84\xb1\x3d\x07\x2f\x4e\xc1\x0b\x97\xab\x4c\xcf\x08\x9d\x7f\xd4\x3f\xd2\xa1\xb0\xfa\xc8\xb2\x70\x72\x3a\xde\x9a\x83\x04\xd8\xdd\xe3\x99\x8a\x12\xa7\xfe\xf1\x50\x9e\x93\x2e\x70\x0a\x61\xe5\x15\xef\x6c\xd2\xbe\x62\xb7\x64\xc2\xcb\x21\xf5\x05\x13\x3f\xd2\x24\x08\x89\xa4\x83\xb0\xe3\x00\xda\xc1\xdc\x0b\x69\x02\x8a\x26\xc5\x79\x27\x11\xee\xb9\xd0\xfb\x93\xc9\x49\x3a\x2f\x2f\x05\x1f\x0a\x35\x23\x94\x82\xff\xa1\x34\x5f\x29\xdf\x10\x56\xb9\xb0\x93\xb4\xb7\xd9\xed\xce\xe7\x5b\xdd\xee\x1e\xcb\x5e\xd1\xdc\x07\xef\x47\xc3\x3b\x26\xf3\xf6\x66\x42\xe1\xa7\x24\xa3\x90\x68\x9a\x9e\x87\x30\x45\x74\x59\x11\x8d\xe9\xe0\xad\x24\xb1\xcb\xa9\x47\x1a\x01\x8c\x49\x14\x62\x64\x17\xba\xcf\x88\x04\xe1\xfa\xdb\x46\x4f\x4b\xac\xdf\x55\xba\xd1\x54\x48\x22\xdc\x60\x0a\xc2\x0d\xde\xc3\x7a\x77\xd9\x6d\x80\x74\xe5\x1b\x55\x39\x78\x6f\xc0\xac\x37\x1f\x7a\x06\x41\x5f\xbb\x7e\xdf\x86\xf2\x49\x77\x26\x74\xba\xfd\xec\x4c\x11\xed\x72\x1b\x72\x66\x74\x86\x12\xba\x77\x53\xa3\x8c\xfa\xdb\xae\x0f\xd2\x35\x86\xdd\x33\x25\x86\x42\xea\x90\x4f\x66\x06\xec\x09\x98\xbd\xea\x06\x6f\x68\xbb\x4d\xa4\x9b\x52\xbf\x29\x79\x63\xb4\x5c\x3c\x09\x4c\xe3\x2b\x84\xeb\x7f\xe8\xe7\xca\xca\x4c\xc8\x21\xb9\x36\xd3\x38\x20\x35\xf8\x38\xcf\x22\xa9\x85\xd4\x1b\x06\x03\x07\xa3\x0d\xc1\xa0\xee\xe1\xaf\xd2\x56\x91\x2e\xf7\x23\xa5\x09\x5e\x89\xa9\x78\xe6\x4a\x2e\x2c\x14\x78\xbe\x27\x5c\x1f\x78\xcd\x5e\x10
\x2e\x37\x12\x20\xd7\x7c\xc3\x10\x30\x56\xd9\x48\xd2\x15\x7e\xb1\xcf\x46\x2c\x09\x90\x6e\x70\x6e\xfe\x39\x36\xff\xbc\xac\x6c\x63\x5b\xaf\xf4\xc6\x22\x99\xcb\x98\xcf\x0b\x2e\x31\xbb\x65\xc6\xe4\x46\x82\x55\x95\x07\xca\x1d\x4d\xf8\xf9\xcc\x33\x12\x60\xad\x4b\x69\x1f\x75\xfc\xf9\xfc\x19\x49\x8f\x08\x43\x76\x9f\x40\xc4\x48\xc0\x34\x41\x45\xdf\xe5\x30\x62\x9c\xcc\x20\xa2\xe0\xb3\x1a\x3e\x52\x8d\xae\xf9\x2e\xb3\xed\xfc\x1d\x43\x6f\x62\x97\xb7\xdb\x84\x68\xa6\xe7\xf3\xfb\x84\x9e\x8a\x33\x16\xbb\x9c\x08\x8c\x4a\x33\x35\xd8\x07\x49\xe2\x52\xb4\x81\x4e\x48\x08\xb3\xd2\xa0\x66\xb6\xaf\x20\x0d\x0f\x8c\x28\x8c\x48\x64\x2c\x0f\x30\xea\x9c\x22\x21\x04\xae\x0f\x31\x89\x8a\xe3\xb9\xea\x5b\xf0\x07\xf7\xd3\x48\xe9\x99\xe7\x27\xde\xbd\x15\x33\xdf\x25\x06\x0c\x65\x7d\x7c\x28\x0d\x48\xb0\xfb\x73\xf4\x8f\xda\x39\x4a\x20\x66\xd2\x0d\x80\x33\xe9\x0e\x21\x60\xd2\x15\x80\xf6\x68\x1e\xc9\xec\x5e\xb0\x13\xb3\x03\x8f\x0b\x07\xfb\x5a\xf9\xa2\x04\x96\x48\xb8\x6f\x79\x3b\xe0\xd7\x90\x8b\x74\x79\xe5\x68\xd7\x6d\x0d\x6e\x09\x07\x81\xaa\xa5\x17\xb4\xdb\xe1\x60\x1f\x6f\x12\x29\x37\x04\xe5\x5e\x9a\xb7\xb7\xf8\x22\x18\x28\xd7\xc8\x51\xf3\xca\x90\x02\x48\xd7\xa7\x56\x19\xfa\xfa\xb7\xca\x90\x72\xcf\xc9\xa2\x2e\xd4\xa0\x50\x8f\xc9\x2f\x09\xd2\xbd\x80\x54\x67\x55\x55\xea\xfb\xb6\xfa\x2a\x08\x6a\xb2\x63\x4f\xc1\xc4\x68\xb3\x85\x62\xf4\x63\x51\x9d\xbd\x32\xe5\x66\x0c\x6f\x56\xea\xb1\xd2\x93\x10\x2d\x38\xc6\xa5\xaa\xc6\x71\x97\x82\xc2\x30\x2c\x51\x29\xb2\xde\x05\xdc\x65\x46\x23\x07\xf3\x8c\x87\xb8\xe9\xb3\xa4\x46\xc5\x3c\x8d\xcf\xcc\x10\x9d\xd1\xad\x03\xdc\x13\xa7\xf1\xd9\x7c\x7e\x1f\x7a\x2d\xb8\xf4\x5a\x95\x4b\x2b\x4a\x15\x9b\x38\x35\x24\x55\x6e\x48\x66\xfe\x25\xe5\x8e\x81\x33\x32\x62\x31\xf8\x4c\xc0\x98\xc8\xc1\x77\x79\x3a\x3a\x73\x85\x67\xff\x8e\x2a\x72\xaa\x38\x84\xf1\xfb\x0a\x23\x7c\x7f\x51\xa3\x26\x4e\x4b\x2c\x45\x1a\xbd\x69\x52\x84\x84\xa2\x3e\xa7\x4f\xe3\x33\x46\x42\xc6\xcd\xf6\x8d\x30\xb4\x88\x96\xf0\x06\x39\x88\xdc\x90\x5d\x91\x10\x22\x37\xa4\x5e\xe4\x5e\xa6\x0f\x97\x14\x22\x9a\x3b\x60\x8b\xc0\x63\xe5
\x5e\xf5\x03\xd7\xef\x07\x2c\x70\x7d\x8a\x63\x35\xbb\xce\x8c\x36\xed\xb8\x5f\x71\xf5\x22\x1a\xe9\x9c\xb8\x11\x68\xb8\x9f\x7a\xca\x95\xf0\xcb\x13\x89\x25\xca\x10\x22\x18\x81\x5f\xba\x09\xa8\xcc\x90\xbf\xcb\x53\x79\xd6\x6e\x3f\x23\x5b\xe5\x5b\x88\xaa\x42\x7b\x58\x13\xb0\x26\xbb\x17\x5e\xac\x40\x79\x0a\xb8\xc7\x55\x02\xdf\x72\x13\x20\x56\x8d\x11\x48\x65\x32\xe1\x2a\xdb\x7d\x1a\x38\x3b\x3d\x83\x80\x21\x64\x57\x41\xc8\x88\x66\x5d\x58\xd8\x24\x76\x41\x66\x42\x7f\xb2\x02\x89\x94\x25\x46\xb6\x73\xa0\x10\xdb\xa5\xe2\x60\x22\xb8\xca\x9a\x29\x74\xb3\x67\xb5\x6c\x9f\x3e\x0b\xed\xb8\xdc\x60\xc1\xb2\xcc\xcd\x80\x34\xd2\x9b\x16\x12\x98\x83\x30\x86\x66\x40\x30\xc6\x34\x0b\xe0\xcb\xb4\xaa\x18\x2f\xd0\xc4\x67\x24\x5f\xa9\xb5\x30\xa1\x70\x6f\xf4\xa4\x40\x85\xbe\xa8\x70\x21\x9e\x99\x35\x09\xc4\xb2\xbe\x8a\x1d\x3f\xe1\x8c\xa7\x07\x5f\x94\x96\xc2\x88\xfa\x6a\xaf\x3b\x9f\x73\xd4\xf2\x02\x41\x14\xf4\x68\xaa\xcd\x07\xaa\xdf\xc0\x85\xea\x3c\x40\x18\x48\x4e\x33\x7b\x33\x54\xcc\x89\xe5\x50\x8c\x42\x29\x86\x85\x4b\x73\x18\x05\x78\x6e\x38\xc8\x7e\x78\x65\x46\x1e\xa9\xcc\x42\xe4\xd3\xa9\x90\xc3\x67\x17\xe1\x64\x68\xa6\xbd\x4e\xee\xda\xfd\x29\x5c\x19\x0d\x45\xbf\x38\xe0\xe3\x4a\x48\x7d\x14\x0d\x45\x76\x72\x6a\x81\x3c\x53\xe5\x13\x54\x7a\x9f\x50\xa3\xe5\xdf\x57\xf8\xcf\x48\xd5\x19\xf6\xe8\x2a\xac\x50\x66\xf9\xec\xe9\x0f\x31\x90\xdd\xd2\xfa\xdf\x67\x1c\x85\xf7\x45\x87\xc5\xae\x3f\x9f\x77\x21\x8d\x63\x8c\x8b\x08\xcb\x4e\x11\xa1\x88\x4c\x37\xf0\x02\x18\x7a\xfb\xb8\x83\x84\xa7\x61\xe4\x71\xa3\xbd\xa0\xf2\x40\x52\x4d\x00\x66\xff\x57\x10\xfc\x3b\x14\x37\xff\x0a\x45\xa4\x8a\x49\xd3\x0e\x47\x97\xc5\xa5\x27\x51\xc4\xf8\x5e\xaf\x43\x14\x76\x4e\x2b\x0b\x74\xad\x16\xda\xec\x18\x71\x04\x57\x9e\x82\x71\x26\xf0\x93\x06\x16\x72\xad\xc8\xa9\x04\x75\x56\xa3\x92\x59\x63\x39\x3b\xf7\x55\xcd\x1e\xa2\x14\x06\xe8\x3a\x28\xd9\x1d\x48\x03\x09\x5a\x8a\xad\xd0\x15\x73\x38\x20\xea\x20\x15\x77\x23\x11\xd6\x95\x6a\x8a\x2d\x59\x06\x07\x71\x1d\xc0\xf2\x9d\xc9\xe4\x2f\x4e\x61\x2b\x00\x81\xd7\x81\xac\x5e\xa3\x4c\xfe\xea\x9c\x76\x01
\x2c\x04\x75\x80\x17\x2f\x56\x26\x7f\x79\x96\xbb\x04\x1c\xc2\x3a\xf0\xcb\x37\x2d\x93\x85\x13\xdf\x11\xf8\x30\x83\x09\x5c\xc3\x10\x5a\x70\x05\x17\x95\x2e\x96\x4a\xeb\x3a\x51\xcc\x07\xcd\x66\x20\xd8\x04\x62\x76\x0d\x9c\x19\xdd\xb3\x05\x21\xbb\x82\x88\x5d\xc0\x63\xc6\x18\x91\x6c\x44\xeb\x2e\x76\x42\xd4\x74\xb5\x93\x44\x69\x20\xe0\xe2\x99\x74\xd2\x7c\x25\xc0\x68\x42\xbc\xeb\x94\xf4\x2d\x0a\x17\x2b\x76\xa1\xc3\x7b\xe5\xca\x30\x5d\x59\x77\xb3\x52\xf7\x7c\x65\xdd\xad\x72\xdd\x9a\x08\xed\x52\xd5\x6d\x53\x55\x41\xe4\xdd\x8f\xb0\x85\x4e\x2a\x6c\xe0\xa6\xc4\xa7\x1d\x23\xef\xa6\xda\x61\x4c\x0e\x9c\xa9\xe3\xc9\x86\xfd\x6f\x26\x01\xdd\x5f\xad\xc1\x98\x5c\x29\x30\xaa\x0d\xd1\x4c\x42\xcc\xce\x23\x8c\x1b\x8a\xf0\xfa\x9e\x70\x5b\xc0\xbd\x78\x70\x4b\x66\x01\xc4\x7b\x5b\x83\xb1\xf2\x6e\x15\xdc\x05\x46\xcd\x16\xc6\x8c\x1f\x13\x15\xa6\x87\x91\x09\xa5\x5e\x7a\x61\xc0\x2c\x47\xca\x41\xee\x14\x8c\x1b\x35\x99\x63\x82\x2e\x28\x8a\xfe\xf8\x84\xc2\x6d\xe3\x9c\x05\x91\x21\x03\x37\x88\x28\x70\x63\x26\xf0\x10\xf8\xb9\xf9\x7b\x5e\x99\x0c\xe4\xba\x25\xbd\xf3\x3e\x29\x5d\x50\xc8\x7d\xf8\x1c\xdd\x36\x2d\x88\x99\x76\x25\x46\x96\x47\x78\xb2\xcb\x37\x9d\x75\xc6\x44\x66\x6c\xaa\x53\x71\x36\x9f\x13\xf3\x87\xdd\x27\xb4\x6f\x56\x8d\x31\x26\xda\x6d\x27\x98\xf0\xd9\xcc\x3c\xc4\x83\x03\x45\x02\x7b\x3f\x3a\x30\x5a\x2b\x47\x5f\xa1\xad\x70\xc4\xaf\x44\x5e\x49\x41\x0c\x97\x92\x70\x33\x4b\xa6\x22\xfe\x2e\xfc\x34\xc5\xa9\xcf\x82\xde\x2f\x4f\xd5\x59\xdf\xfc\xc3\xc4\x40\x74\x9c\x35\xa7\xa3\xbd\x52\xb2\x8c\x67\xaa\x7a\x3e\xd1\xca\xbc\x9c\x79\x10\xa3\xa9\xe1\x8e\xf1\x96\xce\x98\x49\xf7\x8a\x50\x9a\x9e\xe7\x75\xcb\xd5\x42\xe5\x06\x4a\x70\x8d\x7e\x18\xa3\x32\xd8\xeb\x91\xe1\x88\x6c\x63\xb5\xd2\x11\x9c\x74\xc7\x68\x49\x5e\xf6\x4d\x91\x70\x5b\x7d\xba\x74\x5c\x1b\x0f\x62\x76\x1a\x83\x70\x2f\xcf\xbc\x3c\xd2\xe8\x92\xe2\xdd\xbe\x71\x7a\x2a\x7b\x7f\xe9\xc5\x30\xf5\x54\xe6\x59\x27\x01\x7b\xa6\x88\x00\x63\xaa\x8b\xc9\xd5\x4f\x71\x2d\xa4\xfe\x69\x54\x9a\x9f\x4a\x8c\x18\x87\x20\x09\x47\x64\xab\x8c\xf5\xa1\x22\xc6\x8e\xbd\x20\xd2\x3d
\xa7\xa0\x40\xba\x43\x0a\x41\xdf\x2e\xa0\x74\x47\x83\x7c\x58\x07\x13\x61\x54\xac\xa3\x8f\x44\xba\x23\xc0\xd8\x83\xc5\x32\x8c\x48\xe8\x07\xaa\xdd\x76\xb8\xd9\x43\x6e\xd0\x6e\x07\x35\x4e\xc9\x60\x12\x06\x63\x07\x02\x45\x02\x4a\xc1\xa0\x90\xf6\xdc\x2f\x9f\xf2\x0a\x88\x58\xb7\x1f\xed\x85\x99\x2a\x1b\x75\x3a\x34\x32\x95\x9f\x29\xd2\x33\x83\x18\x84\xa7\xd1\x99\x67\xfe\xc1\x13\xdb\x5c\xc1\x0d\x4a\x97\xed\xd4\xd2\x09\xa8\x31\xfe\x74\xd9\xd1\xd8\x37\x6c\xca\x2c\xc4\xe0\x48\xa1\xbf\xd6\x4b\xf7\xb7\x18\xbc\xb2\xcd\xf1\x95\x25\xde\xc1\x65\x5e\x67\xdb\xbe\xf8\x98\xbe\x20\xce\x35\x9f\xc4\x02\xb7\xc0\x7c\xee\x04\x17\x22\x18\xa3\xb6\x6a\x1e\xe5\xa9\x38\x5b\x67\x2c\x46\x5f\x16\x7a\x3e\xca\x87\x64\x47\x0b\x44\x38\xd3\x77\x13\xd1\x78\x55\xa9\x74\x26\xa5\x16\xd4\x30\x5b\xb3\x74\xf5\xa0\x2f\x06\xe8\x02\xdb\xd7\x5a\x85\x7e\xac\x05\xb1\xb9\x10\x5c\x25\xae\xa2\x6b\x51\x7a\x5d\xc6\xe7\xe3\x1f\xe1\x82\x51\x96\x47\xc0\x99\x70\xa3\x3e\x5f\xe8\xe3\xe8\x23\x89\x41\x63\xca\x83\xc5\x7e\x6c\x51\xb9\xaf\x57\x8b\xfb\xd5\x90\xee\xe1\x0c\xf7\x1b\xfe\x42\xde\x51\x31\xdd\xf5\xc2\x25\x1b\x63\x96\x9b\x8d\xc6\x6d\x28\xa3\xfd\xd7\xfd\xe5\xb6\x18\x63\x1c\x8d\x70\xf7\x17\xe3\xfd\x20\x92\x3a\x94\xb1\x48\x32\xac\xaa\x64\x19\x1b\xfd\x20\x60\x6f\x0d\xab\xe1\xb5\xde\xf4\x18\x02\xb8\x53\xed\xf6\xfd\x94\xcf\x66\xe1\xb5\xf0\xce\x23\xc2\xe9\xde\x66\x42\x01\xaf\x3b\x05\xf6\x9c\xa3\x19\xbc\xad\x96\xe9\x96\x5a\xdd\xdd\xdf\x84\x72\x18\xdd\xd4\x39\xfb\x1d\x7b\x3e\x79\x8c\x7c\xc1\xb5\xb6\x4f\x1e\x6a\x71\x9f\x80\x93\x22\xe1\xc0\xfd\xb9\xd0\x5e\x49\x6f\xb8\x53\x6c\xbd\x6b\x64\x73\xe1\xd5\x2d\xa6\xfb\xad\x22\x3e\x18\x1e\x5f\xf2\xa9\xe5\x46\xad\xfb\x0b\x34\xbb\x43\x99\x02\x32\x0b\x59\xd0\xb4\x1a\x3c\x7b\x1e\x11\x45\x91\xeb\x73\x08\x58\x3c\x30\x72\x8d\xbb\xdc\xe3\x6e\x10\x79\x1c\x42\xd6\x33\x1c\x9b\xbb\xbe\xb7\xc5\x58\xdc\x6e\x73\x23\x6c\x22\x46\xc2\x76\xdb\x50\x76\x34\x35\xa3\xe0\xe7\xdc\x62\x0b\x64\x73\xa9\xfa\x39\x35\x55\xa7\x0a\xf9\xd7\x73\x31\xe2\xf1\x44\x13\x0a\x3e\xed\x0b\x16\xb9\x97\x7d\x7b\x11\x2f\x1b\x41\x11\x60\x25\xa8
\x31\x9b\x03\x6a\x23\xac\x97\xe3\x73\xfb\xa3\x8d\x8d\xbe\xa9\x73\x3a\x3a\x33\xd5\x22\x16\xb9\xd3\x24\x22\xa8\xb8\x65\xa2\x64\xe6\xfe\x62\x12\x66\xa5\x23\xd8\xca\xe6\x3c\xcd\x6f\x0a\x1e\x67\x1e\x9f\x2e\x2d\x67\x5f\xfa\xad\x16\xad\xca\xfb\x96\xa7\x40\x79\x1a\x66\x9e\x00\x9d\x9a\x16\x10\x67\x36\x46\xe1\xb8\x4e\x2d\x23\x28\x5d\xeb\x3d\x2e\x41\x0b\x47\x04\x6f\x4b\x64\x70\xa5\xd1\x2e\x8c\x12\x82\x01\xf1\xeb\x8c\x59\xfa\xef\x19\x3e\x33\x9f\x6f\xe2\x8b\xb2\x3b\xe6\xb7\x22\x1a\xba\x20\x30\xb8\x85\xd5\x3b\x9a\x0c\xdb\xfd\xcb\x2c\x0e\x46\x0a\xe7\xe9\x38\x52\xe3\x52\xba\xc6\x76\x93\xee\x10\x84\x27\x60\xe4\x19\x51\xe1\x7b\xd2\xf5\x93\xc4\x12\x4d\x2f\x49\x9d\x63\x3c\x75\x8d\xed\x94\xbc\x4c\xd2\x9d\xd8\x78\x6c\x88\x58\x90\xc7\x87\xb0\x88\x31\x96\x0b\x81\x51\xbb\x1d\x99\x55\x1c\xb1\xe0\x34\x3a\x33\x25\xa7\x11\x6e\xfe\xd1\xc2\x89\xa6\x91\xd5\x63\xda\x37\x3f\x94\x11\xda\x7d\x1b\xa5\x53\x5d\x3f\x77\x0c\xca\x1d\x83\x6f\xd6\x10\xdb\x75\xf7\xfc\xfc\x8a\x13\xce\x57\x0f\x04\xf8\x34\x0f\x08\xcc\x90\x9d\x19\xf1\x0d\x13\xa6\xdc\x4b\xb8\x66\xeb\x3d\x18\x9a\xee\x50\x9e\x0f\x8d\x3c\xbf\x66\xeb\x5d\x58\x12\xea\xb3\xc1\x8c\x9d\xce\x60\x68\x84\xfa\xcc\x2e\xf7\xd0\x08\xf5\x21\x1b\xba\xe3\x9c\xc7\xb5\x98\x4a\x41\xb5\x9a\x41\x4d\x06\x13\x76\x3a\x81\x96\x01\x35\xb1\xa0\x5a\x06\x54\x8b\xb5\xdc\x71\xee\x0c\x6c\xb7\xb3\x98\xea\x75\xc6\x26\xd9\xdd\x93\x45\x6a\xf0\x08\xb9\x1e\x34\x19\xf6\xdd\xbe\xde\x2b\xf2\x46\xd8\x68\x0c\x79\xaa\x8d\x34\xc3\x08\xbb\xe5\x50\x0c\x32\x83\x09\xf5\x66\x8c\xb1\x09\x9d\xcf\xb1\x9f\x4d\x10\x30\xb1\x53\x6c\xe6\xdd\x58\x3b\x1a\x44\xa7\xb7\x14\xc3\x84\x8b\x20\x5d\x8e\xb1\x21\x3c\x5d\x83\x2d\x74\x78\xf3\xa5\x90\x28\x04\xf7\x34\xdf\x21\x70\xb2\x18\x3b\xba\x54\xe3\x79\x56\x63\xcb\xc3\x63\xba\x0b\xec\xe7\xa2\x71\x9b\x60\x00\x35\xfb\x64\x28\x65\x08\xca\x68\x28\x57\x29\x4e\xdb\x20\xe0\x2a\x8d\x99\x66\xca\x0d\x8b\xe0\x99\xf2\x48\x32\x2a\xda\x01\x01\x17\x94\x96\x7d\xdd\x25\xac\x62\xbb\xb7\xb3\xc8\x20\xc3\xfc\xd2\xb0\x9c\x4c\xd0\x95\x31\xe0\x25\x0c\xb8\xb1\xbe\x73\xfb\x1e\xd9\x5e\x09\xfd\xd2
\xad\xe9\xaa\x90\x35\xfa\x76\x35\xa2\x0f\xef\xe2\x1b\x3d\x68\x9d\xb1\xc0\xe8\x6f\xdd\xfc\xd7\x56\xfe\x6b\x1b\x7f\x99\x9a\x41\x49\x21\x08\x99\x3c\x0d\xce\x20\xb2\xb9\x4d\x42\xc6\x58\xd4\x6e\x17\xba\x90\x69\x59\xd2\x85\x82\xf9\x3c\xd5\xae\x74\xbb\x4d\x48\xcc\x42\x6a\xe4\x34\xe1\x2c\xa2\x6e\x0b\x03\x8c\xf2\xac\x14\xf3\x39\x21\xc2\x68\x4f\xf7\x09\x3d\x0d\xce\x58\x64\x87\x58\x79\xa7\x07\xa9\xf6\xa6\x07\x8e\x93\x29\x6e\xda\x74\xb2\x65\xdf\x5a\x2e\x8b\xc6\xdf\x69\x70\xe6\x8e\x20\xca\x18\xaf\xb7\x7c\x97\xfb\x34\x38\x33\x60\x8c\xe4\x45\x21\x72\x6f\x05\x88\x99\x3e\x33\x46\x33\x42\xd3\x33\x04\xd4\x30\xa2\x2a\x76\x23\x9a\xe4\xc1\x80\x76\x7a\xf0\x8f\xac\x8c\xc2\x37\x7c\xd3\x2f\xe7\xbe\x29\xc2\x68\x96\x04\x87\x61\xc6\x86\xc1\x0b\x23\x67\x8b\x8b\x6e\x3c\x0f\xa2\xda\x0b\x06\xb8\xdc\xbb\x20\xe0\xfe\xda\x0b\x21\xf4\x82\x8d\x30\xa1\x5e\xb0\x17\xa6\x54\xf2\xc8\x16\x05\x20\x3c\x5e\xd2\xa3\x22\x16\xec\x85\x83\xc0\x0b\xb3\x20\x4a\x1b\x3d\x69\x79\x64\x7c\x3a\x3a\xeb\x1f\x1b\x45\x81\x9f\x8e\xce\x40\x43\xa7\x63\x2f\x5a\x5a\x1f\x5e\x89\x88\x9f\x97\x70\x2e\xe7\x62\x01\xce\xcc\x2c\x99\x5f\x99\x2a\x6f\x86\x31\xca\xf9\x38\xf8\x2c\x8b\xdb\x84\x19\xeb\xc2\x84\x75\xe1\x9a\x89\xfe\x6c\x6f\xd4\x6e\x4f\xf6\xfc\x34\x12\x60\xc8\xc8\x33\x16\x9e\xce\xce\xa8\xcb\xa1\xc5\xc8\x21\x8b\x4e\x27\xf8\x70\xc5\x9e\xb9\x3e\x5c\xb0\x43\xd7\x37\xdc\x7f\xb8\xce\x58\xcb\xb6\x99\x9a\x06\x9d\xde\x19\x9c\x9b\xca\x9d\x1e\x4a\x87\x29\x35\x45\x37\x6c\xea\x72\xb8\x63\x53\xd7\x87\x31\x33\xea\xe1\x8d\x29\x3c\xc7\xc2\x5b\x76\xee\x72\xd8\x67\xe7\xae\x0f\x07\x6c\xc8\x18\xbb\x35\x85\x07\xed\xf6\x98\x1e\x2b\x72\x05\xfb\x10\x43\xa7\x73\x4d\xe1\x97\xc2\x6c\x79\x43\xb8\x80\x89\xd1\xea\xae\x3b\xec\xca\x7a\x37\xdf\x67\x25\x77\xb6\xe6\x75\x87\xdd\xd9\x92\x59\x87\x6d\xc2\xa4\xc3\x36\xad\x72\x62\x00\xd3\xeb\x4e\x27\x83\xd5\xca\x60\xe5\x3d\x5d\x97\xe1\xce\x3a\xac\x57\x6d\x3d\xa6\x79\x5f\x57\x79\x5f\x69\xed\x63\x45\xee\xe0\x22\xc3\x76\x19\x87\x5e\x3f\x0b\x05\x5a\x9f\xce\xe7\x37\xeb\x8c\xdd\xa6\x77\x60\x16\x61\x2e\x62\xb7\xd0\xc7\x7e\x73\x1f\x9b
\x89\x65\x47\x38\x9e\x32\x2e\xf9\x88\x3a\x30\xe9\x74\x70\xcb\x98\x55\x4f\x17\xfc\x59\x86\x42\x69\xdd\xed\x52\x2f\xb7\xb7\x6d\x0b\x62\x39\x84\x23\x76\x34\x9f\x9f\x9e\xf5\x53\xb4\x4b\xe4\x72\xe8\xfa\x90\x2a\x5e\x47\x14\x3b\x26\xdd\xbd\x6c\x4f\xcd\xe7\xdd\xbd\x20\xff\x7d\x44\xd3\xad\xf3\xd8\x6c\x9d\x1b\x2f\x86\x5b\x2f\x80\x3b\xef\x28\x3d\xb9\x7a\xa9\x98\xf3\x53\x4c\xae\xbe\xee\x3e\x7d\x5b\x4a\xce\xf9\x4b\xd5\x1d\xb8\x1b\x49\x89\xd1\x30\x41\x26\x63\xd2\x23\x96\x7b\xe5\xc5\xb0\xef\x05\xec\x3e\xf0\xba\xf0\xdb\x13\x60\x5e\xcc\x72\xc7\x73\xaa\x8f\x98\xf6\x2c\x40\x55\xdc\x98\xbc\x81\x1b\xe4\x87\x34\x19\x84\x84\x42\xe0\x06\x6c\x33\x3d\xb3\xaf\x28\x38\x81\xfb\x1b\x04\x84\x10\xb8\xca\xd4\x52\x2c\xb6\x60\x03\x77\xe6\xce\xd8\xfd\x8d\x17\x5a\x08\x49\x86\x7d\xe7\xa5\xca\xbc\xb0\xf9\xc8\xde\x57\x24\x95\x15\x48\xd9\xb8\x78\x1e\x0b\xc6\x2d\x6a\x19\x22\x41\x15\x11\x01\xdc\xfd\x0d\x01\xc4\xa9\x12\xf0\xdb\x58\x59\x4f\x20\x36\x13\x1c\xc0\xbe\xe1\x4d\xc9\xfb\x2a\x0e\x96\xd9\xa7\x22\x26\xaf\x9f\x1d\x00\xe0\xc4\xdc\x07\x5e\xaf\x34\x77\x61\x59\xbe\x7e\x29\xb1\xa6\xf5\xd2\x61\xfc\x82\xc3\xd7\x32\x43\xb4\x20\x2d\xaf\x8f\x5c\x1b\xaf\xd4\x37\x22\x38\xbb\x4c\x6c\xb8\x55\x2b\x5b\x04\x9f\x7e\x31\xd8\x68\x77\x0c\x91\x3b\x83\x90\xe6\x9b\xf2\x31\x96\xde\x47\xae\x66\x0a\x22\x37\x66\x61\xdf\x2a\x8b\x91\x3b\x73\x6f\xfa\xdd\xbd\x59\xae\x5c\x5a\x44\x66\xd0\xcd\x1c\xc7\x19\x8c\x27\xf5\x30\x26\x06\x86\xc1\x60\x42\xef\x27\xee\xbe\x3b\x63\x2a\x85\x3d\xf9\x13\x64\x0b\xba\x0c\xd0\x10\x24\x31\xa3\xee\x74\xf0\x64\x98\xe0\xb0\xe9\xbf\x73\x22\x8d\x91\xd2\xaf\x99\xb6\xa3\x36\xba\xe8\x75\xc1\xe1\x87\x4c\x97\x75\x5d\xab\xbd\x16\xc7\x1f\x30\xb4\x13\xdc\xe9\x41\x00\xaa\xc6\x13\x65\xf7\x51\x8b\x69\x57\xa4\x1a\x96\x72\x83\x8b\x70\x32\x3c\x8a\x86\x62\x96\xcb\xa8\x0b\xd6\xed\x5f\xec\xb5\x32\x69\x77\x91\x09\xa8\xa9\xb1\x30\xd9\xf5\xa0\x75\x7a\x71\xe6\x99\x7f\x5c\x1f\xce\x59\xa7\xc3\x3b\x64\x6a\xcf\x79\x90\x32\xf7\xd8\xa8\xdd\x1e\xed\xb1\x73\x4c\xd4\x26\xc9\xd5\xe9\xc5\x19\x4c\xd3\xb5\x3f\x87\x90\x82\x9d\x83\x85\x19\xc8\xa7\xa0
\xcf\xd9\x79\x7e\xd7\x2f\xc9\x6c\x3d\xe8\x82\x72\x7d\x28\x27\x91\x7c\xad\x96\xce\x64\xd0\x33\x98\xa9\xdb\xd2\x23\x25\x62\x84\x77\x0a\xef\xf9\x96\x00\xbc\x5b\x74\xb7\xa0\xd2\xad\xca\x4a\xf7\xa2\xeb\x45\x03\x67\x2f\x14\x89\x31\x5b\x14\x63\x4c\x62\x82\xa5\xc2\x21\x5a\x0a\x5c\x7e\xa1\x2a\xf7\xb2\x54\x29\xbc\x7d\xe9\x00\xb6\xea\x88\x29\x8e\x3f\x21\x66\xcf\x90\xe6\x69\x3f\xae\x59\xd0\xf9\x9c\xd4\xbd\xb6\xbe\x9c\xc5\xc5\xef\x8b\x76\xdb\x58\xab\xb2\xdd\x5e\x38\x56\x8d\x41\x96\x52\x5b\x61\xec\xc0\x0c\x94\x1b\x2f\x5c\xc7\x4a\x9d\x7a\x6e\x6c\xca\x29\x2c\x86\x1e\xc8\x0c\xe8\x73\xae\x39\xe9\x42\x9e\x32\xa0\x5c\x3b\xb7\x21\xec\xdc\xbb\xb3\x45\xcb\xa1\x0e\xf5\x41\xdd\x4b\xf7\x92\x29\x77\xe6\xd5\x15\xb1\xfb\x4b\xcf\x0c\x61\xea\x29\x37\x4e\xb2\xae\x77\xbd\x72\xf4\xdc\x2c\xcd\x77\xa9\xdd\x10\x63\xdd\x33\xe7\x91\x9d\x11\x59\xda\x17\xa7\xda\xbd\x2e\x14\x47\x99\xdd\x1f\x28\xd4\x2e\x82\xf0\x28\x2a\x8d\x95\x86\x82\x99\xa6\x7d\x91\x0b\xbc\xb4\xa7\x50\xce\x84\xd2\x4f\xc5\x28\x52\x82\x3c\x53\x24\xc6\x00\x7b\x37\xa6\xc0\x17\xfb\x79\xe2\x21\xcf\xb0\x3d\xd0\xc2\x63\x51\x3e\x20\x2f\xa1\x6d\xe6\xd9\x4a\x01\xed\xee\x97\x2d\xa2\xb5\xae\xb1\x01\x5c\x85\x0e\x9e\xc6\xc6\x81\x3b\x63\x76\x9b\xb8\x37\xf9\x92\x3d\xae\x23\xd7\x22\x1d\x81\x99\xc8\xe5\x70\xdf\x2c\x08\xb5\x14\x82\x9a\x7b\xad\x9f\xa7\x51\x03\x87\x8a\x9f\xa3\xfb\x3a\xcf\x3d\x5a\x9e\xa6\x4c\x17\xcf\x22\x52\xf7\xfb\x11\x5a\xb4\x56\xe4\x0d\xb8\x3b\xf3\x9e\x29\x62\xa4\x9b\x99\xb9\x72\xec\x9a\x76\xef\x6c\xce\xa7\x7c\x28\x85\x1b\x93\x69\xf7\x36\xbd\x76\x93\x2f\x4a\xe9\xf6\x4d\x7c\xca\x8d\xe6\x1c\xb8\xfb\x10\xb1\x4d\xf4\x7f\x04\x83\xd0\x76\x15\xa6\x5d\xf5\x17\x16\x30\x82\xca\xa2\x07\xae\x3a\xa3\x89\x68\xb7\x31\x0e\x42\x94\x02\x86\x70\x76\x16\xee\x0a\x2a\x77\x46\x24\xed\x0f\xad\xb3\xcd\x7b\x46\x7a\xdd\xb2\x93\xf6\xb3\x4a\x03\xdf\x6d\x5e\x5d\x43\xe0\x9f\xee\xa6\x22\x23\x84\x91\x31\x4f\xb5\xb8\xd5\x69\x68\xa5\xd5\x57\xd6\x1b\xaa\x3a\x0e\xed\x2f\xe6\xa5\xe4\x99\x83\x78\xb6\x2a\x24\xb8\xaf\xd8\x15\x19\x93\x73\x05\xb1\x2b\xf9\x95\x80\xd8\x45\x3b\x13\x53\x14
\xa5\x8a\x89\xab\xf9\xf9\x11\xbf\x12\xae\x8e\xde\x46\x37\x42\x3d\xe3\x33\x41\x68\x7a\x62\x2a\x17\xe5\x8c\x28\x3c\x4b\xc2\x3a\x07\xaf\xc8\x67\x45\xc2\x53\x71\x46\x8d\x95\x97\x1f\xa2\xfb\x0a\x38\x28\x08\x2a\xe1\x23\x0a\x24\xe8\xd2\xb1\x31\x06\x5d\x86\xa0\xdc\xe0\xdc\xfc\x73\x6c\xfe\x29\x85\x6d\x62\x02\xc4\x2c\x2c\x22\xf8\x00\x9c\x85\x38\x3f\x10\xb0\xcf\x8a\x14\x7b\xee\xab\x5a\xb8\xa0\x6f\x67\x2a\xc6\xdc\x8a\xec\x27\x9e\x97\xd0\x3e\x67\xaf\x8d\x9a\x1b\x58\xa9\x12\x30\x0c\xc1\x4b\x4f\x0d\xbf\x57\x22\x06\x2c\x9a\xa2\x8a\xa6\x68\x44\x33\xce\xd0\x34\xb6\xe7\xbb\x76\x5b\xb9\xef\x08\xe6\x32\x45\xa4\x43\xb3\x81\x74\xa8\x27\xc6\xa8\x0b\x15\x46\x21\xc3\xc8\x0c\x20\x6a\x1e\x40\xa0\x18\xef\xdb\x51\x04\x76\x14\xbe\x22\x8e\x69\xea\x50\xd2\xa2\x44\xb9\xfe\x07\xb3\xfb\x7e\x2a\x32\x32\x42\x26\x32\x63\x8b\x60\x84\x7a\x20\x8c\x98\x86\x40\xb1\x2e\x84\xe8\xae\x09\xde\xb6\xdb\x24\x43\x82\x85\xf8\x86\xda\xc1\xc3\x87\xfa\x48\x21\x65\x23\x80\xf7\x65\x78\x85\x7e\xe7\x43\xc5\xaf\xc4\xa0\xf6\x6d\x25\xd8\xa9\x14\xe6\x25\xa1\x27\xb6\x1e\xee\x76\x69\x29\xc4\xe8\xab\xb2\xa1\xc0\x82\xe8\x2c\x62\xbb\x74\x2f\x86\x13\x7a\x1f\xa3\xae\x12\x0f\xba\x1e\xf9\x60\x96\x18\x4c\x55\xe8\xe5\x6c\xa2\xca\xb1\x34\x93\xa0\x06\x04\xeb\x98\x5d\x6f\x73\xcc\xf6\x28\xf5\x50\xc5\x8e\xdb\x6d\x0b\x24\x66\x9b\xe5\xdd\xf9\x4d\x15\xd1\x02\x97\x91\x99\x9a\x49\x14\xe0\x88\xdc\x0b\x23\x71\x5d\x3e\x9f\x3f\x23\x3d\xda\x14\xcd\x32\x26\xd3\x08\x2e\xa2\x4a\xa4\x1b\xbd\x57\xed\xf6\x45\x38\xd3\x91\xba\x73\xad\xcb\x5f\x12\x9b\xd4\x13\x47\xfa\xa3\xf1\x50\xba\x1e\x5a\x06\xca\x98\x2e\x1f\x35\xd7\x02\xcf\x30\x1c\x28\xc1\x85\x37\xaa\x31\xc7\xc7\x6a\xa0\xa9\xc0\x6f\x82\x7b\xbf\x78\xbc\x52\x3e\x2d\x49\xa0\xe6\xb4\xc6\xab\x06\x56\x83\xd4\x8c\xac\x8a\x40\x6b\xb7\xb3\x5f\x50\x57\xcd\x9e\xf2\x0c\xec\x1f\xef\x8d\xaa\x44\x2c\x54\x35\xc7\xe7\x92\x54\xc7\x58\x32\x5d\x24\xbd\x3f\x91\x44\x60\x64\x5c\xce\xb7\x97\x8e\x8e\x34\xc8\xea\x29\x15\x66\x0c\x2b\x2f\x6c\xed\xf9\x94\xb6\xdf\x7e\x68\xce\xea\x92\xde\xef\x2f\x5d\x47\xd0\x74\x70\xa4\x89\x76\x39\xe6\xf7\x28
\x85\x11\x28\x5d\x0e\x91\x5f\xbc\xae\xf3\x41\x95\xc7\x87\x31\x28\x2c\x9b\x3e\xf7\x5c\xe8\xf4\x34\xf9\xe9\xdd\xab\x21\x5e\x7f\x20\x72\xf0\x36\x1d\xb6\xf7\x53\x92\xab\x28\x4f\x0c\x82\x3c\x5f\xeb\x6a\x78\x73\x29\x8a\x50\x2f\xb0\xa1\x4c\x04\x9e\xaa\x33\x42\xe1\xf7\xaa\x00\x67\xcd\x6a\x42\xbb\xb4\x3b\x0b\x54\x34\x99\xd8\x9c\x3b\xbf\x93\xc5\xd8\xd0\xea\xc8\x30\x1a\x54\x13\x5a\xba\x55\xa6\x57\x04\xab\x34\xa3\x9b\xf6\xfa\x56\x8c\x8c\x09\x97\x3d\x7e\x8a\xa6\x4c\xaf\x1e\x84\x5c\x9d\x97\x6b\x55\xe1\xff\x5c\x91\x57\x52\xed\xed\xd5\x65\x27\xcb\x4a\xff\xfd\xef\xd5\xa5\xa6\xb8\x29\x16\xb4\x4a\xfb\x96\x36\x66\x42\xbf\x92\x5a\xa8\x6b\x3e\x29\x17\x9e\x48\xa2\xf1\xb6\x4e\x53\xa4\x6d\xde\x48\xa6\x37\x38\xd0\xae\xd1\x10\x6b\xe0\x1a\x02\x0d\xa1\x86\x48\xc3\x48\x83\xaf\x61\xa6\xd9\xf2\xd4\x17\x71\x9b\x30\x69\x28\xdf\x4c\xcb\xaf\x1b\xca\xb7\xd2\xf2\x61\x43\xf9\x76\x5a\xde\x6a\x28\xdf\x49\xcb\xaf\x34\xbb\x6f\x79\xbb\x09\x5c\x68\xf6\xa7\xd8\xb8\x7b\x1e\x78\x1a\xbe\x7a\x0a\xfc\x91\x27\xe0\xc8\x0b\xe0\xd8\xe3\xf0\xde\x8b\x21\x78\x8d\xb9\x5e\x60\xaa\x57\x64\x29\x23\x66\x66\x31\x7b\x8b\x66\x43\xb8\xd1\x8c\x88\xf2\xdd\x97\xd2\xf5\x08\xe5\x06\x98\xdc\x70\x08\xbc\x49\x1c\xdc\x92\x73\x0d\xd2\x6d\x0d\xb4\xc7\x31\xcc\x83\x9b\xa5\x28\x17\x72\xc0\x3f\x06\x70\x4c\x6d\x5e\xa9\x1e\x85\x3b\xcd\xba\x30\xd6\xec\x02\x6e\x35\xdb\x84\xfd\x3a\x84\xd1\x4f\x63\x8f\x83\x37\x36\xd1\xe4\x6e\x55\x53\xaf\x1a\x5d\x72\x08\x31\x93\xc0\xd9\x2d\x2a\xe1\x3e\x68\x37\x80\x5b\xb2\x8f\xe3\x00\xed\x0a\xcc\xcb\x1b\x83\x62\x1c\x34\x13\x66\x72\x0e\xea\xd6\x62\x0d\xdb\x34\x4e\xda\x98\x8c\x35\xd8\x14\xb7\x36\x89\x57\xcb\xf0\x53\x78\x56\x05\x85\x2a\x50\x9e\x8e\x89\x49\xf8\x0b\xa0\x32\x83\xa7\x68\x02\x87\x9a\x7d\x84\xa3\x3f\xd0\xea\x25\xd2\x4a\x2f\x81\x8f\xd5\x8a\x78\xc8\xe4\x97\xef\xbf\xc9\x52\x26\x84\x4b\x9d\xc0\x2b\xdd\x98\x36\x11\xc3\x5a\xe1\x6d\xed\xd4\xc8\x04\x7e\x6a\xf6\x13\x7e\x2f\x37\x97\xde\xe2\x22\x15\x77\xee\xd7\x2e\x75\x3f\x33\xdf\x02\xcc\xf1\x3a\x44\x6a\x12\xd9\x55\xfc\x31\xf9\x89\xcb\xe4\xfa\x34\xf7\x59\x48\xa3\x23\x31\x91
\x47\x73\xac\x2d\x5a\xf4\x47\x98\x60\x33\xb3\x64\x6c\xf5\xb8\x54\x3d\x31\x4b\x7c\xdc\x38\x50\xe5\xb6\x06\xd2\x53\x2e\x4f\x28\x3c\xd5\x4d\x37\x67\x6a\x26\x41\x93\x31\x39\xd6\xd0\x82\x31\xf9\x6d\x3f\x6e\x82\x01\xec\xf0\x69\x65\x57\x97\xda\x33\xeb\x60\x83\xf7\x13\x0a\x27\xf5\x13\x5c\xb5\x66\x12\x78\x5e\x5b\x6d\x4c\x9e\x9a\xf9\x1a\x93\xa9\x86\x8f\x1a\x3e\x19\x94\xa6\x1a\x4e\x34\x1c\x6a\xe2\x8c\xf8\x64\x26\x1c\x6a\xfe\x2f\x81\x97\xcd\x58\xd9\x0c\x6a\xbf\x34\xfb\x06\xef\xff\xb0\xeb\xd6\xf5\x62\xb2\x63\x81\x19\xe8\xed\x8e\xc3\x2f\x48\x64\x89\x8f\x2b\x3b\xec\x4b\xc3\x0e\x7b\xaf\x61\xac\xd3\xad\xf3\x5a\xd7\x46\x3d\x87\x23\x22\x8a\xf0\x72\xe1\x72\x0c\x42\xc2\x43\x20\x9e\xbd\x0f\x18\x77\x39\x9e\x94\xe1\xfb\x30\x7b\x1f\xb1\x10\xef\xb0\x85\xf6\xfd\x28\x7b\xef\xb3\x51\xf1\x61\x11\x83\x75\x0c\xe6\xdf\x00\xff\x8d\xf0\xdf\x91\xcb\x61\xa7\xdb\xdd\xd3\x03\xc4\xd2\xa0\x63\xf3\x74\x78\xfb\xe4\x75\xca\x4b\x3a\x3d\xf0\xed\xf4\x36\x82\x52\x0d\xa5\x6a\xf1\x75\x35\xe7\xee\xbb\x15\xec\x3a\xef\xbf\x6b\x19\xc5\x8b\xca\xca\x96\xf3\xf7\xdf\x92\x77\xba\x49\x2e\x23\xab\xb1\xf9\xd9\x73\xee\x45\xe1\xb3\x6e\xc8\xc8\x99\xf2\x9c\xd8\x13\x70\xe3\x69\xd0\x46\xd6\x1c\xe2\x51\x09\x8a\x97\xef\x8d\xd4\x75\x43\x3e\x6b\x50\xee\x2d\x28\xd7\xfc\xbd\x41\x3f\x9e\x21\x7f\xff\x10\xe9\xff\x83\xae\xde\x3d\x90\xf9\x49\xca\x92\x98\x2f\xdd\x9a\xbb\x45\x27\x82\x06\xc1\xa4\x7b\x83\xb1\x95\x71\xb1\x9c\x2f\x34\x7c\x37\xac\xce\x3f\xa4\x10\x10\x8b\x40\x7e\x4e\x63\xf7\xe8\xd7\x15\x7b\xd4\x1f\xe0\xbc\x8d\xed\x9e\xf6\x8c\x6e\xf3\xad\xb1\xba\x66\xd8\x1f\x8a\x01\x6c\xf6\xd5\x90\xb3\xce\xb5\x3f\xf8\xb1\xdc\x34\x8d\xfa\x33\x0c\xb0\x6e\x8c\xf9\x38\xbe\x19\x9c\xb3\x7b\x31\xf0\xa6\x11\x87\x31\xf9\x61\x30\x20\x46\x59\xac\x9b\xac\x9b\x22\x5d\xee\x29\xce\x86\x99\x3f\x33\x7b\x46\x3a\xc7\x80\x2e\x53\xff\x90\x12\xcc\x44\x4a\xcf\xf0\xea\x7f\x86\xbe\x14\xec\x12\x54\xc9\xd3\x56\xf3\xd9\xa9\xd5\xeb\x82\xce\x49\xff\x10\x33\x80\x14\xfb\x55\xe3\x7e\xd5\xc5\x2e\x3c\x24\x01\x44\x74\x90\xa1\x88\xe4\x69\xb6\x09\x84\xe9\xc2\x9d\x51\xaf\x95\x6d\x91\x56\x92
\x80\x16\x2b\x38\x62\x2d\x4b\xf5\xdb\xed\x75\xe9\xfa\xae\x3f\x48\x25\x21\x5a\x31\x09\x08\xc1\xc6\xe4\x83\x86\x0b\xd3\xed\x9b\x3f\xfd\xa3\x04\x71\xf8\x44\x28\x3d\x73\x28\x68\x41\x9c\x51\x38\xd1\x42\x39\xd4\x3e\x9d\xab\x28\x9e\x9a\x07\xc4\xc4\x09\xe2\x99\x8e\xae\x5e\x98\x97\xa1\x3c\x77\x72\x56\x7d\xa8\xc9\xa5\xb1\x6a\x6c\x23\x25\x02\x11\x5e\x5b\x20\xcf\x35\x71\x66\xe1\x44\xc8\x40\x0c\xb3\xe7\x50\x5e\x84\x7e\xa8\x8b\x17\x3c\xd0\xe1\x35\xf2\x76\x88\x0d\xf6\x15\xca\xf0\xff\x9b\x97\x08\x22\xe6\x13\x7b\x26\x19\x15\xb2\xbc\x95\x9f\x9f\xf1\x45\xfa\x5a\x5a\x3c\x32\x42\xba\x2a\x56\x8f\x82\x93\xa5\x3c\x3b\xd2\x14\xb8\x19\xc3\x0f\x3b\xb7\xe9\xd8\xcd\xec\xe2\xbb\x58\xe0\x6b\x31\x0c\x35\x8e\x37\x10\x8d\xac\xf1\x3e\x18\x7b\x12\xbe\x59\xde\xa4\x4d\x2f\xa1\x58\x56\xc7\xd0\x29\x98\x67\x45\xed\x16\x5f\xb4\xa9\xa6\x69\x57\x03\xe5\xf5\xba\x9b\xdb\x0f\x88\xda\xc0\x02\xda\xa9\x34\xec\xd1\x0d\xcc\xe7\xdd\xd9\xdd\xd9\xd9\xda\x4d\x20\xaa\xeb\x2a\x14\xa5\xab\x9b\x6a\x8f\xed\x3c\x6a\xb7\xb7\x1f\xef\x31\x95\xc0\xe8\x8f\xf5\x9f\x3c\xda\x63\x98\xdf\x8e\xf5\x36\x37\x13\xf0\xff\xa6\x83\x27\xdd\x76\x7b\x77\x07\x3b\x98\xd5\x6e\x90\x91\x69\x30\x9f\xfb\xf6\x8f\xf3\xd3\xb1\xb9\x47\x22\x81\x97\x00\x27\x82\x1d\x90\xd3\x63\xe2\xb0\xff\xe5\xc0\x26\x85\x63\xe2\xac\xff\x2f\x07\xb6\xf0\x17\x73\xa0\x6b\x5f\x31\x07\x7a\xf4\x8c\xc2\xb5\x60\xbf\x60\xd8\xb0\x11\xaf\x05\x48\x70\x1c\x9a\x40\xab\xae\x46\x91\xdd\x06\xae\x1a\xca\x33\xfd\xf6\x42\xa0\x7e\xdb\x4b\x60\x2a\x56\x24\xb7\x2c\x3e\x8f\x44\xe1\x7c\x05\x95\xa4\x80\x8b\x8f\x4d\x50\xb8\xf9\x5b\xb8\x77\x62\xc5\xd5\xac\x7b\xfe\xdb\xd0\xde\x37\x4f\x80\xef\x7b\x1a\xfc\x2b\x2b\x1d\xc7\x88\x7f\x37\x81\xdb\xa6\x6e\xd6\xc6\xe4\x46\xc0\x58\xc0\x3e\xb9\x13\x86\x1d\x5f\x19\xc3\xe9\x37\x72\xe8\x00\xe5\xe4\xbe\x60\x5c\xc2\x81\x60\xf5\x69\x09\x6a\xb6\xfa\x2d\xd9\x17\xc6\xc8\xc2\x7c\x0b\x85\xb3\xf9\x90\x28\xd8\xe8\xd1\xc1\x98\x4c\x05\xac\xf7\x60\x4c\x6e\xcd\x42\x09\x4a\x3d\x2c\xda\xa4\x83\x5b\x72\x2e\x60\xbd\x0b\x5d\x30\x23\xaa\xc6\x79\x62\x74\xa7\x51\x7c\xae\xcc\x8f\xab\x0e\xce\x8e
\x51\x9e\xbd\x6a\x2b\x83\x7e\x67\xb1\xa9\xca\x9a\x65\x8d\xcc\xec\x3c\x6b\xa0\x9f\x03\x83\xd6\x85\x30\x46\x50\x75\xda\x4a\x66\x08\x85\xa3\xca\x42\x87\x85\xb1\x1a\x31\x09\xa3\x7a\x09\x6b\xe7\x27\x4a\x53\x3d\xf7\xaa\x36\x0a\xce\x0b\x7e\x7d\xce\xf5\x69\x6e\xae\x70\x34\x57\x8c\xa2\x3b\x22\xf8\xe5\x97\xb4\x61\xec\xb6\x32\xdf\x79\x5c\x30\x42\x84\xa1\xe7\x73\x0e\xb1\xeb\xd3\xc4\x96\xa5\x27\x3d\x6e\x50\x32\x86\xd3\x5a\x63\x82\x8c\xd2\xf5\x29\x04\x38\x27\x97\x8d\x84\x72\x4b\x8e\x04\x1c\xda\x8f\x25\x25\x14\x3e\x0a\x76\x29\xe0\xd5\x0a\xf2\xdd\x2c\x93\xef\xdb\xba\x6d\x51\x31\xd9\x72\x01\x90\x29\xf6\xa4\x8b\x29\x33\xd2\xc9\x2a\xc6\x6b\x9e\x08\x67\x31\x75\xf3\xa8\x63\xde\xb7\xd6\xd7\x98\xbc\x12\xa0\xc0\xa8\xe7\xc6\x00\x28\xdb\x62\x59\x4d\x16\x27\xe5\xb9\x5a\xef\x61\x92\x0c\x0a\x3f\x4b\xc4\xb0\xf2\xe3\x92\xb7\xe4\xad\x99\x84\x31\xc6\x3d\x26\xf0\x5b\xb0\x9f\x02\x8e\xff\xbe\xf5\xb9\xed\x14\x9d\xa0\xf0\x54\xb0\x63\x01\x9f\x56\xcc\x62\xb7\x3c\x8b\x27\x0d\x14\xfb\x49\xd8\x5c\x0f\x96\x6f\x19\xab\xad\xae\xe2\xba\x4c\xe0\x65\x5d\x81\x83\xbc\x38\x81\x5f\x82\xc5\x12\xde\x2f\xb3\x7b\x8e\x77\xb1\x02\xfc\xa8\x50\xc8\xd6\x5f\x8a\xd2\x19\xd2\x32\x7d\xdf\x90\x5f\x02\x78\xba\xff\x33\x86\x22\x31\xfb\x49\x85\xa0\x55\x41\x91\x87\x44\xd7\xf1\x86\x80\xa6\xbb\x3b\x4c\x37\x77\xbc\xb0\xaf\x91\xd5\x89\x62\x53\xc3\x97\xda\x19\x7a\x2f\xc8\x89\xf5\x66\xc3\xeb\xda\x95\xfa\x2d\xc8\x01\x39\x1d\x93\x8f\x02\x9e\x0a\xd2\x12\xa4\x4b\x29\x7c\x11\x64\x88\xb9\x6e\x8d\x42\xf0\x51\x40\x56\x7c\x85\x2f\xe1\x99\xa8\x61\x0d\xce\x3f\xff\xd8\xc9\x5c\xa8\x90\xaf\x41\x37\xc9\xc0\xfd\x05\x24\x1b\xbc\x90\x7f\x70\x8a\x9e\x99\x11\xbc\x13\xec\x2d\xbc\xa8\x1d\x67\xfe\x41\x0c\xf8\x2c\xd8\x3b\xf8\xde\xb8\x99\xe5\x5e\x6f\xa0\xbc\x5b\xf2\xd9\xcc\xf2\x0b\x81\x69\xd2\xd0\x28\xfa\x9b\x16\x5d\xd8\x48\x99\xc0\xd7\x4a\xf5\xd2\x71\xde\x0a\x1b\x8a\xff\x27\xdc\xcf\x47\x62\xa9\x32\x2e\x53\x69\x4c\x62\xb0\xd8\x9b\x2a\xa9\xa0\x01\x8d\x4e\x3d\x63\x36\x35\xec\x93\xaf\x96\x83\xd1\x04\x7e\x08\xb6\xf8\xc1\xb8\xf4\xf8\xbd\x19\xf3\x38\xc5\xfc\x8f\x58\x2f\xe3
\x2b\x8c\xdd\x49\x6d\xc6\x14\x78\x23\xd8\x0f\x01\x32\x5e\xad\x9b\xa8\x86\xf2\x4c\x37\xd1\x75\xe5\x6b\xd2\x6d\x0d\xa4\x4d\xcf\xe9\xa9\x34\x4d\x27\x88\xb8\x56\x77\xc8\x98\x6f\x7a\xec\x4d\x14\x4d\x33\x95\x23\xaf\x5d\x96\x2b\x32\x97\x2b\xb9\xdc\xc1\x78\x61\x1f\xf3\x61\x05\x7d\xbc\x0c\x95\x4f\x0d\x8e\x1d\x9b\x04\x46\x9f\xc7\x4f\x52\x99\x27\x85\x69\x74\x34\x33\x86\x40\x68\xa6\x23\x8e\xff\x03\xdf\xd7\x3e\x11\xb1\xe5\x9f\xe9\x31\x12\xf0\xb8\x59\xb5\x89\xe3\x7a\x93\x6c\x4c\xde\x08\xd0\x31\x28\xe4\x09\x78\x55\x1b\x6f\xcf\x6c\x6c\x26\x10\xc6\x2c\x88\x21\x8a\x59\x18\xc3\xa8\x11\x36\x4a\xc3\x57\x3a\x93\x86\x7e\xed\x62\x54\x55\xde\x54\xd3\x9d\xd5\x56\x35\xb2\x2b\xe3\xe2\x4f\x2c\x17\xc7\x73\xf0\x9c\xdf\x32\x55\x49\x1a\xf5\xdf\xc0\x6c\xe7\xf3\xee\x9e\xd5\xd5\x6a\xf0\xf1\xe3\xb2\x9a\x9e\xa0\x3a\xc7\xff\x8f\xf9\xb3\x3d\x99\xb7\xe1\x82\x09\x4c\x70\xce\x1f\x25\x70\xdd\x34\xcd\xeb\xe8\xe1\x34\x2f\xdc\x56\x42\x61\xb8\x62\xa9\xaf\xe3\x6c\x2d\x5a\xf1\x8a\x73\x8b\x32\xcd\x87\x76\xf4\xf6\xde\xa5\xa1\xde\x43\x12\x9a\x89\xa1\x85\x52\x2d\xd2\x24\x11\x3c\xd3\x21\x35\x70\x2f\x4e\xf2\xca\x9b\x94\x5a\xcd\x43\x75\x7a\x86\xa6\x3b\x3d\x10\xac\x07\x31\x33\xa6\xad\xd9\x1d\x81\x0d\xd9\xb4\x95\xc2\x94\xec\x45\xa7\x52\xc5\x50\xf1\x55\xcc\xc6\x64\x14\x43\xf1\xcf\x53\x41\x02\x41\x81\x70\xcd\xee\xf9\x0b\x6f\x12\x43\x70\xe1\x91\x58\xb3\xfb\xe0\xc2\x9b\x09\x08\x6e\xbd\x28\x06\xff\xda\x9b\x89\x84\xba\xc1\x85\x79\x11\x6b\x37\xb8\x35\xef\x62\xed\xfa\xd7\x49\x0d\xb9\xe0\x88\xb9\x29\x2d\xf4\x73\x1c\x8c\x2a\x8d\x7c\x6d\x61\x99\xb9\x76\xf9\x8b\xcc\x37\xc3\x32\x65\xdd\x27\xad\xd8\x14\x05\x17\x60\x35\xf3\x54\x2d\xef\x61\xe6\x60\xb3\xfe\x78\x77\xb7\x54\x4d\x15\x24\xda\xa9\xd6\x02\xc1\x72\x76\xae\x17\xec\x86\x31\x19\xc6\x80\x48\x07\xb7\x4b\x14\x98\xa2\x96\xdb\x00\x02\x5d\x93\x14\x7e\x0b\x82\xfe\xb1\x3a\x23\xbc\xc4\xd5\xac\x18\x96\x86\x37\xcf\x62\xcc\x33\x0f\x13\x61\x04\x3e\x11\x9a\xfd\xcb\xf9\x17\x20\xa7\x40\xdf\xc9\x77\x41\x7a\x46\x40\x92\x1e\x35\x02\x86\x54\x54\x82\x6e\xa6\x2d\x08\x6d\xd5\x05\x1e\x83\xd0\xf0\x5a\xa0\x97
\x96\xc2\x45\xcc\x48\x60\x0f\x4b\xba\x49\xdd\x7e\x3b\x24\x2f\xec\x3d\x6e\x4c\xa7\x35\xc8\xd4\xc4\x2e\x48\xea\x2d\x6e\x3a\x7b\x64\x36\xad\x6c\x86\x20\x93\x60\xe1\x2a\x09\x16\xfe\xe7\x96\x47\x00\x31\x0b\x50\x7a\xa5\xc2\xe1\x3f\x34\x3e\x96\x4c\x0e\x23\x2f\x62\xd7\x5a\x1b\xe7\x8d\x1b\x5a\x19\xdb\x78\x35\xf7\x3d\xcf\x77\xfc\x5d\xbc\xd2\x79\x6c\x6c\xdc\x18\x14\xfc\x4c\x95\xbc\x1b\x43\x4e\xe9\xbb\x31\xf9\x51\xcb\xff\x54\x4c\xd2\x33\xb1\xd8\x66\x59\xc2\x55\x2d\xd7\x2d\x44\x2e\xa6\x10\xc7\x6a\xd2\x68\x68\x06\xa3\x71\xbd\xc4\x4d\x5b\x64\x88\xfc\xa9\x6b\x61\x60\x8e\xc9\xa5\x40\x44\x2f\xd1\xec\x30\x7f\x34\x1e\xf9\xfc\x01\x21\x6c\x7c\x8c\x9a\xec\x19\xe2\x74\xdb\x34\x4b\x76\x21\xff\x66\x74\xfd\x3f\x4f\xe8\xdf\x8e\x2d\x9d\x56\xe0\x67\x38\x14\x9e\x4e\xdc\x7e\x05\xc9\xe5\x1c\x2f\x19\xf8\x69\x5c\x01\xbf\x70\xd7\x33\x0f\x38\x46\x15\xe0\x80\x9c\xca\x33\x0a\xfb\xe4\x2e\xce\x07\xbd\x78\xc3\xaf\x5a\xf1\xb6\x5c\x31\x3b\xe0\xcb\x6b\x5e\xa6\x83\xd5\xf6\x4f\x5c\x3c\x15\x30\x6e\xc9\x38\xce\x20\x50\xcc\xe3\x90\x24\x20\x1a\x97\xac\x95\x80\x4a\x97\xe9\xa0\x69\x99\xca\xa2\x8b\x97\x44\x57\xce\xc2\x79\x99\x85\xe3\x86\x7b\x45\x62\x4c\x58\xb0\x97\x39\x47\x8c\x3c\x8b\x51\x3e\xc7\x85\x5f\x04\x65\x9a\x91\xcf\x39\x98\x66\xb1\x56\x16\x67\xbc\x2a\xce\xd0\xa5\x12\xff\xa5\x1d\x7c\x43\x0e\x62\x2b\x0f\xca\x6a\x8b\x99\xa4\xc3\x98\x3d\x8b\xeb\xec\xa1\xb5\xd4\x63\x98\x7e\x8b\x0c\x7f\xa9\xd4\xd8\x82\xa3\x54\x7e\x3e\x15\xe4\xad\x4e\x0d\x2c\x12\x69\x76\xef\xf7\xbc\x13\x41\x48\x68\x7f\x3a\x89\x03\xc1\xa5\x77\x15\x43\xf0\xd4\x73\xc0\x81\xe0\xd0\x3b\x44\x29\xea\xdc\x3b\x10\xbc\xf3\xba\x09\x75\xfd\x1e\x35\x95\x42\xed\x06\x97\xa6\xde\x89\x20\xe6\xf7\x53\x6a\x6a\x9b\x5f\x87\xa6\x81\x7d\xeb\x5f\x53\xd3\xac\x86\x16\xe5\x22\x2d\x76\x17\x48\xae\xb7\x48\x59\x9b\x49\x82\x1d\xbd\xc3\x5d\x7f\x13\xc3\x7b\x41\x22\xdb\x05\x3e\x46\xd8\xf5\x0d\xd9\xcf\x4b\x7a\x34\x7d\x19\x21\xae\xf6\xad\x41\x34\x42\x30\x56\xf2\x50\x0a\x97\x8d\xb6\x42\x7a\x2a\x8c\x66\xc2\xc7\x3f\x58\x24\xaf\xfe\x60\x91\xbc\x8d\x9b\x5d\x9f\xd6\x41\x59\xf6\x4d\xfe\xac\xc5
\xe9\x96\xbc\x8d\x2b\x8a\xac\xef\xd3\x04\x7e\x2f\x4b\x82\xdc\x77\x54\x93\x4c\x3f\x63\x36\xfd\x52\x4e\x44\xcd\xb2\xfc\x32\x1c\x1d\x45\x63\x8d\xf9\x51\xcb\xa7\xfb\xd9\x72\x64\xb7\x12\xca\xd5\x7f\xc7\xf6\x9e\x6e\xf9\x74\x9f\xc2\xf1\x32\x5e\x69\x5f\x24\x73\x60\xb6\x60\x88\x09\xa8\xbb\x66\xe0\x3d\x9b\xc0\x32\xff\xe4\xae\xdb\x1a\xe0\x37\x06\x7e\xc7\xa8\xf5\xb4\x28\x7e\x15\x46\xdb\x1c\x3b\x4f\x9b\x80\x8f\xc9\x71\x5c\xfd\x66\x79\x06\xe7\x85\x86\x9f\xb1\xfd\xac\xbd\x05\x84\x9f\x7b\xff\x64\xf6\xc6\x54\xc3\xd3\x98\x1c\xc5\x86\x18\x28\x9c\x34\x0b\x5e\x4b\x14\x36\x4e\x20\xa1\xf0\xbc\xa9\xe6\x3d\xdf\xf7\x30\x0e\xc1\xc6\x40\xf2\x96\x77\x8b\x21\x00\x31\xf9\x14\x53\x38\xd6\xa4\x45\x53\x0b\xed\x65\x85\x2e\x30\x46\x38\xbf\xfa\x01\x71\xf1\x51\xcd\x5a\x43\x4d\x81\x31\x9b\x20\xce\x4e\x23\x7f\xc5\x0d\x27\x36\xe9\x09\x99\x14\x37\x8e\x61\xbf\x2f\x63\x78\x1e\x57\x8e\xc9\xf0\x21\x88\xae\xae\x84\xc4\x03\x1d\x0a\xef\x63\x7b\xfe\x56\x67\xff\x90\x0b\x23\xa5\x2e\xb5\x51\xbc\x2e\x75\xfe\x1f\x6e\xcd\x37\x4b\xdd\x57\x8e\xe3\x28\x7c\xa9\x47\x32\x16\x14\x5e\xc7\x0c\xdf\x62\x8e\x66\x87\xc2\xbb\x98\x91\x91\x66\x28\xd4\x3e\x68\x18\x6a\x78\x1f\x9b\xe1\x7c\xd0\x70\xa5\xe1\x75\xf6\xfb\x5a\xc3\xaf\xec\x77\x4b\xc3\x97\xec\xf7\x44\x03\x17\xe9\xef\x99\x06\x91\xfd\xb6\x86\x55\xcd\xc0\xac\x7c\x4a\xe8\x59\xc9\x98\x54\xd5\xb3\xdf\x3a\x56\x41\x14\x4d\x60\x64\x8f\xfd\xe3\x34\xe8\xe7\xf3\x0a\x73\xec\x35\x48\x98\xda\x0f\x37\xc1\xf7\x55\xca\xd0\x7d\x30\xf2\x14\x04\xd2\x93\x10\x28\x4f\x43\xf0\x02\x33\xf0\xc1\x87\x98\xcd\x24\x7c\xad\x65\x12\x63\xf2\x21\x86\x7b\x7e\xe3\xad\xf7\x80\x6f\x79\xeb\xbd\x04\x7d\x3a\xdf\x62\xf6\xd0\xfd\x9f\x0f\xe1\x47\xcc\xae\x25\xbc\x69\xc4\xee\x80\x8c\xc9\x17\xdc\x40\x98\xcc\x9c\xd7\xf3\x46\xbf\xdd\x26\x0e\x7a\xe0\x5c\x3e\x9f\xe3\xf1\xad\xb1\x3e\xc6\xda\x1a\x2e\x1c\xad\x06\xea\x19\xc9\xcd\x57\xc7\x2e\xb4\xbc\x8d\x5e\xf9\xdc\x07\x86\x9e\x00\xe1\xa1\xc0\xd4\x4d\x6d\xc3\x11\xd9\xc0\x04\x1b\x6e\x6b\x3e\x37\x32\x3e\x7f\x23\xcc\x1b\x61\x90\x4a\x1f\x87\xf6\xc5\xb0\xf0\x87\xdf\x10\xc5\xa1\x00\x96\x66\x71
\x12\xee\xb0\xef\x33\xe1\xe6\x79\x27\xb1\x5a\x17\xfd\x67\xc2\x0d\x00\x1f\x7b\xc0\x5d\x1f\xb8\x1b\x00\x77\x87\xc0\x5d\x41\xb3\xf7\x06\x9a\x8f\x7a\x5b\x76\xa0\x90\x3a\x7e\x20\x62\x46\x9b\x18\xb1\xd8\x7e\xd5\xa8\x82\xe5\x02\x4a\x01\x84\x90\x76\x6b\xb1\x8b\x28\x8c\xd2\x94\x19\x0b\x68\x99\xf2\xb4\xeb\x0c\x43\x61\x94\x16\x33\x82\x1c\x29\x4c\x24\x07\x23\x5c\x49\xc1\x6b\xe4\x4f\x4d\xc4\x5e\x0a\xdf\x54\x08\x62\x08\x62\x9a\xc7\x13\x71\xcc\x34\xe6\xdb\x6f\x58\xe3\x47\xb5\x87\x78\x12\xbd\x18\x27\x16\xd3\x45\x59\x73\x43\x34\x4f\xbd\x0c\xb7\x44\xd8\xc9\x0f\x28\x84\x8b\x7a\x26\xf6\x6d\xea\x29\xbc\x32\xb9\xa8\x02\x94\xc0\x04\x05\xa0\xd0\xb2\xbe\xb8\x6e\x7c\x16\xf5\xbc\xaa\xbd\x11\x53\xbb\x02\xd5\x65\xaf\x4e\xaa\x99\xd2\x84\x02\xe7\x8b\x61\x7c\x1b\x3d\x9b\x1d\xba\xdd\x4e\x7f\x0d\x4b\xbf\x85\x91\xb9\x59\x77\xe6\x11\x29\xd1\xfe\xe0\x85\xbd\x3d\xc4\x83\x7f\xd1\x0f\x70\x6e\x43\x9c\x5b\x1b\x2f\xe9\xe3\xdc\x56\xd1\xb2\x2a\x61\x90\x53\x09\x7e\x04\x26\x00\x85\x59\x29\xb2\x85\xef\xe2\xc2\x63\xc2\x94\x24\x4d\x34\x81\x1d\x71\xcc\x01\x10\x30\x8e\x1d\x71\xa4\x4d\x22\x18\xc7\x21\x0e\x29\x8c\x0c\xf1\x80\xcf\xf8\x9f\x36\xc1\x02\x16\xa9\xd1\x0a\x86\xce\x63\x83\x45\x54\xa1\xc0\x11\x22\x92\x1f\x0e\x42\xf0\x7f\x34\x8f\xc3\x6c\x1e\x87\xcb\xf3\x68\xa6\x01\x53\x57\xe1\x4e\xf6\x61\xc6\x0c\xd2\x13\xb3\xb7\xe1\x7a\x69\x6f\xf7\x70\x5a\x7c\x7b\x4d\x6f\x71\x46\xb5\xcd\x95\x90\xcf\x68\x9a\x21\x32\x9d\xd1\x72\xc3\x00\x3b\x0f\x59\xe0\x0e\x31\x61\x8e\x00\xbc\x71\x09\x18\xcd\x06\x33\x36\x42\x14\x46\x88\xc2\x68\x69\x66\x03\xd7\x87\xa0\x98\xd9\xd0\x2c\x8d\x1b\x40\x68\x40\x96\x76\xb2\xa1\xf9\x68\x09\x97\xf2\xac\x86\x7c\x85\x63\x2f\x9b\x3e\x6e\xa6\x8e\x9b\x69\xab\x06\x84\xda\x4b\xed\xad\x76\xdb\xfe\xe0\xe5\xb7\xc3\x74\xfe\xb3\xa7\xc2\xdc\x0c\xb8\x11\x7c\x98\xb2\xa0\x38\x43\xac\x7d\xa9\x92\xf2\xa8\x75\x85\x8b\x66\x24\x66\x70\x35\xe4\x89\xdc\x2a\xfa\x2b\x1a\x29\x79\xad\x70\x17\xf9\xb8\xfa\x41\x4e\xee\x71\x96\x1f\xbe\x70\xcd\xf0\xba\xc9\xb0\xae\x21\x9e\x1d\xbe\x60\x07\x61\x06\x3e\x62\x61\x79\xd5\x34\x87\xd0\xe5\xf9\x42\x45
\x9c\x84\x66\xef\x44\xf9\x5a\x04\x71\x65\xb0\x29\x23\xe7\x78\x77\x8b\xfe\x45\x51\x10\x27\x30\x5a\x8e\xe3\xae\x89\xdb\x0d\xe2\x9a\x83\x70\x85\x99\xf1\x15\x8e\x5c\xd9\x91\xbf\xc2\x9b\x87\x7b\xdd\x62\x1e\x79\xb6\xd6\xe5\xe1\xdb\x0f\x03\xe2\xd4\x84\x66\x6a\xc2\xac\x2c\x32\x53\xa3\x8a\xa9\x89\xb2\xa9\x19\xb1\x68\x61\x6a\x22\xd7\xfc\xcf\x87\xc8\x0d\x60\x4c\x46\x86\xed\x46\xc8\x5a\x9a\xa6\xc7\x12\x69\x5a\x95\x2f\x4d\x51\x73\xf1\x98\xf8\xe6\x9d\x4f\xc2\x92\x30\x47\x6a\xc7\x23\x87\xfa\x09\xcc\x5c\x7b\xf7\x7f\x35\x6f\x87\xf6\x33\xf7\xe9\xfc\x94\xe9\xb1\xb2\x77\x90\x2d\xb5\x52\x85\xc3\xd2\x65\xb6\x2d\xd1\x46\x1a\x26\x09\xc9\xef\x30\x66\xb4\x35\xc0\xe9\xd2\x39\x1d\x71\x43\x07\x01\xa5\x5e\x79\x7e\x34\xcf\x87\x95\x4e\x41\x40\xcb\x13\x49\x61\xb6\x3c\xce\xcc\x0c\xc2\xfa\xaa\x90\x78\xda\xe0\xa8\x0b\x89\xa7\x17\x98\x61\x16\x94\x6f\x44\x8f\xb6\x12\x6f\xd2\x2c\x53\x15\x49\x4f\x1f\x74\x91\xf0\x51\x60\x22\xd6\x19\xc7\xb7\xde\x2d\x89\xcd\x2f\x81\x9f\x40\x49\x28\x5c\x57\xb7\x74\xf5\xdb\xd3\x43\x11\x44\x43\xf1\xf9\xe4\xd5\xb3\xe8\x6a\x1a\x49\xcc\x62\xd9\xf0\x15\x6a\x18\x36\x5e\x72\x38\xd2\x99\xf3\x76\x60\x35\x79\x2f\xf5\xa7\x65\x61\xdd\xad\xe6\xe9\x7a\x13\x83\xc3\x9c\x34\xdf\x9e\x36\x0a\xae\x36\x5a\x6d\xbb\xbd\x8e\x7f\x2b\xf1\x17\xa8\x0a\x5d\x73\x34\x21\xcb\x5e\xdf\xdc\xb4\x2e\x1f\x01\x5e\xf3\x52\xd2\xf7\x94\xd1\xe2\x61\xf1\x84\x03\x87\x21\xc7\x8f\x7a\x2c\x04\x1a\x5f\x35\xe0\x09\xa2\xce\x1c\xac\xf0\xf2\x3c\x45\x6c\xc6\x16\x95\xab\xf3\x10\xc2\x70\x64\xb5\x76\x6d\x14\x64\x6d\x47\x85\x59\x65\x6b\x82\x41\x8e\xb4\x0d\x07\x6e\x7a\x36\x6b\x41\xa4\x0d\xe4\x6d\x01\x11\xe9\x1c\x3e\x74\xcc\x6c\x8f\x8c\xe9\x40\x84\x99\x3f\x8c\x9f\x10\x2e\x1f\x08\xd7\xf7\x04\xa5\x60\xe6\x00\xaf\xb0\x07\xd7\xd4\x6d\x0d\xc2\xd8\xc3\x10\xdd\x16\x87\xd0\x6c\x75\x03\xa4\x6d\x3f\x08\x43\x0d\xa8\x63\x78\x6b\x2f\x9a\xc1\x05\xaf\x09\xb5\xcb\xa3\xfd\x3f\x69\x18\x93\x1f\x31\xf4\x00\x5f\x7d\x8b\xe1\x6b\x4c\x9c\x7f\xfe\x19\xa0\xd1\x29\x5d\x7e\x4c\xed\xb1\x9f\x3f\x38\x26\xa9\x3f\xdf\x06\x9d\x8e\xc9\xe7\x18\x0d\x75\xb4\x59\x2e\x35\xf5
\x8e\x89\x19\xc7\xa5\xa6\x60\x86\x75\xc5\xe1\x5d\x0c\x27\x44\xc2\x3d\x3f\xf6\x2e\x35\xf0\x91\x87\x69\x17\xaf\x3d\xed\xfa\x49\x75\x03\xbc\x88\x3d\xe1\xf2\x04\xa6\x3c\xbd\x29\x74\xce\xb3\x30\xb9\x1b\xfb\x6b\x27\x81\xbb\xba\x91\x5c\xa0\xfc\x59\xca\x0f\x91\xe5\x5b\xb0\xdf\xa8\xe6\x78\x27\xa2\x7a\x6d\xdc\x98\xb3\x95\x82\xcd\xa2\xe0\x71\xa5\x60\xab\x28\x78\x52\x29\xe8\x16\x05\x3b\x95\x82\xec\x33\xba\x6b\xe7\x7c\xe1\xfb\xb5\x37\x7c\x51\x3f\x9f\xf2\x24\x81\x71\x9d\xc5\x58\xba\xa1\x75\xdb\x50\x9e\xdd\xd0\xda\x6f\x28\xcf\xfc\x6c\x07\x0d\xe5\xd9\x0d\xae\x67\x38\xc9\x9b\x09\x1c\xf2\x55\x86\xf5\x42\xb4\xa1\xb1\x3a\x8d\x15\x71\xc4\x99\x94\x70\xd9\xc8\x61\x84\xb1\xf5\x1f\x0a\x69\x2f\x67\x7c\xac\xb7\x8e\x13\x78\xc5\xd9\x11\x27\x63\x72\xc9\x61\x13\xb6\x36\x29\x85\xb7\xf8\x55\x8b\x9f\x9c\xed\x93\x43\xa3\xee\xbc\xe2\xf0\xd6\xfc\x47\xe1\x37\x67\xd7\x70\xbc\xdc\x63\xd9\xa3\x6d\x18\xd4\x6f\x0e\x5b\x9b\x20\xed\xc7\xba\x8c\xc4\x42\xde\x96\x0e\x44\x1b\x92\x4b\x3f\xb5\x2a\x8a\xcb\x20\x78\x38\xd1\x97\x4c\x80\xb2\x2e\xe8\xa7\x0d\xf6\x3c\x4f\xe0\xd3\x9f\x50\x38\xe2\x44\x3d\xdc\xda\xcc\x39\x9e\x2e\x4e\xc8\x32\xdc\x5c\xde\x97\x66\x47\x1a\xc6\xdf\xa2\xa0\x18\x86\x71\x9e\x70\xa6\x24\x3c\x6f\x9c\xd4\xee\xde\x2b\x7c\x1e\x48\xcf\x30\xbf\x97\x9c\xfd\x80\x5f\x0d\x88\x66\x21\x3c\xef\x6b\x05\xbc\xd1\xd8\x53\x6c\xb7\x36\x1f\x28\x57\x80\x60\x27\xe9\x42\x6c\x6d\x82\xde\xe8\xe1\xa7\x39\xe4\xe0\x8b\x61\x66\xe7\xd4\x53\xee\x39\xde\x90\xf9\xc4\x8d\xd5\xe9\x16\x8c\x1a\x97\xe9\x17\x27\xca\x1d\xd1\x8e\xe1\x2c\xcf\x39\xec\x80\x78\xf0\x8a\x53\x30\xac\xa3\x50\x68\xca\x35\xd3\x65\xc5\x62\x0a\x5f\x38\x1b\xc1\xeb\x26\x1f\x46\x89\x67\xab\xbd\x6e\x31\x97\xef\x39\xac\xf7\xe0\xfe\x1c\xdd\x20\xfa\xe1\xd6\xe6\xbc\x0b\x23\x2f\x4b\x44\x6e\x59\x08\x70\xef\x96\x7c\xc1\x31\x61\xac\x5c\x1a\xe1\xa7\x36\xcc\x20\xf1\xd8\x01\x89\x83\x83\xa0\xd9\xe1\xc3\xbb\xda\x09\xeb\xee\xc9\xdc\x2b\xfb\x5f\x5b\x9b\x78\xd2\xfc\xc5\x68\x1d\x72\x43\x97\x7c\xa8\x37\xe4\x35\xde\x1a\xdc\xd0\xa6\x07\x09\x2d\x10\xf9\xf8\x7f\xf2\x84\xc2\x8b\xba\xe5\x5a\x97\x6e
\x2b\x81\xcf\x4d\xeb\x9e\x6d\xfa\x2c\x7e\xef\xfb\x8a\x8a\x95\x40\xbf\x0f\x2b\x2a\x56\xc2\x82\xbf\xfe\x81\x17\x7d\xe3\xec\x39\xfc\xa8\xa5\x34\xbc\x5e\xd4\xe4\x05\xee\xf4\x12\x8a\xa7\xcf\x09\xbc\xe1\xec\x1c\xe4\xe2\x77\x62\x16\xee\x67\xf5\xf6\x98\xa5\xf0\x42\xf7\x2a\x3c\xbc\x6a\xa3\x87\x24\x88\x37\x5b\x74\xbe\x5b\x41\x33\x6e\xc6\xa0\x82\xe6\x73\x5e\x19\xa0\xea\xdb\x32\xc4\xa6\x57\xd4\x7b\xc3\xf1\x70\x54\x05\xd0\x85\x1f\x46\x95\xdf\xe8\xa5\x21\x6d\x22\x60\x2f\x20\x0e\x6a\xf7\x5a\xc7\x71\x12\xe0\xc1\x7f\x10\xaa\x6f\x84\xe9\x52\xbc\xfe\x31\xf9\x44\xe4\x69\xf7\xcc\xc8\x60\xfb\xa9\x9a\x1e\x35\x12\xd6\xbe\xed\xc8\xd3\x5e\xa9\x68\x93\xa2\x04\x4e\x20\xa8\xc5\xc9\x4a\xea\x7f\xe4\xda\xda\xda\x9a\x93\xea\x0a\xff\x48\xa3\xb5\xd1\x04\xc2\x80\x1d\x4a\x88\x9a\xe6\xc1\xf9\x47\xfe\x23\x89\xd3\x89\x03\x22\x3b\x3d\xda\x71\xe8\x9a\xd3\x09\x02\x32\x0a\x52\x4f\xec\xa8\xa1\x4b\x3f\x40\x6e\x96\x80\xbf\x0c\x7a\xd5\x41\x4b\xe6\x28\x28\x6c\xd3\xd2\xd1\xa6\xbd\x53\xcd\x03\x12\xe7\xfc\x54\x96\xbf\x82\x6c\x63\x81\x8a\x34\xff\x99\xff\x8f\x8c\x04\x51\x46\x7f\xb3\xa1\x53\x8a\xd2\x76\x7b\x4c\x44\x00\x7e\x6c\xf6\x24\xc1\x94\xd0\x10\x58\x7a\x12\x03\xc7\x75\x3a\xb1\xe7\x9c\xfe\x0b\xbf\x76\xfe\xaf\x33\xc7\xde\x02\xc4\x40\xb3\x9a\xbb\x9b\xf6\x1c\xc8\xda\x3b\xce\xa9\x9d\x2b\x97\xd3\x8e\x73\xe6\xf4\x4b\x70\xc3\x55\x50\x36\xbd\x34\xce\xdb\xe5\xf6\x6e\x8a\xd5\x3b\xa3\x42\x87\x1e\x31\xa3\x49\x0e\x9c\x4f\x17\x62\xed\xf5\x2c\x92\xee\x73\xd4\xfe\xdd\x48\x8a\xe3\xd1\x1a\xd7\x6b\x97\xb3\x48\x3a\x9d\x4c\x2d\xfb\x82\x37\xa0\x3c\x67\xa9\xaa\x43\x3b\xce\xda\x88\x87\x13\xfc\xa6\xfb\x9a\xbe\x10\x6b\xa3\x68\x32\x89\x6e\xec\xb7\xa8\xe3\x80\xfc\xe0\x24\xa2\xa6\xd6\x0d\xbf\x9b\x79\x4e\x7f\x81\x8c\x0c\xe9\xe0\x80\x46\x30\x26\x3a\x80\x28\x80\xc8\xa8\x9a\x92\x71\xa6\x59\x84\xa7\x5b\x01\x53\xcb\xca\xb1\x73\xc2\xe5\x5a\x28\x75\xb4\xc6\x6b\x46\x80\x9f\xc7\x97\xd1\xda\x34\x9a\xcd\x42\x3f\x9c\x84\x3a\x14\x33\xa7\x63\x07\xdd\x3c\xbe\x75\x87\x56\x4e\xd9\x7c\x5c\xfc\x19\x2e\x49\xb6\xf8\xa8\xbe\x3a\xef\x55\xe4\x4f\xc4\x95\xed\xc8\x0c\x1b
\xd3\xef\x34\x41\xee\x38\x9e\x19\x2a\xee\x19\x6f\xb9\xed\x79\x78\x2d\xa4\x85\x80\xf5\x1c\xda\x21\x41\x40\xc6\x24\x0c\x60\x1b\x66\xa6\x7d\xfa\xda\x37\xfc\x68\x16\xb0\xa9\x84\x49\xc0\x66\x01\x79\xa9\x29\x5c\x07\x6c\x28\x61\x58\xc7\x01\xf3\xd9\x9e\x04\x30\x26\xd7\x01\x94\xae\x65\x37\xd7\xbf\xe7\x53\x4f\x03\xff\x61\xd8\xf8\xc8\xf2\xf1\xab\x80\x5d\x49\xb8\x08\xd8\x8d\x84\x69\xdd\x2e\x2d\xe9\x87\xe7\x0d\xe5\x4f\x20\xc0\xf2\x9b\xa6\xf6\x3b\x70\x8e\x15\xee\x1a\x2a\x64\x07\xb9\xe3\xc0\x88\xe0\x9d\x04\x6e\x2b\x63\xa8\x7e\x16\x67\x4c\xae\x82\x9a\xf0\xaa\x31\xb9\x08\x20\x86\xa5\x48\xd9\xbb\x80\x04\xd5\xd8\x22\xac\x79\x13\x90\x03\x72\xca\xe1\xdc\x14\x9f\x19\xf3\xa5\x62\x6a\x4c\x91\x79\xe1\x17\xfc\xbd\x3b\xfb\xc3\xa0\x87\x91\x22\xc1\xaa\x10\x1b\x5c\x91\x5b\x72\x9b\x2f\xcb\x38\xa0\xb8\x04\xf6\xda\xff\xc1\xaa\xc5\xf9\xea\x69\xf0\x63\x4f\x81\x9f\x5e\x32\x7d\x16\xa4\x86\xce\xa1\xe1\x0e\x0b\xc3\xae\x1e\xf7\x3b\xb1\x9c\xaa\x28\x10\xb3\x99\x18\x3a\x5e\x31\xf6\xae\x3d\x08\xc8\xee\xcf\x95\x4a\x7a\x69\xc9\x2c\x9e\x4e\xd5\x52\xbb\xcd\xa5\x23\x82\x69\x40\x9c\xcf\x72\x2c\xa3\x1b\xb9\xa6\xef\xa6\xc2\x5b\x73\x3a\x18\x38\xf1\x2c\xa0\x70\xd4\xb0\xb2\x8f\xc0\xb7\x17\xf6\x03\x76\x4b\x86\x01\x14\x17\xfb\x9e\xde\x39\x70\x14\x10\xd3\xd8\x96\x64\x57\x00\x97\x0b\x34\xd7\xc2\x81\xc3\x00\xee\x02\x72\x10\xe0\x09\xea\xc7\x80\x45\x0d\x2e\x59\xfc\x24\x50\x3e\xa7\xbf\x0c\xc1\x1f\x78\x21\xf0\xd7\x5e\x0c\xfc\xbd\xa7\x52\xfa\xbf\xf0\x04\xf8\x37\x1e\x07\xff\xce\x8b\xc0\x7f\xee\x61\x38\xe4\xab\x0c\x4f\xc9\xaf\x84\x03\xcf\xb0\xcf\x9a\x91\xf1\x6d\x5c\x21\x0a\x6f\x03\x36\x8d\xe1\x67\xc0\xde\x0b\xf8\x5d\x2b\xe5\x7e\x9a\x0d\x9f\x85\xd8\x3e\xb6\x21\xb6\x34\x81\xe3\x80\x69\x09\x4f\x83\x45\x67\xac\xfd\x2a\x66\x96\xaa\xf5\xe1\xe9\x3f\xb3\x5b\x3f\x3a\x7b\xe8\x6a\x31\x43\xc7\x4d\xee\x22\x48\xb3\x4d\x75\x64\x91\x6d\x81\x59\x35\xc1\x8a\xf9\x4f\x01\x7b\x16\xc3\x49\x2d\x4e\xf8\x19\xf3\x9e\xd5\xd5\xea\xe3\x94\x2b\x91\x7f\x46\x88\xdb\xee\x12\x78\x6e\x48\xf1\x6d\x95\x14\xc3\x11\x79\xb2\xf7\x42\x94\xd0\x3b\x09\x88\x73\x70\x3b\x15\x81
\x16\x43\xc3\x3c\xaf\xa2\x99\x5e\x7b\xb2\x36\x0c\xcf\x43\x3d\x83\x35\x3f\xd6\x6b\xe7\x91\xb6\x92\xc4\x36\xa4\xe9\x78\x9e\x06\xc4\xe9\xba\x86\xb4\xf2\x71\xb9\xad\xc1\x49\x40\xfe\xf5\x4a\x5e\xf3\x49\x38\x5c\x1b\x4d\x22\xae\xbd\x35\xe7\x5f\x1d\xd9\xf9\x97\xf3\x2f\xea\x3d\x15\xe4\x38\x20\x3d\xb1\xf5\xc0\x3a\xa3\xe0\x9b\x20\x9f\x02\x12\x61\x40\xe5\xcb\x80\xbd\xd5\xf0\x2b\x60\x7f\xf8\x88\xd7\x4b\xa3\xbb\xec\x76\x1f\xa8\x07\xbb\xdd\x07\x3d\xb1\x65\x7e\x13\xbd\xc1\x29\x3e\x18\xe0\xa2\x13\x9b\x2d\xfc\x3e\x68\xf8\xb2\x40\x17\x75\x8a\x8a\xfa\x06\x82\x6d\x6f\x61\xb6\xee\xed\x1d\xc6\xf4\xa0\xe7\x75\x21\x66\xa2\x1f\x17\x19\xf8\x3a\x9d\x22\xe5\x6c\xb9\x71\x9c\xe6\xf8\xdc\x7e\x3c\x9f\xef\x3c\xda\xe3\xa5\x85\x57\xac\xd7\x7d\xa0\x3a\x7c\x63\xfb\x71\x9e\xbe\x13\x3f\xdc\x82\x31\x35\xb6\xa7\x0d\xe5\x29\x9a\xc0\x97\x12\xb2\x65\xb1\xb1\xbc\x80\x36\x10\xd4\xf0\xc1\x8c\xa3\xbe\x0f\x48\xf3\x22\x84\x52\x8b\x73\xa1\x16\x96\x41\x95\xb2\x6d\x54\x48\x00\x17\x5a\x1b\x75\x61\x15\x09\x54\x56\x2e\x81\xd7\xb5\xb4\x6b\xe0\x66\x58\x0c\xf9\x9d\x67\x21\xa0\x9a\xfa\x2e\x60\x6f\xe0\x45\x5d\xab\xf5\x31\x79\x67\x64\xae\xa4\xed\x36\xc1\xdf\xbd\xae\xb1\x32\xe6\xf3\xb4\x04\x9f\x68\x02\x9f\x6b\x95\xf2\x8d\x2c\xf9\x3f\x51\x0f\xb7\xe7\x5d\xba\x41\xd4\xc3\x5e\xb7\x3b\xef\xd2\x8e\x79\x83\xbf\x12\xf8\x5e\xb3\x35\xb2\x98\x1e\x5e\x3e\x11\xb3\x2b\x9b\x1b\xa6\xaf\x03\xc2\x33\xf2\x5f\xee\x7c\xab\xb7\xb3\xb5\x2b\x76\x1f\x10\xb1\xd1\x7b\xf2\xa8\x4b\x41\xb3\xc7\xbb\xdb\x62\xe7\x01\x21\xf1\xde\xd6\x7c\xbe\xfe\x22\x20\x82\x0e\xf8\x46\xcf\xe3\xb4\x43\x3e\x9b\xa7\x8d\xcf\x01\xc1\xca\x85\x03\xed\xa9\x20\xb2\xa3\x3a\x9a\x26\x99\x1f\x2c\xce\xbf\xdd\x9a\x56\xd9\xea\xed\xf1\x01\x22\xe3\xa9\x4c\x70\x14\xf9\x42\x37\x9f\xec\xf1\xf9\x7c\xf3\x09\x63\x8c\xb7\xdb\x69\xaf\x59\xed\xcd\xdd\x47\x8f\xb7\xc5\x0e\x5d\xc8\x54\x5a\x81\xb8\xd3\x7d\xf2\x68\x37\xaf\x93\xe7\x3a\xdd\xea\x96\xea\x3c\x7a\xf4\x68\x57\xec\x2e\xe6\x6e\xac\x80\xe9\x75\xb7\x76\x1f\xe7\x75\x76\x6b\xc1\xf4\xb6\xba\xdb\xbb\x05\x3e\x8f\xea\x01\xed\xec\x6e\x95\x90\x7e\x5c\x5f\xe9\xf1
\x56\x6f\xf7\x71\x5e\xe9\x49\x6d\x77\x9b\xdd\x27\x4f\x76\x36\xf3\x4a\xbd\x6e\x2d\xa8\xcd\xad\x9d\xc7\x8f\x4a\xb5\x7a\xf5\xb0\x76\x37\x77\x77\x8a\x69\xea\x6d\xd6\xc3\x7a\xfc\x78\xc7\x4e\xe6\x82\x94\x2e\xef\xd1\xab\x48\xea\x0b\xdc\xa1\xb1\x31\x86\x70\x97\x1a\x2d\x6b\x29\xde\xbf\x51\xbf\x7c\x4a\xb2\x8f\x34\xa6\xa1\x8d\x5f\x02\xb2\x4d\xe1\x77\x40\x9c\x0d\x87\x96\x5e\x6e\x96\x5f\xe2\x33\xa5\xf0\x61\x85\xe6\x2a\x1f\x10\xc3\x6c\x3b\xa8\xb2\x7e\xad\xd9\x38\x69\xbd\xec\x82\x5c\x05\xe5\xf2\x3f\x36\x0e\xfe\x17\x72\x00\xc4\xe1\x93\xb3\x8c\x97\x57\x87\xac\x57\x42\x16\x2a\xfd\x58\x98\x6f\xb5\xad\xe7\x62\x0e\x82\x80\xda\x78\xfb\x33\x5a\xaa\xfd\xa6\x2e\xa6\xb6\x9b\x60\xb3\x1f\x16\x8f\xc5\xb9\xfe\x10\xfc\xa1\x79\xcf\x36\xef\xd8\xe6\x75\x55\x36\xd2\x2a\x66\xb6\xcf\xfe\x30\xb0\xa2\xfc\xa9\x20\x13\xf2\xcb\xa8\xa4\xdd\xec\xff\x6d\x98\x26\x5e\xee\xfb\x1e\x50\xf8\xd6\x60\xa5\x3f\x8d\xe1\x6b\x60\x6f\xaf\xd5\xe8\xa1\x96\x4b\x7d\x5b\x14\x16\xd3\x0a\x9b\xd6\xa9\xba\x68\x94\x69\x14\x12\xa8\x35\xbe\xa9\xed\x70\x95\x7f\xc8\x9e\x7c\xb9\xf6\xde\x11\xba\x59\xc2\x18\x31\x93\x61\x83\xed\xb1\x99\x6a\xa0\x2a\x64\x17\x12\x74\x5d\xb5\x35\xfc\x46\xe1\x9b\x00\x64\x68\xe5\x88\xa8\xad\x65\x4d\x86\xf3\x80\x5c\x62\xa0\xaf\x0a\xe1\x48\xdb\xc8\xfb\x04\xe2\x90\x15\xca\x6a\x3c\x73\xe0\x32\x48\xb5\x57\x21\x87\xb3\x7d\xed\xc0\x8f\xec\x45\x3c\x35\xd3\x31\xac\xbc\x9b\x69\xae\x74\xb5\xda\x28\x94\xe7\x42\x4d\x55\x28\x35\xea\xa1\xf6\x6d\x96\x37\x63\x86\x3a\xf2\xab\x5c\x47\xe6\x52\x46\x1a\x13\x62\xce\x1c\xd0\x21\x6a\xcf\xfb\x64\x3f\x00\xe7\x5c\x48\xa1\xb8\x8e\xd4\xe7\x93\xb7\x0e\x08\x5b\x74\xa9\xd3\x76\x13\xee\x8b\x49\xd1\xe4\x2e\x20\x1f\x03\x9a\xff\x1f\xf0\x6c\x58\x69\xfe\x0f\xd3\x69\x1c\xd2\x05\x64\x1c\x78\x15\x34\xc2\x6b\x59\x85\x3d\xa8\x9b\x52\xeb\xc5\x0c\xc3\x55\x31\xa1\x1f\x63\x24\x18\xef\x55\x5c\xca\x21\x15\x35\x35\xc9\x4e\x2b\x32\x37\xe5\xa8\x81\x2c\xb2\x23\x11\xbf\xa1\x3c\xb3\x48\x67\x61\x7a\xe4\x31\x09\xd3\xe0\xc3\xeb\x5a\xda\x48\x9d\x4c\x09\x0c\x1b\x01\xfa\x9e\xa3\xc5\xad\x76\xb2\x74\x78\x21\x3b\x97\x70\x15\x1a
\xed\xf4\x22\x5c\x7d\x8f\xe4\x2a\x24\xf7\xfe\x07\x63\xb0\x6c\x7b\x84\xb3\x18\x86\x21\xa9\xb3\x80\x5b\xa1\xbd\x91\xf7\xa1\x72\x28\x9a\xce\xe2\x28\xfd\x7e\xb6\x99\x49\x1b\x63\x4b\x21\x08\x3c\x05\xc1\xd4\x93\x10\xbc\xf1\x2e\x35\x04\xef\x3d\x0d\xc1\x89\xb7\xde\xcb\x7c\xe2\x09\x85\x69\x98\x26\x73\x38\x6f\x5c\xa8\x1b\x72\x11\x82\xf3\xe2\xe0\x93\x03\x2d\x90\x30\x0d\xed\xde\xbc\x09\x6d\x00\x70\x2b\x24\x68\x0d\x3b\x42\xa9\x48\x19\x72\xa6\x36\x1c\xf8\xae\x61\xb6\x32\x2f\xf2\xf8\x0f\xcb\x77\x1b\xae\x90\x30\x44\xa5\x49\xfe\xf6\x43\x76\x2c\xe1\x20\x64\x6f\x25\x3c\x0b\xd9\x41\x68\x34\xf3\xc3\xb0\x29\xa9\xe6\x98\xec\x87\xb5\x11\xac\x21\xb1\xf7\x36\x6d\xe6\xa8\xa3\x4a\xdf\xf6\xda\x4c\x3d\x08\xf5\x37\xa0\xc7\x24\x4d\xa9\x96\x80\xc8\xba\xb8\x0c\xd9\x57\x09\x1f\x97\xa7\x3d\x75\x67\x64\xab\xfc\x5c\x12\x84\x7c\x19\xa6\x1a\xbc\xb1\x64\x17\xa7\xa6\x84\xc4\x61\x58\x2f\xb6\x48\x9a\xdc\xe9\x63\x68\xd3\x64\xd9\x04\x4f\x47\x21\x19\x6b\x0a\x07\x21\x69\x21\xf0\x22\x57\xd2\xdb\x4a\x1f\xe5\xd1\x74\x0d\x0a\x3f\x1b\xe9\x05\x51\xb0\x97\xa8\xfa\xdf\xa5\xfb\x89\xcf\xc6\xec\xde\xf7\x9e\x85\x10\x78\xaf\x42\x18\x7a\x6f\x43\x10\xde\xcf\x10\x46\x9e\xaf\xad\xe9\xf9\x3b\x84\xe3\x10\x9e\x86\xec\x9b\x24\x8e\x69\xe0\x50\xf8\x14\xb2\xa7\x12\x4e\x1a\xbb\x79\x6a\xa6\xf5\x53\x68\xcc\xd8\xf4\x9f\x83\x10\x24\x85\x8f\x18\xc5\xbc\xbf\x5c\xf0\x2a\xa6\x36\x81\x19\x85\xe7\x2b\x90\x3f\x09\xd1\xe9\xff\xc2\x10\xdc\x25\x4e\x77\x5d\xb2\x8e\x94\x27\x0c\xee\xaa\x9b\x75\xc9\x9d\x9d\xe9\x81\x4e\x9a\x88\x79\x4d\xdc\x06\x42\x0c\xc5\xd0\xa9\x6a\xe0\xce\x91\xd0\x37\x91\x1a\xaf\xd9\x5d\xb4\xa0\x6a\xe3\xc9\x7c\x1c\x10\x82\xc1\x09\xd4\xf5\xef\x5c\xff\x2b\xc5\xcf\xc1\x2a\xf3\x10\x44\x70\x13\xda\x34\xd4\xb9\x06\x9e\x1d\x39\x64\x4e\x50\xe7\xb3\x14\x99\xc5\xa6\xc4\x6c\x1a\xc9\x99\x58\x1b\xa9\xe8\x6a\x8d\x4f\x43\x14\xe0\xee\xe2\xc9\xb4\xf3\x8e\x4f\x46\x91\xba\x12\xc3\xb5\x58\x4d\xd2\x3a\x49\x82\xbe\x6d\xea\x8d\x43\x7b\xc5\x83\xc2\xcb\x5a\xb6\x29\x07\x8e\x56\xb1\x70\xbc\x34\x75\x5f\x02\xbf\xea\xeb\xb9\xad\xf9\x3c\x0b\x8a\x1e\x48\xf4\x6f\xbc\xaf
\xad\x29\x64\x4d\xb4\x4b\x02\x5f\x6a\x2b\x67\xe9\x03\xdf\x87\xf0\x8d\x13\xd9\x71\x98\x63\x0d\xcf\xf0\x7f\x27\xc1\x9c\xe9\x69\x90\x45\xc5\x50\x3c\x6f\x4d\x93\xcc\xbd\x6b\x18\x94\x9f\xc0\x8b\xb0\xf6\x10\xc8\x3f\x2c\x3e\x9a\x18\x8c\x3b\x24\xbb\xf0\x80\xb1\x18\x4f\x39\x7c\x34\x98\xbf\x0e\xed\x0d\xd1\x77\x21\x48\x41\xa4\xfb\x8d\x52\x7b\x8b\xd4\xaa\xfa\x1d\x65\xfe\x24\xf0\xb9\x8e\x49\xa6\x76\x29\x51\x15\xb7\xa7\xb6\x51\x3d\x79\x60\xd1\xf7\xc6\x4d\x80\x13\xf1\x19\xf9\x44\x2b\x4d\xe1\xd0\x58\xf7\x98\x64\x69\x1b\x1a\x06\x3b\x42\xcf\xc9\x7b\x0c\x31\x3c\x46\x43\xf9\xc8\x1a\xca\xaf\x31\xc6\xf0\x2b\x84\xcb\xe7\x3b\x9f\xe2\x7c\x52\x38\x45\x5f\x89\x5c\x4c\xd5\xb5\x70\xe7\xf6\x85\xb6\xd3\xf5\x22\x9d\xb6\x23\x0d\x1f\xc2\xca\xf5\x09\x85\xa7\x3c\x11\x1b\x93\xef\xb5\x6c\x7a\x4c\xbe\x84\x99\xf6\x49\x13\x78\x49\x42\xb0\xc9\xa3\xf2\xf4\x65\x70\xa4\xc9\xcb\xd0\x62\xb6\xde\x03\x9b\xf0\xec\xb8\x9c\xcf\x6c\xa1\x86\xc8\x6a\xa4\x0e\xda\x4a\x71\xd7\xde\xe2\x33\xc5\x85\xae\xf5\x2b\xc4\xdc\x1d\xc7\x59\xe6\x35\x40\xff\x75\xbe\x8c\xdd\x3d\x3c\x99\x19\x38\x83\xec\xb0\xa2\xed\x40\x44\x3d\xc7\x49\xe0\x5b\x93\x24\xb1\x15\x1f\x3a\x78\x8b\xb0\xd0\xf6\x6c\x0f\x33\xa7\xf3\xd5\xf4\x79\x56\x74\xf2\xdc\x60\x78\x1e\x82\x36\x0a\x21\x0f\x2d\xc7\xfc\xf1\x9f\x41\x5f\x0d\x34\x4e\x81\xbe\xf9\x83\x82\x26\xa3\x06\x0d\x22\xab\xa0\xa2\x65\x9a\xb3\xa7\x86\x23\xa4\x37\x6e\x63\x5a\x8f\x91\xe0\x8e\x90\xde\x82\xd7\xf8\x21\x85\xaf\x7f\x4f\x09\xab\xa9\x40\xba\xef\xff\x48\x07\xf1\x6a\x3a\xe0\x59\x71\x4a\xad\x86\x0a\x82\x1a\xca\xd0\x15\xca\x08\xf1\x61\x31\x23\x1f\x51\x4c\x00\x7a\x8a\x2d\xf3\xc5\xb8\xac\xf2\x42\xac\x22\x21\x1d\xfd\xef\xb0\x46\x8c\xc8\xc3\xdc\x9b\xb8\xf3\x0c\xdf\x45\x67\xb6\x7d\x3c\xd6\xc4\xc6\x14\x52\x38\xd2\x94\x96\xed\x3a\x0a\x22\x4a\xb5\xcf\x38\x4a\x35\x70\xde\xb0\xe4\xd9\x31\x55\xd0\x50\x9e\x51\x44\xd8\x50\x9e\x85\x39\x45\x0d\xe5\x59\xa2\xea\x51\x43\x79\x6e\x53\x44\xec\x48\xc2\x2c\x62\x42\x11\x67\x2a\xd4\x2c\x9c\x69\x9c\xfd\x83\xdb\x29\x97\xc3\xfd\xc9\xc4\x01\x3f\xa2\x30\x89\x9a\x18\xe6\x09\x51\x70\xcf\x5b\x5e
\x1e\x80\x77\x12\xc3\xa7\x18\x0c\x65\xa6\x69\x43\xae\xeb\x70\x30\x12\x65\xe0\xdc\x67\x8b\x06\x78\x8e\xff\x02\x19\x9e\xa4\xb4\xe3\x24\x0e\x2e\x61\x0b\x27\x72\x3b\x81\xab\x08\x8d\x8f\xa8\x36\xfe\x56\x32\x33\x96\x69\x23\x8e\xa8\x5b\xa5\x5a\x1c\x66\xd2\xae\x82\x69\xbe\x1d\xb9\x7c\x1d\x75\xc1\xcf\xb8\xe8\xd2\xda\x4a\x12\xb8\x89\xd8\x77\xb8\x8b\xd8\x57\x18\x37\x62\x24\xf7\x7a\x03\xc7\xc9\x52\x05\xa5\x22\xe7\x36\x62\x9f\x61\x3f\xfa\x83\xf7\xff\x9e\x1f\x7b\x1c\xf8\x67\x4f\x01\x1f\x19\x3b\x8b\x7b\x1a\x7c\x61\xec\xa2\x6b\x7b\x5f\xea\x20\x6a\xbe\x2f\xf5\x52\x90\x98\xce\xe7\x63\x72\x1b\x81\xf3\xff\x71\xcc\x5e\xae\x9e\xd2\x70\x36\x26\x77\x11\x38\x9e\x93\x26\xfc\xe0\xe9\xc9\x3d\xc7\x93\xfb\x6a\xe5\x2c\xb5\xef\xfb\x80\x8c\xc9\x77\x01\x41\x07\xb9\x43\x16\xca\x10\x2e\x25\x97\x8e\x58\x98\xed\xdc\x23\x4d\x26\x64\x3f\x42\xed\x74\x1c\xd9\xaf\xd1\x45\xe9\x09\x65\x39\xda\x34\xad\x14\xc3\xa5\xce\x4b\x29\x3c\x8b\x9a\xf2\x11\xbf\x14\x44\x2c\x0e\x2a\x4e\x07\xf5\xd0\x81\x2c\x7b\x8f\x5f\x93\x9c\xe1\x86\x1c\x58\x7c\xbe\x0b\x1b\x34\xa5\xf0\xc6\xfa\x38\xc2\xa7\x52\x48\x3a\x56\xc3\x18\x57\xfb\xad\x57\x0a\x87\x51\xfd\xbd\xaa\x97\xc2\xf0\xb9\x2a\x36\x22\xc5\x66\xe0\xa4\x57\x91\xaa\x29\x94\x8b\xd8\xb3\x67\xa6\x1b\x8c\x4c\xfd\x2e\x20\xee\xa0\x8c\xc6\x34\x07\xe3\x08\x62\xb4\x9d\xaa\x55\xed\x0c\xa1\xc1\xb7\x4c\x79\x16\x19\xb5\x88\x8c\x4e\x91\xf9\x1f\x4e\x1a\x38\x58\xc9\x31\x57\xca\x4d\x71\x58\x46\x46\x74\x7a\x66\x2f\x59\x54\x44\x39\x61\x72\x5a\xd1\xa0\x82\x66\x61\xed\xe6\x1f\x93\x9b\x08\x9c\x0b\xad\xa7\xde\xc3\x87\x0e\xa0\x2e\x7a\x14\x41\xd7\x4e\xfd\x23\xc3\x02\xbc\xa2\xce\xac\x52\xa9\x67\x2b\x3d\xc6\x4a\x46\xb9\xfe\x18\x31\xad\x89\x33\x8a\x02\xbc\x2c\xfa\x2a\x62\x3f\x14\xbc\x8d\xd8\x0f\x09\x3f\xeb\x56\xc5\x8e\x0d\xf9\x56\xf0\xda\x3b\xd2\xe4\x3a\x22\xda\xe5\x2d\x5a\x4a\x85\x7f\x4c\x4e\x88\x46\xbe\xd6\x32\xb6\x63\x64\x7d\xa0\xaf\x22\xd0\x6e\x30\x86\x97\x06\x54\x44\x84\xf5\x57\x9e\x84\x70\x28\x48\x2b\xa2\xf0\x31\xca\x84\xde\x86\xcf\xd5\xc6\x15\xd7\xc1\x05\xaa\x6b\x69\xe2\x86\xdf\xcb\xab\x52\x4e\xdf\xad\xf2
\xfd\x95\x46\xfe\x60\x98\x32\xaa\x8b\xd6\x1d\x92\x16\x76\xd3\x38\x3f\x8c\xaa\x3c\x8e\x9a\x7d\x03\xbf\xa3\xfa\x34\x2a\xc8\x6f\xac\xc8\x7a\x1a\xb1\xb7\x91\x31\x96\x3f\xd5\x6f\xa8\x94\x37\xea\x65\xde\x78\x4b\x7e\x46\x18\x77\x76\x42\x04\xdc\xf3\x9f\x9e\x76\xb9\xe1\x69\xc2\xe5\x3f\xc1\x0a\x84\x28\xbd\x2a\x28\xcc\xec\x0e\xcc\xbf\xde\x4b\x62\xfe\x18\x25\x24\xc6\x54\x0e\xb4\x7c\xff\x3c\x2e\xe7\x1c\xae\x85\xff\x22\x24\x31\x2d\x75\xf1\x3a\x84\x43\x4d\x62\x6a\xbb\xc8\xc1\xe5\x96\xa6\x59\xc7\xbc\x35\x0e\x77\xf1\xa8\x28\xab\xa1\x4a\x35\x16\x98\xfa\x31\x11\xe6\x35\xc6\xb0\xae\x96\x83\xfe\x63\x6f\xe1\xa8\xb2\x1a\xd1\xb5\x76\x40\x4e\xad\x4e\x89\x67\xf6\x67\xb4\x4e\xf7\xcf\x4c\xa5\x17\x02\xde\xd9\x24\x7b\x36\xc2\x0d\x1c\xdc\x67\x44\xba\x5f\xad\x60\x7d\x8e\xc2\xf1\x71\x02\x2f\xeb\x36\xd9\x3a\xda\x6c\xbf\x1a\x77\x00\xc4\x76\x0f\x7c\xf5\x08\x46\x96\x3d\x86\x97\x86\xa4\x51\xd7\x72\xa8\x77\x48\x04\x2c\x62\x4b\xd3\x63\xdb\x54\x66\x3b\x68\x11\x94\x37\x8d\x6e\xda\x2a\x71\x79\xab\x3c\x4f\xb7\x0a\x6a\x7e\x1b\xfe\xdd\xc6\x28\x14\x93\x61\xb1\x4d\xde\x37\x4e\x32\xc6\x30\xfe\xc9\x72\x4b\xff\x7c\x59\x86\x52\xb9\x21\x81\x31\xaf\x18\x7f\x63\x6f\x3f\x6c\xf4\x40\xe1\x7e\x5b\xbe\x03\xa1\x8a\x1f\x09\x85\xd7\x75\x53\xda\x0c\x3a\xbd\x28\x83\x31\xd1\x79\x27\xa0\xd3\xd0\x3c\x23\x01\x96\x3b\xd4\xc5\x0f\x63\x92\x37\xce\xc7\x17\x4d\x6e\xc9\xeb\x28\x0d\xff\xc4\x8b\xe7\x0d\x72\xa8\xbb\x97\x7f\x26\xe4\xd8\x7e\x3a\xb2\xc0\x58\x95\x71\x5f\x17\x65\x39\xaf\xfb\xf9\xab\x54\x0d\xe8\xd9\x6b\x1c\x34\xbd\xda\xd7\xb7\x7f\x55\x92\x32\x0a\x53\x94\xb9\x8a\xb2\x2a\xa5\x78\xc0\x18\xbf\x0e\x50\x6c\x86\xd8\xc5\x64\x47\x2e\x3f\x2b\x7f\x0a\x3e\xed\xaf\x90\xd1\xc2\xf5\xed\x85\xd5\x52\x53\xa3\x79\x04\xf8\x3f\x6c\x9e\xde\x35\x54\xf9\xe6\x2d\xc3\x69\xb7\xf3\x9f\x19\xd0\x10\x81\x46\x2c\x74\x7d\x18\xb1\xc8\xf5\xf3\x9b\x9a\xbe\xcb\x61\xc2\xfc\xf2\x37\x08\xc6\x1a\x6f\xfb\xe1\x8f\x28\xfb\x31\xca\x7e\xcc\xa0\x27\xb6\xf6\xe4\x60\x4c\xde\x45\xa0\x36\xb6\x61\x42\xbd\x5b\xf2\x22\x02\x69\xa4\x24\x3e\xe3\x77\x08\xb2\xa9\x5a\x5a\xe5\x03\x72\x2a\xd2
\x51\x64\x4e\x81\x84\xc2\xe7\x15\xdb\xe0\x45\x54\xa8\x8d\xdf\x9b\x85\x9c\xae\xe3\x2b\xd6\x05\x63\xc4\xbc\xd9\x92\x9f\x23\x10\x1d\xa3\xf3\x7c\x89\x40\x6c\x20\x81\xda\x5d\xf8\x61\xb9\xfb\x85\xeb\x72\x19\xe8\x1f\xdc\x3a\x78\x6a\xfd\x25\x69\xa5\xf5\x43\x62\xd3\xe3\xb5\xdb\x86\xb3\x58\xf9\x83\x7e\x96\x88\xbd\x87\x6f\xb5\x5a\xc2\x2d\xf9\x1a\x15\x5f\xa0\xf8\xd1\x38\x1d\xf6\x1e\x0f\x7e\xb6\xc0\xda\xe1\xb5\xca\xcf\x61\x25\xf4\x3a\x15\xb0\x20\x98\xcd\xec\x19\x5b\x6f\xf2\x8f\x88\x84\x98\x1e\xe2\x7d\x64\x23\xa5\x7b\x46\x0d\xfc\x16\xd9\x0c\xa7\x9c\xd9\xb8\x93\x80\x11\xbc\x09\x60\xb4\xc2\x87\x9b\xf3\x2e\xdd\xe8\x41\xc8\x30\x2f\xc4\x98\x7c\x4f\x75\xd9\x26\x78\x1c\xe1\xd9\xe4\x54\x11\xfb\xc1\x49\x48\x0d\x01\x3e\xe4\xe0\xb3\xe8\xa1\x80\x19\xcb\x5c\xdb\xb8\x5a\x1f\x0c\xf3\x4c\x33\xd3\x73\x78\xc9\x89\xbb\x63\x5a\xe2\xc1\xd6\x7b\x0d\xbf\x34\x7e\x69\x22\xe7\xc1\xd1\xa0\xf7\x70\xeb\x01\x19\x75\xfc\x0e\x89\x36\x66\xf4\x61\x44\xbd\xae\x61\x8a\xa3\x15\xb1\xe9\x19\x5f\x90\xc5\xdd\x11\xee\xfa\xf3\xf9\x22\x2f\xb8\x2e\x7d\xe5\xa3\x9c\xd8\x32\xdf\x49\x59\x72\xa4\xd0\x68\x2b\xf7\x92\x05\xa0\x58\x04\x9a\xbd\x24\xda\x08\x13\x6e\x44\x7e\xf6\xb5\x73\xb4\xdc\x75\x29\x16\xde\x2a\x35\xa6\xba\xc6\xaf\xc2\xa8\x51\x1d\x51\x4c\x89\xa4\xee\x65\x14\x4a\x82\xd9\xcb\xf5\xa8\x29\xce\x3b\xcb\x7f\x64\xfe\x2a\xa3\x98\x1c\xe6\xb7\x4b\x46\xe4\x96\xc8\x91\x5d\x06\xbb\xb8\x29\xf7\x14\x75\xb3\x84\xb7\xe5\xca\xc0\x68\x6e\x1d\xd6\x90\x55\x36\x9c\x8e\xbb\xb9\xf3\xe0\x85\x20\x63\xa2\x47\xd6\xf0\x7c\x40\x7a\x1b\xa8\x9c\xc7\x7f\x89\x73\x37\x43\xb9\xe7\xdd\x12\x81\x50\x8c\x2a\x10\xe5\x76\x2c\x1f\xb1\x3b\x08\x46\x7f\xa7\xb8\x3d\x2a\xab\x33\x25\x31\x2d\x4a\x62\xda\x1a\xf3\x81\x55\x85\xfe\x5a\x64\xf7\x17\x15\xc3\x5f\x65\xc5\xed\x65\x49\x31\x7c\x09\xfe\x63\x54\x01\xfd\xc7\xa8\x02\x1a\xbd\xe3\xcc\x6c\xac\xae\xd1\xb3\x57\xe9\x82\x35\x20\xe3\x1c\x62\x45\x0f\xf4\x1f\x2f\xc0\x5b\x52\xf5\x7a\x8b\xca\xe0\xf6\x62\x8d\xb7\x78\xb1\x8f\xf7\xf0\x1b\x14\x45\xbd\x9d\x85\x7a\xfe\x78\x11\xd2\x92\xe2\xa9\x16\x6b\xec\x96\x6b\x70\x96\x0d\xc8\x0c\x97
\xe3\xd7\x11\xbb\xde\x4b\x41\xb8\xcb\x5f\xd2\x81\xd1\x70\x8d\x9d\xbe\xe6\x80\x7d\xc1\x5d\xde\xf5\x90\x67\xf7\xba\xf0\xc5\x28\x62\x7c\x04\xf1\xc8\x56\x87\x82\xc5\xd7\x9a\x5c\xc7\x66\x0a\xb9\x99\x9f\x04\x9e\x0b\x0a\xcf\x34\xe1\x6e\x70\x95\x9e\x9e\x97\xc6\xda\xa0\xfe\x66\x67\xac\xd1\x88\xbd\x91\x30\x1a\xb1\xa6\xae\x32\xe7\xa8\xec\x38\x0f\x97\x82\x00\x28\x4d\x20\x1a\xd5\xc5\xa9\xe6\x5e\x2f\x4a\xc1\x1f\xa5\x69\x4f\x67\x4d\x7b\x05\xc3\x5a\x21\xb8\xb1\x67\xe7\x93\x11\x9b\x48\xd2\x7b\xd8\xa5\x70\x5d\xcb\x36\xc6\x64\x36\x32\xf2\x67\x1b\xec\xa1\xd3\x36\x6a\x60\xc5\x05\xd1\x87\xa7\x1b\xa7\xff\xfc\x73\x76\x9f\x10\xfa\xa0\x33\x70\xe1\x9f\x7f\xfe\xf9\xe7\x7f\xb6\xe6\xff\xe3\x9f\x7f\x66\x67\x0f\xcf\x1d\x6a\xd8\xed\x64\x04\xba\x71\x7e\xa5\x1b\xc8\x04\xbe\x71\xe2\xfc\xf3\x8f\x43\xed\xd1\x5c\x76\x7d\x23\x81\xe1\x5f\x6e\xd2\x6e\x31\xe9\x5b\x98\x90\x85\xbb\xad\x81\xa5\xb5\x0b\x0f\x85\xc2\xf5\x08\xcc\x5b\x4e\x13\xea\x89\x15\xf6\xcc\x4b\xcf\x71\x00\xa9\x65\x6c\x0c\xd9\x6d\xcf\x41\x67\x5e\x70\x83\x7e\xb5\x8c\x86\x84\xeb\x5f\x98\xe9\x9e\x7a\x97\x1a\x7c\x65\xbf\x4e\x99\x6d\x7a\x7f\x64\x37\x7d\xb6\x86\xf9\xa6\xff\xeb\xed\x50\x6c\x65\xde\x5f\xc2\x2f\xfe\x33\x7a\x19\x89\x37\x4f\x3a\xdf\x4e\xcc\x06\x88\x29\x4d\xc7\x42\x69\xe3\x6e\x33\x28\x2a\x8c\x17\x68\xde\xfa\xfe\x74\x79\x4b\x5b\x1d\x74\x79\x0c\xfe\xd8\x5b\xef\x41\x06\xb2\x9e\x99\xfa\x23\x0f\xb9\x39\x4f\x4d\x27\x8c\x65\xa0\xcb\x19\x13\x4b\x36\x66\xab\x89\xe4\xd3\x88\x22\xe8\x5a\xbd\xef\xaa\x96\xd0\x51\x2b\x68\x8d\x20\xb2\xb1\x46\x17\x8d\xc0\xd2\x2b\xfa\x08\x6a\x3a\x5a\x95\xf6\x12\x53\x21\xf0\x8f\x10\x9a\x3f\x53\xcc\x88\xc2\x5f\x01\x5e\x42\x39\x07\xdf\x3c\x45\x30\x63\x71\xc7\xf9\x1f\x0f\xd3\xf3\x92\x7e\xe3\xc1\x31\xce\x9d\x11\x32\x91\x4d\x77\x5c\xcc\xb3\xbd\x29\x35\xc1\xf8\xd3\xeb\xf2\x19\x5a\x0a\x6b\x92\xc3\xb2\x2b\x22\xd9\x04\x6f\xe6\x9c\x90\x00\xee\x83\x2b\xef\x6a\x64\x93\xbb\x3d\x35\x5a\xf9\xb7\x55\x1b\x95\xff\x48\xe0\x40\x53\xfb\x4d\x38\x0a\xa9\xb6\xb5\x4a\x7b\x5d\xbb\x25\xad\x00\xde\xd8\x6b\x50\x96\x5c\x63\x7b\xbd\xc0\x49\x50\x97\x3d\x30
\x84\xaa\xa3\x3f\x74\x8a\xa5\x07\x1a\x5e\x87\x75\x67\xf0\x96\x43\x5b\x8b\xe6\x31\x92\x09\x62\x58\x10\xdd\x38\x24\x9a\xe6\x5f\x0b\x2f\xb1\x0a\x11\x41\xb0\x98\x44\xe9\x98\xc4\xf8\x76\x89\xce\xee\x42\x32\x71\x6d\x0e\x8d\x84\x54\x1c\x5e\x76\x59\xae\x8d\xe4\x9b\x7a\x13\xe0\x1f\xbd\x6b\xd7\xaf\x3a\x4d\xcc\xc4\x0c\xd9\x98\x9c\x44\xa0\x21\xa0\xd0\x62\x63\x32\x31\xbf\xa3\x25\x48\x63\x2f\x8a\x81\x4f\xf0\xa2\x7c\x64\x36\x55\x30\xf0\xbd\x38\x32\xb0\xf1\x21\x8e\xbc\x10\xf8\x2b\xaf\x65\x3a\x1a\xe6\x5e\x36\x2c\x1b\x93\x28\xdd\xf4\x41\x04\x6f\x42\x4c\x09\x1a\xa2\xb1\x4c\xbd\xa2\x8c\x67\x65\xdf\xd2\x32\x28\xca\x46\xb6\x6c\x34\x22\xa2\xac\xa4\x94\xf8\x81\xb2\x3b\x56\xd9\x1d\x3b\xb3\x3b\x56\xc3\xfd\xfb\x3c\xd9\xa3\xdd\xa8\x8b\xca\x53\x53\xbb\xe3\xba\x76\xbd\xee\x02\xdd\xfb\x4f\x17\xe9\xde\x86\x36\x5c\xb1\x7d\xf2\x29\x82\x99\x4d\x22\x0e\x11\x85\x0b\x76\x55\xd0\x5f\x36\xa9\xaf\xbc\x16\xbb\x72\x39\xf8\x4f\x3d\xcb\xa9\xb3\xf1\x86\x76\xbc\x17\x05\x6f\x36\x50\xa7\x6c\x9f\x04\xa3\x1c\x6a\x40\xfb\x17\x6c\xba\x0c\xf5\xa3\x37\x64\x53\xc4\xaa\x00\x18\x2d\x00\xb4\x9b\xee\x9c\xed\x93\x61\x01\x70\x64\x00\x9e\x2f\x01\xf4\xcf\xbd\xf3\x05\x70\xa3\x05\x70\x8f\x17\xf9\xc1\x64\x71\x5e\x9e\x60\x87\x37\x65\xdf\x58\x41\x5a\x98\xef\xfa\x06\x94\xcb\xc7\x74\x30\x26\x17\xa3\xec\xc1\x1b\x93\x56\xfe\x00\xfe\x96\xe5\xcd\xb3\x88\xac\xf7\x68\xf5\x5e\xdd\x5d\x3d\x64\x23\x76\x09\x67\xc7\xc4\x87\x3b\x4a\x5d\x4c\x91\xc3\x5d\x7f\x80\xec\x25\xbb\x3a\xcb\x51\x02\x53\x8a\xd9\xc5\xb7\xbc\xbb\x9c\x7a\x67\x11\xb9\x43\x6b\x2d\x02\x19\xc1\x41\x48\xee\x52\xaf\x56\x42\xe1\xbc\x8e\x5f\x97\xce\xea\x6e\x1a\x55\x9d\x96\xb7\x5b\xf9\xcc\x51\x2d\xe3\x3f\x92\x64\x75\x34\xea\x47\x59\x8d\x46\xbd\xb7\x26\x38\x8c\x47\xec\x48\xc2\x6d\x2d\xd0\xbb\x11\xb1\xe7\xc5\xf6\x8e\xd1\x78\x64\x28\x7c\xdb\x1e\xdc\xe2\x5d\xc1\xf4\x9d\x7f\x98\x9e\x19\xcf\x4e\xc4\xb9\xb8\x75\xc0\x8f\x4c\xcd\xef\xe9\xe7\x38\xf6\x1b\xa5\xd0\x09\x27\xf2\x21\xa2\x73\x50\x8b\x80\x4c\xe0\xd9\x2a\xd3\x75\xf1\xb3\x97\x1d\xb9\x98\xa8\xce\xa6\x3e\x12\xae\x7f\x8d\x49\x7e\x8b\x9a\xc2\x26
\x2f\x05\xc5\x14\x68\x7b\x3b\xfe\x70\x19\xcf\x22\x2f\x6a\x3e\x91\xcf\x50\x93\x84\x31\xd9\x1f\xc1\xc1\xc8\x58\x8f\xbb\x62\x3b\xbd\xd1\x78\x34\xaa\x4d\x4e\xb2\x6f\xac\xb7\xde\xf6\x76\x97\x76\x1e\xf5\x9e\x6c\xef\x3e\x06\xcd\x88\xda\xeb\x0e\xd4\x46\x6f\x7b\xb7\xfb\x64\xd7\x53\xf4\x21\xfe\x7a\x34\xef\x82\x60\xe9\xeb\x47\x0f\x34\xc4\x8c\x88\x0d\x22\xb0\x14\x2f\x71\x88\x87\x5b\xbb\x3b\x9b\xf6\x6a\x87\x7d\xfd\x64\x77\xde\xa5\xd4\xbc\x9e\x77\x81\x33\xb1\x41\xb6\x76\x77\x1e\xc4\x1d\x12\xa7\x37\x40\xe2\xf4\x06\x08\xba\x2e\x76\x1e\xf0\xce\x26\x7d\xd8\xdb\xd9\x9a\x77\x21\x64\x41\x87\x04\x7b\xbd\xee\x60\xcb\xdb\x78\x92\x31\xf1\x7b\xfe\xdc\xe3\x1b\x84\xf4\x76\xb6\x1e\x04\xa6\xf2\x8e\xe9\xb9\x07\x7c\xd3\x0b\xc1\x7f\xed\xc5\x9d\xed\x6e\xf7\x81\xee\x90\xcd\xbd\x70\xd0\xf5\x7a\x34\x49\xe0\xb2\x71\x8d\x8f\xcc\xd6\x39\x4c\xad\x60\x97\x3f\x4f\x28\x7c\x5c\xa1\x96\xbc\x0b\x60\x73\xdb\xce\x6e\xde\x0c\x76\xbb\x36\x52\x70\x75\xbb\x9e\xd8\xb2\x2b\x82\x5f\xc7\x5d\x5d\x77\xb7\x5b\x80\xc7\x08\xc0\xe5\xea\xa9\xe6\xb1\x80\xff\xe6\xa2\x4a\xd3\x5d\x50\xc1\x7b\x0b\xb6\xe0\xe6\xe2\x2d\x92\x05\xf5\x79\x7b\x41\x36\xed\x2c\xc8\x9c\xdd\x05\x96\xf9\x68\xe1\x7e\xc7\xe3\x45\x59\xf3\x64\xf1\xd6\x46\xaf\xbb\xa8\x07\xf4\x7a\x86\xde\x7f\xff\xc5\x14\xe5\x44\xde\x13\x5b\x38\x51\xc7\x7f\xb9\xd0\xfe\x6b\xc3\xce\x57\x30\xb5\x4a\x36\x87\x4f\xc6\x9e\x7c\x3a\x82\x2e\xb4\x28\x9c\xd4\x6d\xf9\x6c\xb6\xf7\xe4\xe0\x96\x9c\x8c\x40\xfe\xfb\xdf\x3d\x78\x49\x94\x21\x8f\x5e\x5b\x0e\x5e\x12\x0d\x8a\x7a\x9a\x7a\xc6\x90\x7d\xbe\x42\x8b\x3e\xb1\xbe\x14\xc7\xc1\x68\xbf\x15\x7d\xbd\x24\x63\xf2\x7c\x04\x72\xe3\x85\x30\xaa\x97\xfd\xd2\x91\xdd\xe8\xbf\x56\xc0\x7f\x69\xe0\x3b\x5d\x07\xe2\x2c\xa9\xc0\xfb\x06\xa3\xf4\xd7\x08\x0c\xa9\x1f\x8f\xe0\xd3\xc8\x06\x3e\x6c\x38\x1d\x7c\xbd\xb9\xe2\x02\x6d\xa1\xfb\xf5\x16\xb4\xbe\xcd\x05\x52\xdc\x5a\x20\xc5\xed\x05\x52\xdc\x59\x20\xc5\xdd\x05\x52\x7c\xb4\x40\x8a\x8f\x17\x48\xf1\xc9\x02\x29\xf6\xba\x8b\xb4\xd8\x5b\x4a\xe1\xdd\xdb\x4c\x12\x32\x26\x3f\xb3\x31\x57\x06\x3d\x26\x97\xc5\x5c\x7c\x2a\xbd\xfe\x58
\xbc\xf6\x4a\xaf\xdf\xd6\xbf\xfe\x5d\xbc\x76\xd3\xd7\x5b\xff\x5f\xf2\xbe\x44\xb9\x6d\x1c\x6d\xf0\x55\x14\x8e\x76\x06\xdc\x7c\x62\xe4\x5c\x9d\x30\xcd\x52\xf9\x90\x63\x27\x8e\x9d\xc4\xb9\x7b\xf2\xa7\x00\x92\x92\x68\x51\xa4\x42\x82\x94\xed\x44\xfb\x1a\xfb\x40\xfb\x62\x5b\xf8\x00\xf0\x10\x49\xd9\x99\xee\xf9\x77\xaa\xb6\xba\x2b\x16\x49\x10\x00\x81\xef\xc6\x77\x08\xb9\xad\xbc\xfd\xc5\x58\xc3\x07\x01\x73\x97\x01\xcc\x27\xf0\x7a\x62\xc2\x8b\x6e\xc0\x56\xbc\x95\x70\x27\xea\x4c\x07\x62\x2d\xb3\x74\x46\x2e\x22\x82\xee\x42\x26\x24\x6b\xd3\x84\x3f\xbe\x0a\x5c\x28\xbc\x90\x5f\x4d\x9c\xe3\x08\x9e\x6f\x81\x9d\x57\x1c\x72\x57\xa6\x73\x31\xe1\xbd\x98\xe0\xf3\x09\x1e\xc4\x29\x8f\xad\xe3\x03\xe3\xab\x89\x11\x29\x9f\xb7\x00\xae\x74\x72\x7f\x7d\x76\xae\xbc\xdc\x95\x5b\xc0\x9b\x2d\xd8\x58\x4f\x99\xd2\xc1\x08\x85\x56\xb8\xe9\x23\x57\x24\x6f\x46\x4b\x9e\x43\xb8\x53\x48\x0d\xea\x04\x3c\x35\x60\x4e\x5e\x4c\xe0\x72\x02\xc4\x77\x12\x13\x0f\xdb\x85\xb8\x50\xc6\x92\x7c\xc0\xd3\x9a\x95\xbc\xad\xe3\x50\xf0\x26\x1d\xcb\x9b\x6e\xe2\x53\x15\x8b\x3d\xc7\xfb\xfb\xea\xbe\xca\x56\xad\xee\xee\x2a\x41\xc4\x33\x64\x02\xb0\x57\x13\x95\x14\x6c\x02\xbe\x45\x3f\x60\x0d\x5d\xcc\xff\x3c\x01\x83\x2e\x97\x61\xe0\x62\x28\xca\x3d\x4c\x99\x00\x98\xee\x60\x28\x3d\xc7\x4b\x6b\xd6\x25\xf9\x3c\xc1\x80\xed\xf7\x32\x89\xee\xa7\x89\xe3\x76\x46\x74\x97\x06\xaa\x5d\x3b\x03\xba\x6f\xfb\x40\xc7\x36\x07\xfa\xc1\xa6\x40\xfb\x62\x8d\x57\x76\x02\xec\xca\x76\x81\x1d\xd8\x58\xde\xe9\x4b\xf7\x56\xfe\xa0\x9f\xc5\xcb\x0f\xc5\x7b\x87\x72\x73\x5e\xa2\x71\xec\xc1\x1a\x22\xa6\x23\xd5\xb5\xd8\xf5\x72\xa2\x82\x5b\x94\x70\x56\x04\xe3\x54\x63\xc4\xbf\x4c\x64\xb9\x05\xfd\x76\x47\x94\x0f\xc6\x06\xd5\xc2\xda\x6f\x8a\xeb\xf7\x2f\x97\x41\xf2\xab\x31\xfd\x4b\x3f\xf2\x82\x68\xfa\x27\x02\xfa\xdb\xc3\xde\x55\x62\x02\x1d\x00\x24\x00\xa2\x58\x8d\x02\x66\xca\x3b\x25\x74\xed\x77\x86\x43\xb5\x84\x3e\x95\x10\x7e\xea\x92\x88\xe1\x64\x3e\x4d\xca\xa8\x24\xce\x3a\x0f\xe7\xda\xd1\xc8\x80\xa4\xe2\xde\x88\x0a\x14\x07\x6d\x51\xf5\x21\x61\xd2\x68\xe1\xb3\x0e\xc7\xc2\xa7
\x4a\x9b\xc8\x5a\x1b\xb8\x73\x1b\x8d\x9f\x9f\xc4\x9f\xcf\xa3\xfb\xf6\x50\x40\x95\x90\xdf\xd7\x6b\xa0\xcc\xf9\x41\x5d\xfb\xce\x0e\x7c\x44\x73\xe0\x44\xfc\x7b\x2a\xfe\xc1\x74\x77\xaf\x31\xea\xe6\x85\xcc\xcb\xd3\xd6\x7b\xef\x2d\xa1\x0c\x7e\x9c\x0a\x45\xf8\xce\xd0\x84\x33\xfd\xe3\xb5\xfe\x51\xf8\xdb\xa0\xc5\x25\x63\xca\x04\xb3\x86\xa0\xe3\x73\xb4\x9f\x62\xcc\xb6\x67\x71\xfb\xad\x2d\x8b\xdb\x84\xa9\x80\x28\xd6\xd1\xbb\x5e\xab\xb4\xe3\xf9\x13\xf5\x3c\x64\x5d\xf4\xfa\xc8\x25\x28\x1c\xdd\x3d\x93\x61\xdd\x26\xe4\x4c\x15\xe9\x5d\x19\xf0\x78\xf8\xf0\x89\xff\x08\xe9\x91\x67\x00\x46\x42\xe3\xc5\xcc\x80\x07\x8f\xd5\xef\x85\x21\xb5\x07\x41\x0d\x0d\x94\xb1\xbe\x9a\xe0\xb5\x2e\x6f\x64\xf1\x24\x58\x10\x73\x0d\xfd\xf6\xe7\xbf\x0f\x95\x95\xd1\x63\xed\xba\xa0\x56\x66\x28\xba\x13\xcb\x7c\xa7\x14\xf3\x9d\x56\x54\x78\x7a\xcf\xff\x39\x1c\x09\xed\xc1\x95\xbf\xcd\xbb\x84\xdf\x35\x7a\x86\x69\xda\x19\xa0\x60\xe8\x83\x8c\x6d\x3b\x23\x86\x01\xf2\xd3\x21\x67\xa6\xcc\x68\xb0\x60\xce\xeb\x09\xcc\x98\xf2\x85\x5d\xb6\x2e\xee\x37\x7b\xc6\x10\xf4\xd6\x6b\x98\x0a\xb8\xdb\xb5\x97\x8c\x18\x86\x29\x28\xa6\xfa\x75\x55\xdc\x1a\x17\xbf\x3e\xa0\x3d\xa9\x6f\xf7\x05\x15\x95\x37\xd7\xb0\x12\x1d\x7c\xc6\x4c\xf7\x0f\x75\x4b\x76\x58\x3c\xbe\x62\xed\xc9\xab\xe6\xe8\x5b\xfd\x49\xae\xc5\x61\x89\x72\xef\x5a\x4d\x78\x97\xe4\xcb\x04\x12\x90\xa5\x11\x9b\xe6\x49\xbe\x29\x9a\x09\x90\xdf\xd9\xb4\xce\x4b\x3c\xd8\xa4\x6e\x98\x43\x13\x55\xf1\xf6\x99\x16\x93\x13\x5f\x29\x10\x57\x7d\x27\xea\xe2\xea\x4b\xb1\x78\xce\xe5\x36\x34\xe9\xbd\x25\x53\x06\x6a\xa5\xb9\x5e\xe8\x48\xad\xb3\x0e\x50\xe8\x33\x82\x81\xdf\xa6\x5e\xf6\x85\x78\x16\x32\x10\x77\xd1\xe1\x47\x2c\xff\x51\x4c\x12\x59\x0f\x9e\x61\x7a\xd5\xcf\xea\xe0\xe9\x8a\xa1\xc3\x34\xc3\x03\x10\xb5\x45\x0b\x86\x75\x96\x30\xba\xad\x13\x91\x10\x8b\x06\xe3\x89\xcc\xa8\x38\x46\x24\xc2\x15\x37\x0c\xf1\xff\x1d\xac\x68\xbd\x2f\x01\xc5\x30\xc4\xdc\xc5\xbf\x63\xfb\x48\xb0\x98\x12\x2c\xc6\x4c\x0c\x8a\x37\xd7\x70\xd8\x39\x9a\x0a\x33\x8c\x36\xa3\x0c\xe1\xb4\x0d\x58\xe5\xb1\xac\x78\xc1\x38\x9f\xc5\x59
\xe8\xf5\xa2\x98\xf7\x98\xdf\xf3\x17\x4b\x7e\x65\x88\xef\x3f\x64\x70\xe8\x13\xe3\x63\x12\x47\xd3\xde\xf1\xf9\xd9\x93\xc7\xc3\x9d\xde\x24\x4e\x16\x94\x1b\x26\x7c\x52\x39\x1f\x2e\x5a\xd1\xf6\xc8\x17\xf8\x1a\x99\xe6\x96\x11\x8e\x33\x0c\x0d\x3a\x6f\xe3\x26\xcd\x5a\x65\x2a\x0e\xf5\x59\xe9\x7a\xa4\x45\x6f\x99\xe4\x5f\x0c\xc4\x55\xa0\xea\x9c\x44\xe0\xcb\x34\xab\x42\xbf\x6e\x02\x20\xd2\x8b\xcf\xa0\x4e\xcd\x36\x71\xe5\x9c\xc1\x9c\xc3\xa5\xf8\xdb\x29\xd2\x7e\x99\x94\x12\xe8\x05\x93\xe9\x63\xe1\x38\x13\x02\xdf\x21\x9e\x07\x9e\x34\x37\x4a\x8e\x7c\x46\x4e\x11\x42\x4f\x05\x7c\x2b\xef\x54\x2a\x13\x18\xb3\xf2\x73\xb9\x4a\x56\xa6\x92\xf2\x96\xa1\x01\xc7\x82\x38\xcb\xd3\x32\xa1\xc3\xfa\x6a\x89\xf7\x69\xf4\x0f\x5c\x5d\x95\x2d\x6c\x49\x53\x2e\xd7\x18\x5d\x79\xbf\xb1\x4e\x0f\xec\xed\x4b\x8d\xc7\x24\x45\x5a\xcc\x5a\x1b\xae\xdb\x04\x78\x8e\x54\xf8\x51\x55\xdb\xf8\xba\x4d\x8c\x5e\xc0\xcd\x84\xc2\xe7\x19\xc9\x74\x9b\x49\xd5\x6f\x59\xe5\xf2\x16\x1f\x47\xd5\xb6\xae\x48\xa4\xeb\x3e\x00\x55\x9b\x7b\xdd\xdc\x5c\x2c\x5a\xfc\x41\x6e\xee\xae\xcc\x33\xd7\x97\xa1\x26\xfb\x32\x09\xc8\x0a\xa3\x4c\xe8\x58\x0f\x75\x91\x91\x90\x7c\x63\xb0\x35\x06\xf8\x2d\xd9\x97\x84\x26\x12\x88\x8a\x49\x8d\xb2\x42\xfe\x4d\x04\x86\xfa\xeb\x1a\x30\xa0\xda\x73\xcc\x04\x50\x60\xdc\xa6\x78\x82\xd0\x21\x76\x3e\xc3\x1f\x73\x72\xc2\x40\xfc\x04\x57\x83\xcd\x19\x53\x29\xb8\xf6\x98\x33\x89\xe0\x1d\x73\x58\x04\x6f\xbb\x90\x1e\xcb\x2b\x59\xf4\xfa\x2e\x89\x06\x58\x68\xd6\xb5\x13\xac\xf2\x95\xc8\xc2\xe9\xc0\x16\x76\x22\xab\x9f\x27\x16\xe6\x5c\x3c\x60\x4e\x1c\xc1\x11\x73\xdc\x08\xbe\x37\xbb\x0d\x26\xe4\x92\x1c\x31\xd8\x19\xee\xa0\xc5\xe3\xe7\x4f\xbc\x7c\xfc\x54\xda\x3f\x34\xf2\x60\x5d\x30\x7c\xf2\xf0\x01\x96\x53\x52\xed\x1e\x3e\xc2\xab\x11\xbf\xbb\x63\x73\xd4\xaa\x0e\x98\xac\x9f\xa4\x56\xf1\x90\x88\x35\x1d\x0d\x32\x3b\x2b\x13\xfc\x9b\xf0\xba\x93\xaa\xc9\x6e\x1f\xe3\xf0\xa3\x39\xf9\xce\x00\xfb\x44\x2f\x34\x41\x8f\x6d\xbc\xa7\xdd\x23\xd9\xd6\x4a\x77\x3e\xba\xf7\xf8\xd2\x7c\xda\x56\x0a\xf1\xce\x50\x66\x5c\xca\x64\x56\x6d\x05\xdc\x15\x5a\x73
\x48\x38\xe8\x6a\xa8\xb2\xc6\x1a\x37\x7f\x2f\x5e\x8a\x2a\xf5\x28\x03\xe2\xe2\xa1\x33\x03\x8a\xa5\x53\x4c\x78\xb1\x95\x8f\xd5\xcb\xf1\x57\x82\x02\x5e\x75\x63\xae\x2e\xc7\x42\xd1\x51\xeb\x35\x03\x17\xd1\x42\x7c\x5d\x50\x4d\xbb\x2a\xbf\x6c\x97\xbc\x60\x40\x11\x18\x2c\x7a\x3d\x20\xc1\x5d\x2c\x97\x1e\x01\xb5\x5c\x53\x97\x80\xb3\x18\x04\xed\xd5\x40\x29\x24\xba\x95\x5b\x69\xb3\x22\x1f\xc4\xf2\xcb\xba\x07\x62\x56\x55\x3a\xf0\xa3\x6d\x71\x29\x5a\xe8\x0b\x9a\x00\x13\x67\xcf\x25\x18\x34\x82\xa3\x4b\xcc\xae\x52\x81\x09\xe6\x5e\xdf\xec\xa1\x58\xe9\x98\x4c\x04\x2d\xc4\xb5\x0e\x94\x0c\xf7\x9c\x39\x73\xd2\x95\xe7\xd8\xf9\x41\x53\x5b\xd0\x38\x13\xe8\x73\x81\xce\x2f\x05\x71\x39\x11\xd2\xc3\x3b\x7d\xff\x93\xb8\x75\x4e\x4d\x70\x03\x3b\x01\xfa\x48\xde\x5f\x6f\x26\x6d\x92\xd0\xf9\xa4\x28\xa9\x5a\x11\x6f\xb0\x80\x5e\x72\x77\xe7\x59\x15\x90\x77\xee\x0f\xb1\xf8\x1d\x35\x47\xb8\x70\xbe\xe5\x06\x42\x85\x7f\x87\x4e\x18\xef\x98\x2a\xb7\x0b\x91\xf8\x3c\xd1\x7e\x67\xa7\xb5\xbd\x40\xb3\x4b\xb2\xc7\xe0\xc9\xe6\x1b\x4f\x9f\xb4\xbe\x90\xea\x17\xee\x57\x5e\x08\xc9\xab\xb2\xc5\x73\xf1\xcf\x27\xf1\xcf\x4b\x38\x23\x09\x0c\xb1\x8a\x97\x9a\x7c\x67\x4b\xec\x73\x67\x58\x2c\x00\x66\xec\x45\xbe\x25\x24\x69\x06\x67\xcc\x84\xf7\xcc\xa9\x65\x60\x39\xa2\x62\x83\xfe\x95\x52\xad\xd7\xd2\xf4\x28\xc4\x72\xd3\x84\xcf\xac\x59\xfa\x4f\xfd\x43\x33\x18\xb6\xc5\xff\x57\xb2\xcc\xec\xb5\xd5\x5e\xef\x2d\x30\x4b\x13\x56\xf2\x14\x3d\xbd\x67\x70\x98\x99\xa6\xaa\x0d\x9f\xe8\x8a\x9a\x87\x99\xaa\xaf\xf7\x86\x15\x15\xd6\x3e\x33\x13\x0e\x19\x29\x25\x26\x2f\x4b\xd0\x0a\x53\x88\x4c\xa6\x09\x1f\x99\x8a\xa1\xfb\xd4\x49\xf0\xf0\xcc\xee\x9b\x4d\xb8\x13\x29\x46\x22\x84\x1b\x95\x93\x81\x5b\x74\x6d\x7f\x64\xe6\xba\xb4\xbe\x7d\xf9\x05\xf1\x7a\x4e\x3e\x31\xb8\xc0\x9d\x92\x32\x76\xb2\x5e\xc3\xcb\xf6\x0e\xe8\x07\xc9\x3a\xf7\xa5\x78\xb4\x42\xd6\x49\xc7\x18\xa6\x79\x25\x19\x68\xff\x59\x69\x15\xaa\x74\xbd\x8b\x62\xb8\xbe\xc1\x85\x2c\x8e\x17\x6f\x18\x50\x14\xc0\xf1\xea\x44\x73\xc3\x0c\x45\xdd\x44\xc6\x3e\x3c\xe7\xf0\x85\x81\x8b\x62\x36\x36\x3b\x65\x58
\xda\x1d\xa2\xb4\x1a\xce\x77\x20\x75\x8d\xfd\xc4\xa7\x3c\x4e\x0c\x98\x4f\x4c\x48\xd2\x1b\x96\x54\x2b\x67\xe8\x1f\x91\xd6\x93\x47\xf8\xb5\x18\x13\xee\x76\x09\x7e\xe8\xf1\x6b\x8e\x04\x2c\x60\x80\xb9\x0c\x91\xdc\x18\x99\x6f\x0b\xbf\xd3\x31\x38\x47\x18\x9f\x03\x4a\xdd\x10\x3b\x5a\x71\x47\xf0\x9d\x53\x46\x50\x8e\x31\x21\x13\xbf\xb9\x45\xc7\xb8\x5a\x49\x33\x12\x1b\x05\xd5\x5d\x26\x2b\x7e\xd5\xe3\xb0\xb9\x75\x25\x20\x41\xee\x69\x1f\x95\xab\x5a\x92\x20\xf9\x1c\x65\x83\xd2\x6d\x02\x67\x28\xf6\x2c\x49\x05\xb7\xb1\xae\xd4\x6e\x24\x29\x50\xe0\x16\x5b\x15\x93\xd5\x6e\x44\x02\x5f\x7d\x39\x4d\xb6\x92\x9b\x8a\x1f\xf0\xff\x6a\xba\x08\x63\x6a\xba\x74\x5c\x4c\xf7\xa1\x4d\x95\x9d\xa0\x32\x55\x25\xe5\xbe\xc1\xe9\x06\x0e\xc9\xe5\x04\x43\xc1\x4d\x7d\xd3\xc4\x51\x71\xed\xed\x05\x23\x79\x65\x42\xd5\x71\x69\x7d\xdc\xa0\x3e\xee\x23\x7b\xf3\x15\x89\x0d\xe5\x3b\x12\x1f\x8a\xd5\xc3\xb7\xab\x28\x50\x5b\xf5\xc7\x75\x50\xda\x97\x63\x62\x32\x66\x8b\xee\x17\xcd\x7e\x6b\x36\x53\x18\x59\x6d\xf5\xa4\xde\x6a\xb7\xd6\xd9\x6e\xd1\xec\x69\xb3\x59\xd1\x59\xd9\x6a\xa7\x09\xe4\xe8\xf5\x96\x3a\xaa\xc3\xbe\x29\x9d\xf2\x53\xc1\x1d\x75\x78\x9a\x3e\x5c\xd1\xb9\x95\x19\x4c\x9c\x4b\xc2\x53\xf5\x5a\xab\x4d\x2e\x42\x87\x25\x9c\x6b\x2c\xc9\xd9\xda\x5c\xcb\x2e\x9f\x6d\xcc\x61\xb2\x2e\x13\xbe\xdd\xbe\xe3\x0a\xa1\xbc\x45\xcf\xf7\x8b\xac\xc6\xec\xd9\xad\xc6\x60\x87\x72\xf2\x58\x05\xf7\xf0\x36\x43\x3c\xb8\xdd\xe4\xb5\xe5\x73\x5b\x87\x55\x8f\x91\x14\xc2\xdb\xcf\x9a\x7e\xb6\xc3\x6d\x3d\xa3\x54\x97\xaf\x4d\xc8\x52\x87\x5c\x07\xce\x91\x0b\xd7\x51\x8d\xcd\x46\xe4\x24\x22\xd7\x01\x39\xa0\xdc\xb7\xa2\x78\x45\xa4\x73\xa3\x09\x34\xdd\xe6\x4c\x5a\xf5\xb9\x93\xde\x97\xd7\x42\xcd\xa2\xa7\x2a\xf0\xb5\xa0\x20\xda\xe7\xe5\xd4\x7e\xa9\x1a\x00\xe3\xf6\x55\x40\x8c\xfd\xc2\x04\x91\x66\x6c\x11\x70\x95\x53\x3b\x59\x40\xef\x5c\x5a\xa9\x7b\x41\x8a\xcf\xaf\x7c\xde\xc3\xbc\x69\x96\xa1\xfd\x2e\x1b\x51\xb5\xda\x29\x88\xdb\x59\x5c\x09\xa6\xd4\x9e\x41\x8c\xc1\x1c\x65\xd2\x8f\x13\x69\x57\x84\x28\x15\x6a\xec\xbe\xf2\xa3\xf1\x19\x8c\x03\x79\xa3\xe2\xc8\x25
\xbd\x83\x5c\x07\xdd\x75\x62\x99\x46\x46\x88\xbe\xda\x33\x8b\xdf\x35\xfe\x76\x4f\x9f\x4c\xdd\x33\xee\xc6\x42\x2f\xde\x8b\x9b\x73\x12\x53\x72\x0b\x6a\x21\xb5\x98\xa8\x6a\x0e\x4d\xe4\x34\x70\xae\x97\x24\x46\x40\x87\x40\xf0\xa3\x00\xf9\xb7\x98\x7c\x96\xb6\x38\x8a\xfd\xa0\x17\x68\x95\x5a\xda\x7e\x0c\xf4\xd4\xde\x25\x97\xac\xf0\xc0\x89\x2c\x17\x22\xcb\x33\xc1\x9d\xdb\x38\x61\xc6\x6d\x3f\x5e\x57\x7d\xcd\x11\xc7\xab\xf3\xb8\x24\x5c\xb0\x02\xfc\x5a\x39\x9f\x14\xd7\xae\xe1\x53\xf5\x00\x83\x0e\xa9\xd5\x1f\x95\x7b\x4c\x98\x83\x14\x1d\x73\x89\x33\x21\xb2\x84\xe2\xcf\x3e\xe4\x0e\x13\x22\x8b\x27\xae\xc6\xd0\x17\x7f\xfa\xda\x4a\xc8\xb4\x84\xb2\x64\x24\x6c\x1a\x0a\xe7\x64\x97\x41\x0e\x9e\x59\xb3\x16\x7a\x26\x4a\x29\xa7\x9c\xa4\x66\x21\xa9\xcc\x19\xf4\x2b\x06\xc1\x5c\x19\x04\x71\x69\x4f\xb8\xd8\xe1\x39\x25\x13\x21\xab\x9a\xb6\xf8\xd6\x0d\x9f\xc3\x49\x0d\x88\x27\x4d\x20\x5e\x4a\xc0\xa5\x11\x82\xa5\x17\xa4\xcb\x90\x5e\xf5\xe8\x64\x22\x33\x04\xed\xa2\x5f\xea\x56\xf0\x85\x0a\x22\x28\x50\xa6\xce\xa4\xe9\x22\xb6\x94\x60\x5c\x71\xbd\xd3\x00\x8c\x2e\x8a\x2e\x13\xb0\xda\x2f\x5d\x00\x1f\x6c\x7a\xba\x2d\x9b\x1e\x80\x1b\x2d\x2e\x2a\x2d\x1a\xfe\x9b\xe2\x71\x62\xd1\x8b\x12\xb0\xe6\xc4\x97\x6c\x48\x22\x71\x13\xa2\x90\xd6\xa8\x72\x81\xe0\x41\x5f\x80\x7c\xaa\xce\x10\x83\xb4\x72\xc4\x2c\x0f\xfd\xd4\xf9\x72\xdc\x29\x24\xca\xd3\xe5\x83\xf1\xc9\xf8\xdd\xb8\x9e\x45\x6d\x92\xfe\xfa\x71\x97\x45\x3f\x74\x9c\x78\xc5\x29\xf8\x10\xa4\xf2\xc4\x8b\xfd\x6a\xd7\x2d\x69\x5b\x1a\xc7\x69\xa7\x2e\xc1\x13\x35\x31\x40\xda\xfc\xde\x46\x15\xa6\x32\x85\x55\xa0\xad\xc7\x1b\xde\xb8\x7e\xbc\xc1\xdf\xb3\x78\x73\x0f\xaf\x02\x69\x6e\x5e\x9b\x10\xa6\xdb\xf3\x7b\xe4\x1d\xcf\x75\xd2\x39\xaf\x73\x8f\x0e\x09\xa6\xd2\x62\x97\x52\xee\xee\xb7\x75\xd4\x7b\x11\x10\x2f\x95\xb6\xe9\x45\xb3\x27\x9d\x1e\xa0\x9f\xa2\x3f\x82\xd6\x60\xd8\x67\x1b\x23\xb7\x80\xce\x6c\x0e\x6c\x4f\x1e\x4a\xcf\x52\x67\x4c\xfe\xd8\x81\xfb\x30\xfc\x6a\xc2\x32\xed\xb0\x2e\x35\x98\xd4\xfd\x4d\xe0\x9f\xd9\x73\x92\xa6\xed\x21\x2c\xcf\x39\x2c\xc4\x7c\x61\x96\x9a\x6b\x50\xe9
\xba\xda\x83\x6f\xa4\xb7\xac\xf2\x4e\x4e\x2c\x7a\x6c\x02\xfb\x8e\xf4\x78\x26\x11\xf8\x92\xb0\x14\x32\xe0\x90\xa7\x66\xcd\x05\x9d\xd6\xe9\xbf\x64\x14\xdf\x05\x3d\x23\xae\x64\x34\xf4\x83\xd9\x8c\x72\xc0\x67\xcf\x3a\x5e\xae\x8c\x2b\xf9\xdf\x25\x99\x88\xd1\x5d\x38\xf4\x89\x9b\x0a\x8a\x59\x61\x5e\x7e\x95\x79\x19\x82\x6f\x95\x8c\xef\xa1\xe2\x51\xe8\x2e\x5c\x6f\xa8\x9c\x72\xe5\xc7\xca\x78\xbc\xc6\x82\x04\xa5\x5f\x6e\x98\x42\xdc\x16\x88\xd0\xf0\x53\x46\xe7\xd5\x0e\x40\xd4\x07\xb4\xab\x8e\xe7\x3a\xd1\xcd\x55\x1b\xf6\x76\x6b\x7e\x05\xbb\x93\x9f\xb4\x4a\x6f\x4f\x4d\x1b\x00\x75\xb1\xd9\xe2\x51\x51\xbd\xbe\x21\xa3\x7c\x97\x11\x30\x95\xad\xed\x64\xa9\x82\x0f\x00\x4b\xed\x79\x40\x7c\xc9\x58\x4d\xbd\xb2\xd3\x54\x72\x03\x2e\xb8\x81\xaf\xb8\x81\x5d\xbe\xe8\xe3\x8b\xf8\x56\xd3\x13\x3c\xeb\x98\xd7\x0e\xc2\x2c\x2e\x49\x26\x17\x44\xef\xdc\xb6\x37\x4a\xa0\xaa\xcb\x43\x99\xf4\x49\x9e\xa7\xca\x07\x24\x4e\x82\x69\x10\xd1\x70\x5b\x1d\x81\xdf\xb4\x43\x45\xbb\xfc\xa9\x9b\x5d\xd9\x11\xd0\x3d\x41\x1a\xc6\xb6\x0f\xec\x48\xd2\x87\xdd\x36\x00\xd0\xaf\x3c\x14\xaf\x3c\x11\xaf\x5c\xc9\xd6\xe3\xf4\xc6\x22\x12\x89\x4f\xbd\xab\x36\x57\x93\xd4\xe7\x3c\xdc\xf0\x28\xd1\xce\x26\x5e\x90\x52\x16\xfe\xa9\xf2\x11\xfb\x5d\x24\xf7\x07\x8d\xec\x04\x54\xb5\x05\x38\xd4\x2b\x4b\x3d\x2f\xf1\xd3\xb4\xc3\x17\x67\x5f\x70\x37\x38\x4d\x1d\x99\x01\x77\xe9\xa3\x2f\x89\x1f\x90\x53\x97\x1c\x0a\xba\x50\xa4\xbf\xd5\x1e\x39\xe3\x54\x65\xcb\x95\xdd\x94\x59\x72\xaf\x5c\xb2\x9b\xa2\x28\x76\x91\xde\x98\x43\x88\xdb\x1c\x68\x66\x67\x40\x73\xdb\x07\xfa\xd6\xa6\xc0\x42\x3b\x01\xf6\x5c\x4e\xff\x3c\x2d\x9c\x83\x78\x20\xc6\xf9\x52\xfa\xce\x44\x93\x60\x6a\xc0\x3c\xd5\xae\x46\x7e\x92\x06\x71\x74\x1c\x4d\x62\xed\x2f\x34\x8d\x3f\xc8\x9b\x95\xaf\x66\x59\x10\x7a\x07\xe8\x47\x54\xbf\xf7\x3e\xf5\x93\xea\xbd\x84\x46\xee\xac\x96\x47\x38\x0f\x36\xfa\xca\x2b\xbd\x5f\xb9\xe4\x22\xd5\xee\x36\x6a\x8a\x61\x96\x62\xe6\xb1\xd3\x54\x3c\xbe\x94\x8f\xe1\xb8\x9b\xc5\xd5\xe5\x07\x2d\x06\xc9\xba\x8f\x98\xe6\xed\x3c\xad\x0b\x11\x89\x10\x22\xe4\xa9\xc3\x49\x07\xe9\xd3
\x39\xbc\xbe\x6d\x42\x7e\x35\x55\xe4\x48\x05\x94\xa4\x50\x08\xf5\x27\x29\x8c\x69\x85\x60\xb0\x2b\xfb\x07\xbb\x46\x22\xa6\xe8\xb1\x09\xd7\xad\xec\x1c\xa5\xe9\x59\x5c\x53\x24\x65\x89\xd5\x88\x5b\x61\x2c\x3d\xdd\x9c\xa8\xac\xa7\x1a\x24\xc5\x6d\x2b\xf1\xc3\x98\x7a\xe4\xce\x8e\x20\x0c\xe6\x1a\xce\xba\x56\x2b\xb1\xd8\x6b\x99\x00\xe0\x55\x33\xce\xeb\x91\xa2\x61\x2b\xb2\x9c\xc0\x3e\x56\xd7\x3c\x91\x16\x50\x0c\xd1\xc1\xc0\xf1\x06\x7f\x3c\xb1\x33\x8b\x02\x7b\x6a\xbb\xe0\x5e\xdb\x29\x27\x58\xbc\x86\xd6\xcd\x1d\x2e\x04\xa2\xd7\x14\xdc\x54\x10\xb4\x03\xd5\xab\x20\x3d\xc8\xec\x04\xef\x6d\xb2\x3c\xdd\xa9\x27\x3a\x05\xf7\xc0\x2e\x64\xf7\x4b\x0a\x85\xf4\xb6\xb3\x19\x9e\xe6\x5e\xdb\x0b\x5e\xd7\x4f\xc6\x94\x28\x7e\xe6\xaf\xab\x81\x41\x52\x3d\x11\x53\x61\xce\x25\xb9\x4a\x41\x89\x6f\x93\xb5\x98\xe6\x58\x4c\x0f\x15\xae\x67\xb4\x9a\x53\xa2\x1c\xa7\xcf\xc9\xc4\x04\x77\x6c\xa7\x7a\x66\xbb\x95\x99\x3d\x2d\xa3\xe2\x9a\xaf\xe6\x9c\x84\x85\x16\x85\xb0\xb3\xc2\xa4\x23\xdf\xa4\x56\x3a\x0e\x48\x58\x51\x0e\xd1\xe0\xd0\xde\x51\xc8\x49\xde\x54\xc7\xa6\x13\xa1\xa7\xb5\xc5\x4e\xe9\xf7\x9e\x67\x35\x66\x79\xbf\x3d\x4a\x2a\x2a\xc5\xe4\x9d\x07\xd5\x26\xd7\x29\xa9\x3e\x7b\xd4\xfe\x3a\xaf\x04\x16\x16\x6d\x1f\xab\x50\x34\xb1\xee\x7d\x07\x3d\x34\x3c\xa1\x7a\x6a\x07\xa8\xcd\x70\x34\xf6\xd4\x7e\x2b\x96\xe6\x29\x26\x7d\xea\xaf\xeb\x02\xdc\xc3\xea\xc8\x4d\xbd\xec\x92\x7c\x53\x0a\x16\xf8\x35\x51\x71\x21\xe1\x5c\x4a\x5d\xec\x04\x70\x00\xcc\xe9\x42\x5b\xa2\xb7\xd8\x89\xbd\x40\x8e\x4f\x6b\x36\xe1\x99\x04\x6b\xd9\x89\x7b\x20\x3b\x11\x60\xfd\x8c\x3a\xb3\x26\xc4\x1c\xd8\xb1\x33\x2b\xa5\xb8\x1a\x18\xab\xa0\x2f\x04\x43\xd5\xdf\x58\x76\xd4\x8c\xfa\x12\x10\x57\x8d\xfa\xaa\x42\xdd\xfd\x22\xd8\x8b\x16\x1d\xed\xeb\x39\x35\xe3\xbd\xdc\x7d\x15\xef\x55\xa0\xec\x6f\x9b\xe2\xe4\xdb\x4d\x21\x6c\x67\x33\x02\xcc\x65\x8d\x26\x4f\x37\x7b\xf9\xb6\x4d\x31\x96\x4d\x1e\xd4\xe5\xd6\xbd\xd4\x79\x03\xef\xda\xa8\x25\x99\x93\xbd\x14\xa9\x3e\x66\x16\x7b\xe3\xc3\x0e\x44\xa6\x1d\x99\x77\x8d\x7b\x74\x19\xdc\xcb\xef\x1b\x6b\x78\xdb\x4a\xdb\x69\x62\xdf
\xd9\x11\x7a\x76\x04\xf4\x1b\x3a\x22\xf5\xed\xfe\x7a\x0d\x07\xdd\xa2\x41\x3d\x1a\xd2\x8f\x95\xf6\xce\x1e\x08\x91\xe1\xd8\x7e\x8b\x5a\x0d\x3d\xb7\x85\x2c\x09\x6a\x00\x19\xc3\xcd\x9e\xd8\x7d\xa0\x27\x6a\x44\x1f\xdc\x05\x76\x35\x14\x77\x77\xd0\x4f\x73\x2e\x45\x3d\x9c\x0f\x9b\xe2\x71\x98\x0e\x00\x77\xe7\x82\xcf\x8b\xb6\x6c\x26\xfe\x91\x11\xcb\x45\xb4\xb7\x78\x0d\x43\x09\x75\x36\x94\xb5\x09\x47\xa9\x23\xd8\x8d\x1f\xaf\xe1\x7b\xa7\x76\x99\x58\xfd\x51\x84\x87\x0b\x26\xbc\xee\x54\xcc\xb5\x4b\xb4\x33\xa3\x24\x91\xc9\x6a\xbf\xa7\xf2\x94\x7c\xa6\x8a\x8e\x19\xcb\x24\xf6\x32\x7c\xd5\x80\x97\x13\x0c\x79\x85\x89\x13\x8f\x12\x8b\x4e\x6c\xb1\x3b\xac\xed\xad\xe9\x46\x8e\x4a\xfd\x66\xda\x12\x38\x1c\x54\xd4\x8b\x3a\x5e\x07\x8d\x14\xd1\x3d\xca\xd6\x6b\x62\x42\x28\x07\xc5\x2c\xc6\xc5\xa0\xde\xc6\x49\x9a\x90\xbb\xd0\xf3\xd1\x79\x97\x12\x3d\x63\x95\xd4\x4e\xb0\xd5\x70\x16\xa7\xdc\x7e\x3a\x7c\xfa\xe0\x9e\x51\x91\x1e\xce\x52\xb8\xc2\xf5\x20\xd4\xf9\x21\xf7\xde\x75\x38\xee\x9f\x5b\xea\xa8\x02\x38\xf6\xec\x9d\x35\x64\x4e\xcd\x90\x88\x3b\x8a\xd2\x3d\xea\x10\xf1\x1a\x7c\x67\xd3\xd2\x38\x65\xaa\x99\xb4\x00\x09\xc2\x33\x27\x07\x29\x70\x60\x26\xb0\x57\x76\x0e\xec\xb5\xd8\xbc\x09\xb0\xb7\xa8\xc5\x7c\xb3\x43\x01\x8a\x4c\xf0\xe0\x14\x5c\x8c\xd3\xc5\x1e\xdc\xd0\x9e\x08\xf6\x19\x80\xbb\x2f\x60\xef\xc0\xa6\x82\x55\x65\x42\x50\x3f\x4a\xd7\x28\xff\x7c\x48\x0b\xaf\x3c\x81\x06\xf0\x22\x75\x58\x42\x0c\x2f\xc8\x0d\x13\x5e\xc9\x8b\x74\x49\x23\xc3\x84\xe7\xa9\x33\x49\xe0\xbd\x10\xee\x5f\xa4\x80\x27\x7a\x73\xf2\x4a\xfd\x7a\x9e\x12\xe3\x24\xa6\x5e\x10\x4d\x2d\xcb\x32\xcc\xaf\xd2\x55\xff\x73\xab\xa8\xf3\x8e\x44\x16\x8f\xdf\x2f\x97\x7e\xb2\x4f\x53\x1f\xbd\x27\xdf\xa4\x2d\xa7\xb3\x14\x8b\x7d\x34\x9d\x58\xa2\x5a\x9d\x3d\x5e\xcd\xf6\x94\xfb\xf0\x39\x45\x97\x2d\x4c\x6f\xff\xb1\x13\x0b\xe6\x64\x99\x40\x04\x73\x1d\x71\xf6\x29\x75\x3e\xa6\xc4\x70\x43\x9a\xa6\xa7\x42\x3a\x37\xe1\x4b\x87\xa0\xf6\x22\x15\x9f\xfc\x29\x25\x32\xc1\x5a\x0f\xff\x1d\xac\x68\x12\x09\xcd\xc5\xfc\x6a\xaa\x15\x79\x83\xe6\x19\x0c\xa2\x7c\xf9\x2b\xf6\x29
\x69\x9c\xd2\x6c\xa1\xae\x5c\xbf\x4f\x37\xa1\xfe\x4b\x5a\x58\xa4\xa2\x10\x37\x8c\x65\x9c\xc7\x62\xcb\x12\x79\x1d\x06\x86\x09\x3c\x74\x66\x09\xf8\xa1\xb3\x48\x20\x0b\xbb\x57\xc5\x0f\x65\x05\x39\xac\x53\x8e\xac\x96\x86\xed\xcb\x90\x85\x42\x5a\x0f\xdc\xb9\x21\x04\x75\x34\x44\xb9\xe1\x76\xdf\x9f\x24\xd4\x2b\x17\xd1\x7c\x10\x70\x7f\xa1\x97\x4b\x65\xcb\x41\x88\xaa\x34\x09\x83\x68\xde\x53\xf1\x07\xa2\xa5\x6f\xda\x73\x12\x85\x12\xf8\x78\x08\x06\xa3\xee\x5c\x10\x94\xc8\x33\xc0\xe0\x09\x8d\xd2\x25\x4d\xfc\x88\x1b\x82\x33\x8a\x06\x93\x38\xe2\x06\xd6\x03\xf4\x93\xa0\xbc\xed\x66\x49\x2a\xe8\x80\xb1\x8c\x83\x08\xf3\x72\xab\x07\x71\x26\x14\x50\xdf\x00\x23\x8a\x23\x01\x05\xd5\xa9\x18\x62\x35\x64\x8c\x10\x4e\x46\xba\x49\x04\x6d\x0b\xa4\x6b\xbd\xa3\x07\x7c\x47\x83\xdf\x74\xa6\xdf\x8e\xe7\x8f\x75\xa6\xdf\xae\xfd\xda\x0c\xc4\x4b\x5b\xb7\xea\x65\x40\x82\x98\xcc\x09\x0b\xb1\x2c\xb5\x8c\x23\x80\x4f\x45\xfc\xc0\x1a\x3d\xf2\x42\x09\x2c\xd4\x30\x21\x97\x3f\x05\xd8\x78\x21\xa2\x05\x0f\x78\x28\x16\xa3\x1f\xb6\x3b\x51\x40\x2d\xcc\xb6\x8a\x22\x8c\x47\x03\xa4\xf8\xbd\x45\x32\xb8\xdf\x5b\xb0\xc1\x7d\xbd\xe7\xd5\xcd\x66\x3c\xea\x89\xa6\xe9\xa2\xc7\xe2\xc4\xf3\x93\x41\x12\x4c\x67\x7c\x30\xec\x71\xff\x92\x0f\x16\x19\xf7\xbd\x62\x8f\xb2\xd4\x4f\x06\xa9\x1f\xfa\xae\xdc\xda\x80\x07\x34\x2c\x9e\x0e\x16\xf1\xf5\xe0\x86\x26\x2b\x9f\xcd\x03\x7e\x43\x2b\x35\x11\x37\x0e\x11\x52\xfe\xe6\xba\x6e\x05\xbb\x93\xbb\xff\x70\x8c\x7f\xdc\xe5\x98\x03\x5f\xc6\x25\x45\x61\xdb\xe7\x4c\x07\x13\xea\xf9\x1e\x5e\x2b\xe8\x1a\xa4\xbe\x1b\x47\x1e\x4d\xae\x24\x44\xa5\x21\x91\x99\xfd\x4c\xb1\xe2\xc4\x38\xc4\x44\xa4\x3d\x76\xd5\xe3\xb3\x20\xed\x61\xb5\x9b\xca\xd0\xc6\x5d\x4d\x61\xd7\xb0\x68\xdb\xf2\xaa\xbd\x28\xf2\x57\x23\x99\xd8\xd4\x31\xee\xbe\x0e\x48\x1e\x4b\x5c\x9d\x75\xa0\xf5\xc7\x14\x8c\x59\xe2\x4f\x0c\xb8\xf7\x5f\x17\x34\xa7\xa9\x9b\x04\x4b\x6e\xdf\x0b\x64\x69\x48\x92\x38\x91\x69\x25\xfe\x32\xa4\xae\x4f\xee\xfd\x33\xbd\x37\x05\xc3\x30\xcd\x91\x61\xd8\x89\xa9\xab\x37\x2e\xdb\xe0\xe4\x9c\x93\x08\xed\xda\xd9\x66\xf1\xd4\x39\x09
\x1b\x4b\xa7\x97\x2a\x88\x26\xb1\x86\x89\xa1\x61\xc2\x2c\x24\x04\x3d\x76\xbe\xc0\x22\x6c\x7a\x56\x15\x62\xbf\xeb\xa3\xf4\x3b\xd4\xa9\xd3\x39\xc1\x2c\xf9\x05\xec\xe5\xc5\x88\x13\xda\x9b\xd0\x01\xf3\xc3\x70\x90\x86\x34\x9d\x0d\x62\x84\x55\x5c\xf0\xbe\x60\x7c\xc4\x50\x67\x4f\xb8\xec\x45\xf6\xb9\xa4\xea\xbe\xd5\xfd\x01\x1e\x8d\xa6\x62\x33\x6b\x9f\x50\xb7\xe8\xf9\xb7\x98\x55\xd7\x9c\x3c\x43\x02\xc2\x34\x94\x3e\x5a\x9d\x94\x7d\x15\xeb\xd0\xc4\xce\x26\xd7\xba\x09\x19\x93\x3f\xca\x1c\xbf\x65\x26\x5f\x2c\xf3\xb5\x6a\x92\xa3\x5a\x0a\xcd\x3b\x51\x25\xa8\xff\xd9\x46\xdc\x8a\xd8\xf9\x69\x28\xf6\xa2\xc8\xb2\x59\x30\x78\x2a\x78\xa1\xb4\x41\x24\x78\xda\x29\xa3\x5b\x64\x39\x75\xcc\x36\x74\x9c\x11\x74\x81\x9b\x73\x38\xcf\x48\x86\x91\x2d\x52\xf5\x29\xf3\x70\xde\xf2\x65\x7c\x35\xa9\xbf\xda\x68\x9c\xc8\x9c\x77\xbd\xf6\xa9\x36\x46\x38\xcf\x08\x51\xc3\xcb\x92\x28\x5c\xea\x55\xb7\x98\x9c\x9c\xd1\xdd\xca\x98\xf5\x87\xf2\xc1\xda\x84\xab\x56\xbc\x1d\x13\x1d\x61\x64\xa5\xcb\x30\xe0\x02\x33\xef\xde\x9b\x62\x60\x8a\x22\xe1\x34\x99\xfa\x82\x01\x5e\x76\x20\xfe\x06\x22\xd5\x5d\x96\x9e\x4b\x75\xbf\x2d\xe7\xad\x84\xfc\x59\x28\xc4\xe2\x79\x48\x8c\x6f\x2c\xa4\x82\x55\x96\x94\x52\x02\x28\x26\x84\x5a\x85\x70\x15\x62\x35\x0f\x64\x3a\xbb\x92\xd3\x70\x41\xdb\xc7\xea\xf7\xcc\x30\x61\x5f\xfd\x16\x7c\xf9\xf0\x96\x3c\x67\x3f\xd4\x52\xe9\xb8\x40\x22\x64\x1e\x51\xbc\x4a\xe8\xb2\x46\xba\x0d\xdb\x50\x34\x7b\xb7\x68\xbb\x1a\xec\x0c\x87\xd8\xea\x12\xe1\x13\x27\x7d\xda\x29\x2a\x09\x89\xe5\x94\x0b\xd9\xeb\x85\x29\x04\x17\x49\xfc\x69\x88\x65\xdb\x6e\x24\x64\x55\xa1\xa6\x15\xf1\x17\x41\x94\xa5\x0d\x9c\x47\x43\xad\x98\x99\xdd\xc6\x6d\xba\x48\x26\x0d\x49\x31\xd3\xae\xf1\x96\xe1\xb6\xe1\xd6\x26\x5c\x74\x80\xcd\x2f\x51\xee\xa8\x73\x7c\x7c\xc3\x9d\xd1\x84\x37\x09\x5d\x9c\x25\x8a\xf6\xae\xe1\xbc\x75\x1a\xd1\xef\x3b\xc3\x91\x31\x54\x25\x5a\x6d\xfc\x77\x0d\xc7\xad\x6d\xcf\x05\xd7\x90\x21\xe9\x82\x97\xc9\xe8\x73\xbc\x29\x03\xd2\x93\xea\xad\xeb\xf2\x16\xc8\x0a\xb0\x9b\x11\xff\xe7\x21\xf9\xff\x2b\xda\x1f\xd7\xa5\x12\xe9\xdf\x23\xef
\xdf\xed\x9b\x86\x66\xfd\x27\x1d\x70\xf2\xaa\xa2\x35\x05\xd3\x48\x88\x5d\x93\x81\xeb\x0b\xe1\xbb\xdc\x71\x89\x9f\xc7\xa1\x10\x14\x56\x0a\x05\xbf\x35\x51\xb0\xc2\x6c\x06\xf7\xbb\x74\x43\xef\x59\xe4\xcc\xc9\xb7\x50\x06\x51\x58\xbe\xa9\xd3\xae\x5f\x77\xcc\xf0\x5b\x28\xa4\x05\x73\x0d\x67\xb5\x11\x33\x28\xa8\xce\x2d\x93\xea\x66\x24\x32\x47\x67\x44\xd5\x68\xc2\x9a\xec\xf6\x19\xe1\xa0\x6e\xf8\xd5\xc4\xdb\x18\x3e\x93\xc0\x19\xe9\x0b\x70\x97\x79\xdd\x43\x27\x4c\xe0\x9d\x22\x81\x94\xa1\xe4\xfd\x36\xdc\x66\xa9\x71\x8e\x08\xf1\x9d\x39\x39\x0b\xa5\x2d\x79\x8f\x42\xe4\x93\x4a\x02\x70\xb3\x9a\x62\xb2\x93\xff\x9f\x15\xfc\x3f\x32\x31\xcf\xa4\x90\x9a\x2c\xfa\x45\xc8\x4d\xe2\x23\x2d\x56\xb1\x85\x24\x15\x35\x6c\x19\xa7\x81\xb4\x03\xe1\x09\x4c\xe0\x2a\xc5\x49\x6e\xb5\x50\xf4\xd2\x01\x46\x60\xf7\xc2\x20\xe5\x52\x21\xc0\xdb\x25\x41\x5c\x0e\x86\x42\x3b\x78\x58\x92\xc4\x52\x89\x40\xd2\x8c\xba\x43\xcf\x1b\x4c\x42\xff\xb2\xd7\xe8\x58\xbf\x76\x22\x68\x37\x0c\x7f\xbf\x0e\xc5\xcc\xbf\x0b\xca\xbc\x17\xb6\x49\x88\x2f\x03\x52\x16\x4a\x5f\xc3\x9c\x9c\x86\xb8\xa8\xa6\x2d\x68\x8e\x61\x02\xc1\xdc\x87\xaf\x21\xb3\xfa\x23\x79\xcb\xbe\x08\x31\x8a\xcb\x84\x25\x8e\xd2\x2d\xe2\xd2\x8f\x9b\x22\xae\xec\xe1\x36\xf2\x62\x9d\x4f\x20\x07\x0b\x14\x1d\x6e\x27\x9d\xfe\x95\xdf\x21\x22\x1e\x17\x65\x80\x10\x95\x08\x47\xa6\x77\x48\x12\xc1\xb9\xb8\xe6\x5c\xef\x4a\x76\x29\x80\xad\x57\xac\xf6\x0e\xf6\x85\x92\xc1\x61\xa8\x60\xe1\xbb\x59\xae\x90\xb2\x04\x61\x83\x7e\x28\xe4\x30\xc9\x2a\x0e\x9a\x18\xab\xa4\xc0\x91\xa1\x59\x1b\xd6\x62\x43\xae\x63\x94\x8b\x52\xf0\xd0\xe4\x36\x2c\x74\x91\x0c\x76\xd4\x34\x95\xc6\xb6\xa0\xc9\x34\x88\x06\xa1\x3f\x11\xfa\xdc\xe0\x41\x52\x1a\x18\x36\x96\x4d\x48\x5a\xb8\x4e\x72\xc6\x47\x12\xd9\x32\xa1\xfc\x7d\x0f\x6f\x88\x44\xe2\x16\x1b\x75\x47\x74\xdc\x52\x15\xde\x29\x17\xb8\x45\x15\xfe\xcf\x51\x7a\x1f\x31\x77\xe8\xf9\xbf\xaa\xf7\x56\x37\xec\x4f\xab\xb8\xdc\xb4\xbb\xac\x05\x5a\x8d\x6e\xae\xa8\xe8\xe3\x34\xe6\x3d\x5c\x70\x85\x00\x5f\x4d\x08\x1c\xfa\xf3\x27\x66\xa0\xcb\x40\x99\xb5\xb7\x91\x87\xa7\x25\x79
\x38\x08\x21\x80\x0c\x2d\xdc\x5f\x28\xf1\x4d\x60\x0e\x46\x6d\x8d\x32\x97\x4c\x04\x1b\x44\x62\x6b\xd8\xb5\xcb\xd4\x00\xf4\xad\xaa\x4e\x7e\x11\xca\xa9\x0e\x8b\x35\x5f\xcd\x02\xee\x0f\xd2\x25\x75\xa5\xdd\x69\x43\x5c\x65\x72\xee\x35\x90\xea\x6f\x92\xc8\x05\x1b\x3c\x50\xe8\x3a\xe7\x10\xc3\x11\x71\x01\xfd\x13\x82\xd1\x9c\x1c\x15\x1b\x54\x92\x5f\x35\x05\x8d\xe0\x73\xf2\x36\x04\x0e\x91\x60\x56\x1a\xc1\x25\x6e\xbc\xee\x14\x81\x57\x24\xa1\x80\x01\x94\xe0\x66\xe0\x66\x68\x51\xae\x99\x00\x25\xe6\x68\x6d\x82\xe9\xbf\xa5\x7e\x58\xfd\x84\x65\x38\x78\xa4\x26\xd4\x1d\x99\x12\x92\xef\x21\x22\xe4\x18\x25\xda\x2f\x26\x60\x62\xd3\x08\x70\xf4\xc8\xac\xa8\x28\xe2\x3e\x75\xc6\x98\xd9\x51\xb4\x7c\x56\xef\x62\x4e\x5e\x87\x58\x9d\x1a\x28\x24\x30\x14\x1d\x94\x26\x55\xe3\x34\x96\x3b\x28\x01\x28\xed\x4d\xd0\xc6\x88\x01\x8e\xca\x3a\x16\x2d\x33\xa1\x4e\xbd\x52\x46\x56\x09\xc0\xf0\x7c\x8b\x6d\x15\x2d\xce\x2c\x56\x16\xe7\xf7\xa1\xf3\x3c\x24\x86\x3b\xf3\xdd\x39\xe2\xf9\x67\xa5\xa8\x5d\x2d\x05\xc3\x7f\x13\x96\xce\x9d\x4a\x77\x83\xa2\xf1\x57\x13\x8f\x4d\x3e\x6e\x33\xcd\xce\x68\x34\xc5\xc4\x2f\x49\x00\x11\xbc\x09\x05\x2a\x7d\x6a\x93\x22\x6e\x61\x9e\x9d\x93\x57\xc5\xb3\x05\x17\xf0\x2b\x80\x58\x56\x7c\x1b\x08\xf5\x36\x89\xc3\xe2\x52\xcc\x91\xc5\x97\x15\x46\x8e\xef\x7e\xd6\x1f\x8b\xcf\x90\xbe\xd7\x3b\x18\xe8\x25\x7d\x1f\x16\xf5\xea\x12\x53\x7c\xa5\x2c\x9f\xc9\xe1\x65\x60\x2a\xee\x56\x45\xa8\x46\x2f\x5e\xe0\xe2\x29\xcf\xcd\x6d\x3d\x5f\x9a\xb7\x84\xe8\x52\x22\x5c\xa4\x48\x8f\x84\xff\x2f\xad\xd6\x59\xfa\xd9\xbe\x8f\x7e\x65\x9f\xd0\x6d\x08\x9d\x76\x8b\x6c\x2c\x2f\x43\xa7\x51\xe2\xfd\x30\xab\xc6\xee\x2d\xb2\x32\xba\x2e\xca\x8b\xe8\xba\x97\xa1\x2c\x58\x9c\x88\x5b\xb9\x0b\xc6\xdc\xbf\xda\x8f\x3d\xdf\x00\xcc\x86\x62\x02\xcf\xbb\x77\x7c\xee\x5f\x79\xf1\x2a\x2a\xb6\x3c\xc9\xb1\xca\xef\xf6\x37\xb2\xe5\x46\xfb\x4c\x0c\xfd\x9a\x17\xb5\x2b\x0f\x7d\x13\x84\x4a\x4b\xf3\x5b\xe6\x9f\x72\xdb\x06\xac\xf8\xbd\x06\x1d\xcf\x8b\x7c\x31\xb9\xf3\x0f\x3f\xca\x9d\xea\xf1\xe2\x3f\x60\x92\x3b\x4f\x80\xe5\xce\xce\x03\x48\x3b\xbe\xa8\xa4
\x24\xae\x80\xc6\x38\x1c\xd0\x8c\xc7\x6d\xf2\xe4\x56\xa3\xf4\x6d\xb8\xd9\x9c\xd0\x1c\xee\x08\x6d\xa1\x84\x9a\xe7\x81\x3a\xee\xd9\xca\x11\xa5\x35\xb0\xda\xcb\x4e\xad\x17\xe3\xff\xfc\x6f\xa3\x02\x7e\x10\xb6\x7e\xec\x2d\x2c\x9f\x0f\xa1\x48\x13\x24\x63\x6f\x64\xc6\xd0\x3c\x97\x87\x7a\x0b\x1a\x0a\x72\xe5\xe5\x48\x84\x0a\xdf\x3a\x13\xfa\x39\x52\xa1\x40\xfc\x5e\xc8\xdf\x32\x21\x95\x09\xb3\xd6\xb9\x9c\x91\x08\xee\x0c\xcd\x35\x2c\xbb\x20\xa4\x72\xac\xb4\x53\x1c\x2b\x4d\xf3\x16\xfa\x26\x47\x52\x4e\xec\xab\x8e\x7d\x5e\xe6\xa0\xa8\xaf\x84\xdc\x59\xae\x21\x78\x9a\xa3\x55\xe9\x2a\x6f\xb7\x18\xf5\x65\xa8\xe7\x37\x99\x25\x21\x81\xcc\x89\x72\x74\x86\x76\xa8\xa2\x38\xfd\x58\x66\x1c\x63\x21\xe6\x3c\xc2\xd2\x47\xae\x93\xe5\x42\x20\x0d\x9c\x43\x92\x21\x2a\xc4\xaa\xe0\x48\x28\x44\x88\x89\xf4\x60\xe1\x23\xc3\xb0\x85\xaa\x60\xcc\x68\xaa\x77\xd9\xc6\x8b\x34\x73\x5d\x3f\xad\xca\xb9\x25\x24\x26\xf1\xaa\x17\xc5\x83\x69\xc6\xb9\x9f\xa4\x1d\x6a\xcd\x91\xdc\xec\x34\x17\xe3\x6d\x80\xb2\x00\x75\xe3\xee\xa4\x90\x80\x83\x68\xb0\x0a\x3c\x3e\x33\xc0\xb8\x3f\x1c\x2e\x2f\xdb\xc0\x1f\x17\x4f\x22\xc0\x26\x91\xee\xe7\xed\x65\xd0\x90\x5c\x4f\xe2\xa4\x20\x9d\x08\x1d\x62\xf1\x78\x4e\x36\xcc\x86\x87\x24\x02\x96\x57\xc2\xeb\x5b\x17\x56\x17\xe0\x98\xe4\x66\x59\xa7\x83\xeb\xdd\x3a\x23\x3e\xb8\x95\xa0\x48\xfa\xf3\x67\x62\x31\xab\x3f\xea\xc7\xb6\x46\x3e\x71\x83\x16\x4c\xdb\xcd\x45\xcf\x45\x11\x9e\x78\x6d\x82\x9f\xb7\xc5\x3e\xab\x41\x47\xe2\x85\x1d\xd3\xc6\x96\xab\x9c\x04\x79\x0b\xbb\xa8\xac\xd4\x80\xf1\x68\x1b\x91\x58\x26\xc1\x42\x1e\xe8\x78\x39\x09\x4c\xa0\x2d\xb2\x2c\xfc\xa2\xb9\x2c\xcc\x49\xfc\x17\x9c\x58\xc8\x91\xf3\xbc\x78\x5d\xec\xa3\x50\x31\x6a\x7a\x46\x39\xdf\x7d\x64\x92\x3d\xb5\xf9\xd0\xf3\xad\xa9\x65\xb4\x93\x35\x3c\xa0\x55\x27\x5d\x12\x78\x19\x4d\x7d\x3c\x41\x45\x12\x17\xe4\x24\xce\x2b\xe4\x2d\xce\x4b\xe2\x86\xc9\x95\x61\x9e\x6f\x13\x49\x4a\xb1\x63\xf2\xb8\xe7\x05\xac\xb7\x60\xf7\x7b\x8b\xa4\xd5\x0e\x20\x8d\x49\x37\x88\x1d\xef\x51\xb9\xfc\x18\x12\xde\xb2\xdd\xcb\xb0\x66\x87\x32\xd0\x77\xb8\x10\x04\x2e\x3b
\x18\x97\x3e\xc6\xdd\xed\x78\xae\x93\x44\x8f\xb7\x70\xd0\xda\x31\xee\x7e\xb3\xa1\xd4\x9f\xb5\xcd\xd9\x34\x47\x45\x56\x3e\xa3\xa1\x3f\x8b\x2f\xf1\x92\x78\x29\x84\x01\x69\x64\x91\x07\x11\x82\xc6\x8d\x25\xe6\x98\x9b\xd6\x79\x13\x0e\xb7\xc8\x09\x8b\x38\x4b\x7d\xb9\xba\xc5\x59\xff\xe9\x4d\xed\x43\x9f\xe6\x7e\xd9\xfe\xa2\x63\x75\xf4\x21\xf7\xb9\x60\xf0\xc7\xb9\xf3\x70\x08\x27\xc8\xe6\xbf\xe5\xce\x83\x27\x70\xdd\x5c\x8b\xf6\xba\x8b\x65\x51\xcc\x44\x17\x33\xc2\xd2\x52\x05\x1d\x3a\xe7\x84\x9b\xb5\xf2\x8b\xb7\x95\x68\xf6\x3a\xa6\xae\x23\x7d\xde\xdd\x20\xd1\xbc\xbd\x41\x22\x3a\xd8\xb2\x94\x2c\xcc\x2a\x8b\x7e\x94\x37\xca\x22\xcb\x3a\x25\xc8\xd1\x8e\x64\xf2\x82\x21\x26\x2f\xa0\x3b\xe8\x65\xcb\x9e\x60\x91\x69\x9a\x20\xd7\x3a\xe3\x90\xe5\xb2\x28\xd5\xbb\x2d\x76\xc0\xeb\x5c\xdb\x01\x3f\x70\xac\xcf\x41\x51\xef\xe5\xa3\x03\x41\x7d\x25\x0c\xa1\x06\x8c\x3d\x9e\xf3\x5f\xe9\x31\x93\x07\x76\x2d\xbc\xf0\x06\xb6\xb4\x59\xbf\xa8\x83\x25\xf9\x5d\x2c\xe9\xb8\x64\x49\x6f\x73\xc2\x0a\x06\xf4\xad\x76\x3f\x2e\xee\x9f\x94\xf7\xf9\x08\x3f\xf4\x40\x31\xb1\x31\xb2\x1a\xf1\x19\x95\xd5\xd0\xaf\x9d\x57\xf9\x99\x5f\xf2\xb3\x2c\x27\xae\x09\x41\x95\xa7\x59\x7d\xe4\x6a\x72\x55\xcf\x14\x4f\xa3\x15\x9e\xf6\xae\xc6\xd3\x0e\xb6\xf3\xb4\xf3\xdc\x1c\xbd\x93\x3c\xed\x40\xf1\xb4\x8b\xdc\x04\x92\x38\x7b\x72\xbe\x12\xa0\x64\xad\x55\x01\x51\x58\x17\xed\x20\x27\x7b\xf8\xd2\x5f\xc8\xfd\x24\xb1\x99\xb4\x59\x73\x24\xad\xf9\xfe\x9f\x20\xc1\x9f\x35\x24\xf8\xe8\x17\xa5\xf7\xb3\x1b\xa5\xf7\xd7\x1d\x94\xbc\x4c\x39\x2b\xf1\x36\x96\x98\x3a\x71\xee\xa0\x6d\x2a\x80\xc8\x72\x17\x58\xeb\xf9\x2c\x86\x00\x62\x81\x6b\x47\x3e\x09\xd0\xcd\x62\xb2\x45\xc6\x04\xe5\x9f\xb7\x69\x15\xc2\xb0\xb5\x79\x0e\xc6\x38\x42\x1b\xaf\xd4\x83\xa5\x71\x03\xab\xb5\x27\x70\x99\x57\xc3\x2d\x93\x51\x69\x67\x4a\xe1\x5f\x15\x59\xbf\xe7\x62\xee\xad\x22\x2b\xbb\x85\xc8\x7a\x94\xc3\x04\x22\x13\x88\xeb\x08\xd4\x3b\x8b\xa1\x72\xa4\x01\xae\x39\xfa\x15\xa9\xe6\x84\x32\x3f\xec\xcd\xfd\xab\xde\x24\x4e\x8a\x2f\xd7\x76\x3a\x75\xa8\xfa\x17\x75\xf7\xa7\x64\xa6\x8b\xbc\x7e\x72
\x53\x8e\x59\xb9\xab\xe4\x27\x99\xd6\x1a\x4b\x26\xec\x60\xcd\x04\xa4\xfe\x77\xb8\x45\x4f\xfe\xfe\xf7\x3b\xdc\x62\xf3\x9f\x3f\x8f\x62\x92\x21\xe0\x18\xe9\x2c\x5e\x19\xb5\xcd\x14\xf8\xe5\xc6\x8b\x65\xe8\x73\x7f\xb0\xf0\xa3\xac\x67\xdc\xa5\x42\x18\x20\xbb\x48\x34\x4c\x38\x95\x3f\x77\xcc\x16\x2c\x2c\xa4\x0c\xf1\x66\x69\x4b\xdc\x47\x2a\x9c\xc9\x19\x7e\x2d\xfe\x60\x94\xc4\x87\xdc\x11\x4c\x7f\x0d\x2f\x6e\x60\x9a\xaf\x6e\x90\xa6\x9e\xdf\xc0\x94\xdf\x77\x3c\xd7\x51\x8d\x9f\x6f\x21\x64\xb5\x0a\x53\xe5\xc9\xbd\xdd\xf6\x7c\x9b\x8b\x21\x0d\xc9\x8b\x9c\x24\x96\xbb\x6a\x18\x74\xb9\x16\xc7\x2c\xfa\x50\x51\xc9\x37\xb9\xca\x81\xfb\xb1\x93\x8a\x48\x06\x1c\x09\x16\x5c\x96\x9d\x6c\x3b\x1f\xc7\x62\x5b\xa7\xfc\x86\x42\x73\x91\xe5\xae\xd6\x85\xe5\x27\xf2\x55\x32\x74\xd3\x34\x1b\xe7\x7e\x55\x2b\x61\x6f\xa1\x09\xb5\x00\xc7\x1d\x98\x93\x69\x02\x06\xa7\xec\x38\xf2\xfc\x4b\x4c\xa5\xcf\x65\x45\xc2\x8d\xb3\xc2\xc4\x0f\xa9\x5c\xcc\x0e\xef\xcb\x3a\xc8\xd1\x90\xbc\xc9\x25\xdf\x97\x46\xc8\x64\xf0\x70\xcb\x72\x97\x78\xf3\x56\x55\x41\xb3\x7b\xc6\x5d\x99\x03\x01\x3d\xcf\xfd\x0a\x4b\xfa\xd4\x69\xbc\x48\x2c\x96\x20\x9f\x9e\x8f\x48\xe6\x10\x8e\x49\xd0\x8f\x80\x3a\xdc\x62\x4b\x70\x0b\xa9\x0a\x91\x70\xf8\x4b\xa2\x95\xaf\x44\xab\xa0\x10\xa4\xfc\xdb\xbf\xef\xcb\x77\xb7\x6d\x08\xae\x50\xaf\x89\xe3\x48\x07\x10\xcb\x5f\x95\x58\xfe\x4a\x62\xf9\x9f\xd9\xa5\x42\x56\xdb\x2c\x3b\x27\x04\xb3\x0c\x85\x8d\x0f\x39\x4a\x26\xef\xf3\x86\x98\x56\x11\x65\x8e\x73\x73\xf4\x5c\x68\xd1\xb6\x12\xd0\xc4\x95\x2b\xaf\x4e\x72\x73\xf4\x22\x97\xe6\x9a\x0c\xba\x32\xfe\x4a\x48\xc6\x50\x3c\x36\x59\x2b\x98\xa9\x82\x8b\x4a\x33\x61\x08\x09\x3b\x76\xe7\xe5\x59\x8c\xe2\x44\x3b\xc3\xe1\xff\x28\x0d\xc8\x1d\x24\xaf\x57\xbb\x92\x2e\xab\x25\x19\xfc\x9c\x13\x74\x43\x47\xfa\x67\xcf\xc9\xc7\x1c\x22\x48\x2c\x36\x2b\x5c\x08\xb5\x10\xb0\x36\xe1\xcb\x16\x49\x41\xc8\x08\xb1\x14\xea\xcf\x65\xe8\xdc\x31\x0a\x0e\x6c\x2a\x05\x87\x3d\xc0\x20\xb6\x10\x98\xf8\x33\xc7\xec\x36\xec\xc1\xf6\xf3\x22\x97\x26\x5e\xaf\x2a\x1e\x6c\x3e\x1c\xcc\x7c\xea\x55\xd5\xe9\xa3\x2a\x98
\xf5\x04\xa8\x71\xca\xd2\x5e\xa5\x2d\xde\xd0\x2f\xec\x12\x37\x84\x21\xa8\xe0\xb6\x20\xc4\x1a\x52\x0a\x1f\xe5\xe1\x9f\x74\xc1\xc2\x76\x3b\xcd\x76\x04\x1d\x2c\x5d\x78\x9e\x12\x7f\x64\x3c\x47\x59\x8f\x48\xb1\xc5\x34\x6c\x79\xc3\xd0\xb6\x8d\xbd\xb0\x56\xaa\x6a\x4e\x3e\xe5\x62\xa5\x27\x10\x60\x60\xe9\xa7\x10\x4a\x77\x49\x48\xac\xd7\x30\x09\xf5\xfd\xf2\x8c\x1c\x12\xeb\x0c\xe2\xb0\xb4\x98\x6c\x2c\x88\x02\x14\xf9\x79\xf1\xa8\x1c\x35\xd6\xa3\xbe\xce\x01\x63\xf8\x5d\xdc\x6f\xfd\x58\xd5\xf7\xba\x12\x20\x5c\xeb\xbc\x7f\x93\xb8\x5a\x9e\x6f\x36\x0e\xbd\x1f\x48\x76\xf2\x32\x20\x84\x3b\x77\x30\x72\x71\x47\xb0\x36\xbe\xd6\x3e\xae\xe9\x68\x9b\x5b\xd9\x83\xaa\xb1\x68\x3f\x0e\x43\xba\x4c\xfd\x1e\x0d\x43\x75\xe0\x65\x98\x5f\xed\x2d\x6e\x62\xb5\xd7\x65\x30\x4f\xfd\x65\xfd\x99\x2f\x53\xb8\x24\x1f\x42\x98\x00\x83\x54\x08\x05\x92\xdc\xbe\x94\x66\xe8\xd9\x8e\x61\x42\xe4\xd5\x62\x4a\x5e\xe6\x65\x4c\x49\x14\x73\x7d\xf2\xa6\x7a\x4c\x3c\x69\xbf\xe6\x49\x1c\x4d\x0d\x13\xb8\x77\xc3\x69\x3d\xb5\xbe\xe9\x50\x57\xb7\x59\xd2\xb0\x86\xda\xd2\xa4\x24\xfe\x19\xb8\x71\x98\x2d\x64\x4e\x43\xa5\x67\x54\xa4\x6c\xed\x3d\xa0\x0f\xc6\xd4\xc4\x13\xaf\x98\x78\x52\x2e\x40\x24\x6e\x2d\x72\x42\x31\xe5\xd9\x0a\x45\xa3\x03\xa4\x86\x9b\xfa\x6b\xaf\x7a\x51\x48\xf4\x85\x9b\xc2\x46\xb2\x92\xdb\xcf\xfc\xdf\x33\xdb\xca\xc4\xaa\x89\xb6\x02\xc7\xad\x15\x2e\xff\x85\xd5\x55\x8e\xd2\xff\x1d\x8b\xab\xf5\xb8\x26\x7d\xaf\x35\x9b\xf8\xbe\xc7\xa8\x5b\x75\x66\x0d\xb4\xc8\x20\x74\x71\xaf\x3b\xfd\x2a\xca\x40\xf7\xfe\x8b\xc4\xd1\x4f\xd1\xe5\x2e\xb6\xe8\x9b\xda\x8d\xde\x77\x22\x73\x64\x78\x94\xd3\x81\x71\xd7\xb7\x7d\xb8\xf7\x5f\xff\x4c\xff\x27\xa9\x38\xdc\xff\x14\x0f\x6d\xa1\x7b\xfc\xf3\xde\x8c\x2f\xc2\xe2\x55\x21\x80\x08\x69\x9e\x9b\x95\xc8\x3f\xd9\xd5\x34\xa1\x8b\xc5\x37\xdf\x0b\x30\xc6\xcd\x98\xd0\x30\x15\x5c\x3b\xf3\xba\x4d\xac\xc7\x68\x45\xfa\x7d\x38\x8a\xec\xe1\xef\xc7\x58\x86\x69\x94\x60\x15\x26\xea\x75\x39\x1a\x6b\x0f\xe3\xe4\x9f\xd1\xcf\x7f\x26\x3f\xff\x19\x49\x47\x63\x57\xa2\xa6\x98\x32\x4d\x7c\x6a\x98\x10\xb4\x76\x21\xc5\xc3\x24
\x5e\xa5\x28\x1a\xa2\x61\x2b\xf6\xb6\xa6\x34\xbe\x24\x99\x07\x0f\x60\xe7\x11\x56\xf0\xf3\x64\x5e\x69\x79\x36\xf3\x6f\x41\xef\x64\x3b\x04\x46\x25\x04\xba\x9e\x02\xc1\x4c\x83\x20\x47\x10\xf4\x6f\x8d\xdf\x10\x78\x28\x27\x78\x7f\x01\xa2\xff\x5b\xe6\xdd\x36\xc5\xbf\x1a\xe5\xff\x1b\x16\xbc\xb0\xdd\xd4\x3e\xe6\x5f\x46\xfe\x49\x1b\x6c\x57\x0a\x8f\xb0\x8e\xe7\xda\xde\x9d\x76\x3c\xd7\x41\x63\xa1\x27\xb4\xbf\xa7\x6b\xc8\x3d\x55\x4f\xdb\xf3\xda\xf2\x98\xef\xa0\x17\x42\x25\x94\xa6\x3d\xa8\xaa\x1a\x85\x54\x73\x9b\xd2\xc6\xb3\x56\x37\x33\x81\xcb\xdb\x7c\xcc\xea\xcf\x5b\x1d\xcc\x64\x93\xfa\x71\x43\x19\x14\xd2\x76\x76\xf1\x6b\xd3\xa6\xea\x28\x67\xc3\xcb\xaa\xd5\x68\x93\x94\xfa\x5e\xbf\xb9\x98\xd5\x2a\x59\xed\x67\xeb\x32\xd9\x88\x23\x93\x8d\xc8\x6c\x80\x68\xe6\x3b\x12\x42\xd8\x76\xef\x5b\xed\xae\x84\x8e\xb4\xdc\x62\x4d\x13\xf8\x76\x4f\xda\x7f\xcd\x4f\xb6\xe2\x15\x9b\x28\xaf\xd8\x44\x79\xc5\x9e\x86\x98\x4e\xbe\xf0\x76\x45\x71\xfb\x35\xf8\x35\x6f\x57\xdf\x92\x4e\x9d\xb0\x19\xed\x70\x7b\x9f\xd1\xa4\xdb\x67\xd4\xf3\x30\x58\x22\xd3\x7b\xb2\x68\xee\x49\x9b\x9b\x18\x1d\xf6\xca\x58\x0d\xe9\x7c\xea\x91\xc8\x54\xb5\x20\x67\x92\x0b\x2d\x0d\x13\x96\xcd\xfe\x5a\xa2\x6e\x0b\x27\x6e\xbd\x21\x47\x31\xe1\xe6\xa8\xb1\xae\xe5\x82\xcf\xbc\x16\x12\x85\x1e\x63\x3a\x67\x61\xc5\xa8\x28\xff\xb7\x7f\xb9\xbb\xdd\x7a\x5f\xb6\x8c\x3e\xc0\xec\x6d\x15\x2d\x62\xe1\xa1\x87\x79\x93\x6b\x28\x7f\xe3\x3a\x3b\xac\x44\x0f\x37\xea\x8b\xdc\x3e\xae\xd9\xd7\x24\x70\xda\x41\xc2\xb4\x09\x6e\xd5\x26\x77\x74\xc7\x3d\x57\xa7\x5c\xe1\x7e\x7b\x21\x4c\x3d\x81\x58\xf2\x53\x37\x53\x85\xa9\xf8\xe7\xcd\xd8\x85\xf7\x29\x06\x4b\x79\x8e\xb1\xe0\x83\x47\x06\xcc\xb7\xc8\x40\x15\xe2\x13\x6d\x10\x94\x4d\xfd\xe7\x81\xe6\x55\xda\x73\xf8\xd2\x53\xb5\x92\x76\xbb\x80\xb7\x88\x04\xd6\xc7\x89\xe3\x6d\x2d\x1f\xd6\x0e\x82\xb7\xb5\xbc\x5f\x6d\x79\xd8\xf6\x79\x95\x44\x0e\x0e\xb7\xe8\x43\x08\x1c\x6e\xb1\x43\x88\xc5\xd5\xe7\x67\xd5\x15\x4e\xbd\x4d\x1b\x7f\xab\x5d\x20\xd6\x2e\x9a\x63\xf2\x47\x48\xb8\x07\x2f\x42\x55\xe5\x67\xd7\x53\x69\x11\x92\x42\x15\xa5\x98
\x9e\xb5\xa1\x4a\x77\xf7\xb1\x2f\xfb\xc8\x54\x1f\x0f\x80\xda\xd9\xda\x84\xa0\xb5\x8f\x5b\xb8\x00\xc8\x13\x0f\x55\x5e\x2f\x86\xb1\x87\x25\x40\xa3\xd1\x25\x99\x7b\xd0\xe6\x44\xac\xcc\x98\x42\x72\x1e\xf0\x04\x1d\x2b\x0c\xa4\x8c\x50\x49\x1f\x54\x3a\xc7\x7e\x95\xc9\x27\x4f\x3d\x95\xed\xfe\x42\xe8\x03\x5b\xb4\xf9\x42\xe2\xa3\x21\x99\xb0\x8a\x7d\xf2\x75\xe2\xe7\x81\xbf\x52\xa9\x4e\xa5\x5d\xe4\x7c\x2b\xc0\xd6\x8e\x78\x1e\xf6\x8c\xbb\x57\x9e\xea\xef\xc2\x03\x4c\x44\x92\x39\x82\x94\x1b\x98\xf1\xc2\x37\x6c\xe3\x3d\x56\x0a\x34\x6a\x27\x12\x0b\xb1\x8c\xcd\x23\x3c\x1a\x92\x53\xaf\x9c\x5e\x26\x4f\x1a\x3a\x5f\xac\x1e\x8a\xf1\x9a\xd5\x35\xf5\xb9\x5e\xa8\x67\x0a\x18\xd7\x26\x1c\xdf\x40\x36\x4e\x3a\x9e\xeb\x93\x81\x6f\x1d\xcf\xf5\x71\xfa\xb5\xa7\x6a\xcb\x9c\xdd\x72\x09\x93\x78\x55\x5d\xc1\x02\x28\xcf\xf1\x38\x0b\x14\xc8\x4a\xde\xfe\xcd\x83\xd4\x33\x21\xf5\xc8\xb5\x67\x42\x64\x42\xd1\xfa\x40\x15\x42\x50\x2f\xdc\x57\x2f\x1c\xd7\x5e\xe0\x95\x17\xc6\x98\xb4\xa0\xda\xf9\x49\xad\xad\xf6\xcc\xd8\xf3\x5a\xb3\x2a\x60\xa0\x2c\x60\xb9\x97\x53\x59\xef\x85\x43\x20\xae\x96\x80\x15\x47\x2e\xd0\x2a\xd8\x1f\x9d\x11\xe3\xd4\x5f\xe9\xf4\xb9\x62\xa4\xd5\x04\x38\x7c\x48\x31\x4d\x99\x31\xf6\x02\x5e\x3e\x9c\x4e\x64\x5a\x27\x60\xce\x64\x23\x8a\xa1\x69\x9e\x99\x58\x14\x41\xf5\x92\x9c\x79\x40\x2d\xb6\x02\x8a\xa5\x13\xac\x2b\xc4\x62\x6a\xd1\x7e\x15\x6d\x8b\x05\xae\x65\x10\x69\x13\xf7\x8d\x57\xaa\x34\x63\x4f\x87\x69\x96\x82\x5d\xd5\xae\x2f\xf1\xa5\x64\xbb\x3a\x9a\x40\x85\x56\x5b\x46\x9b\x2d\xaf\x46\xdb\x4a\xff\xa2\x1a\x5d\x42\x7e\xae\xcf\xd9\x60\x6b\xb3\x0f\xd2\x39\xf2\xeb\xa6\x69\x0f\x9d\xd8\x0f\x3d\xb2\xf3\xfb\x17\x8a\x3e\x14\x19\xae\x54\x2b\xe5\x91\x14\x07\x83\x61\xc4\xe6\x5f\x7a\xea\x70\xaf\x00\x95\x22\x6f\xcd\x95\xa7\x32\x6f\x17\x90\x92\x7b\x26\xc8\x94\xdb\x2b\x12\x7b\x60\xec\xeb\xfa\x98\xba\xe9\xa4\x68\x1a\xca\xa6\xbb\x66\xeb\xae\x5c\x92\x95\x07\x31\x96\x41\x85\x4b\x72\x2e\xb8\x2d\x05\x56\x4a\xcc\xef\xa4\x60\x35\x7b\x64\x98\xf0\xb6\x8e\x82\x6d\x6a\x48\x55\xa8\x75\xc3\x60\x89\x07\x09\x2e\x27\x43\xc0\xff\x4c
\xa3\xe5\xa4\x61\x12\x5c\x96\xf2\x7a\x8d\xc8\xc5\x1e\x0d\x7b\x42\x15\x68\xb3\xd9\xe0\xd3\x81\xd0\xd5\xbc\x24\x5e\x56\x9b\x7d\xad\xc7\x03\x3c\x96\x0e\xa0\x16\x7b\x03\x2a\xef\xe7\x2b\x69\x5d\xb7\xdc\x93\x1b\xc2\x2b\x8a\x09\xe8\xc3\x93\x8e\x63\x84\x8e\xb9\x0f\xbc\x80\x86\xf1\xb4\x57\xbd\x50\xdc\x6a\xcb\x17\xa3\x2e\x8a\xd9\x4c\xba\x1a\x6c\x1a\xe9\xdf\x79\xf5\xe7\x2a\x71\x47\x9d\x96\xd7\x48\xb9\x1b\xc6\xa9\x3a\x78\x4e\x5a\x1d\x19\xda\x96\x3a\xf6\xae\x74\xa7\xbc\xbd\xc9\x24\x8e\x2b\x9c\xd8\xaf\x3a\xfd\xdd\xb4\x77\x6a\x85\x0b\x21\x0b\x0e\xb6\x89\x41\x55\x29\xe8\xa8\xb3\x21\x7b\x83\x6c\x7b\x37\xf1\x7b\x57\x71\xd6\x4b\x33\xf5\x63\x45\x23\xde\xe3\x71\x4f\x96\xbf\xad\x51\x8f\x91\x58\x94\x57\xad\x81\xd6\x35\x56\x79\x49\xc9\x9c\x1c\x78\xb2\x72\x58\xd5\x8b\x32\x8e\x26\x41\xb2\x90\x24\x84\x3e\xb6\x2f\x29\x71\x53\x13\xdc\x13\xdb\x18\xcb\xd1\x34\xd5\x15\x53\xff\xbe\x45\xf7\x69\xd3\x6a\x7b\x37\x46\xd1\x46\x98\x81\xe0\x38\x2c\xeb\xf9\xbc\xde\xb2\x8e\x35\x09\xf5\x43\xa7\xe2\x24\x13\x78\x5c\x6e\x46\x37\xb7\x6a\x1b\x37\x3b\xd2\x6c\x04\xa9\xe3\x4a\xbe\x6e\xae\xa4\x5c\x2e\xa3\x66\x78\xfa\xab\xc7\x38\xf0\x43\x9f\xab\x20\xf3\xb5\x09\x2f\x3a\xb7\x23\xcc\x49\x64\xde\x35\xfe\xae\xea\x10\xcb\xa4\x2a\x98\x40\xfd\x95\xd7\xe2\x76\x5e\xcd\x4f\x5a\x4b\xf8\x8c\xa5\x87\x0c\xdf\x0b\x38\x66\x2b\xe5\xce\x2f\xfa\x06\x57\x91\x55\xf0\x70\xa3\xea\xbd\xa3\xb6\x69\xc4\xed\x5f\xec\x75\x4e\x5e\x78\x80\xae\xf2\x58\xe1\xa8\x7e\x86\x2e\x45\x49\x89\x93\xcf\x3b\x4c\xbb\xef\x33\x19\x77\x60\x38\xff\xcb\xb0\x4b\x53\xc9\x43\x0c\x42\xc0\x37\xdf\xb7\x2d\xd3\x25\x25\x61\xba\x99\xa3\xa8\xb5\xd6\x71\xcd\x17\xc1\xf3\xa4\xab\x06\x3c\xf7\xa4\x1d\xf9\xf3\x16\x34\xfa\xcf\x0c\x58\x2e\x26\x5b\x51\x8d\xab\x38\x56\x41\xa7\xef\x1e\x0a\x8d\x29\x26\xde\x1f\x6f\x2a\xc2\xf2\x39\xca\xac\xd8\x82\xad\x5a\xb0\x05\xbb\x50\xa5\xbe\x65\x2f\x98\x33\x8f\xa0\x01\xe4\xc3\xaf\xfa\xa7\xd7\xb3\xd8\x54\x65\xff\x0f\x81\x8f\x24\xdc\x14\x68\x81\x99\x41\xc8\x07\x0f\x12\x59\x0c\xb6\xce\x01\x4a\x33\xce\x7b\x4f\x4c\xa9\x6f\x0a\xf9\x82\x44\xa3\x53\x4e\xe6\xe4\x48
\xbd\x65\xda\x17\xbc\x34\x1d\xae\x4d\x78\xd3\x0a\x80\x69\x42\x56\x09\x82\x82\x8a\xe6\xfd\x58\xd3\x01\x12\xa8\x96\xad\xe2\x1b\x56\x20\xf4\xe0\x28\x82\xf8\xa5\x4f\x46\x7f\x8b\x8b\x01\x9d\xad\xa5\xe7\xcd\x56\x97\x1a\xb6\xb7\x56\xae\x34\x58\x82\xd7\x04\xe9\x8f\xa5\x82\x0c\xf5\xfa\xe9\x63\x4e\x7b\x4e\xde\x14\x3c\xbc\x84\xbd\x72\x91\xda\x23\x70\xe4\xe6\x7d\xf6\x74\x70\x77\x84\xf5\xf2\x65\xc8\x4f\xd6\x66\x33\xe1\xed\x36\x13\x54\x2a\xd6\x26\x7c\xf2\xaa\x25\x01\xbe\xd4\xd7\x7a\x23\xc7\x85\x86\x50\xed\x92\x5e\x1f\xac\x28\x12\xbf\x31\x5a\x51\x71\x7e\xbd\x86\x97\x1d\x1a\x9d\xf6\xf5\x8a\xfa\xdd\x8a\x1c\xfa\x0f\x70\x88\xe4\x0e\xbc\xf4\xe0\x92\x9a\xd5\x1a\xcf\xc1\x84\x24\xba\x26\x57\xf2\xac\x38\xc5\x52\x09\xfa\xbe\x78\xda\x35\xa9\x62\x36\xa7\xde\xd4\xef\xe1\xbf\x83\x65\x10\x86\xf1\x4a\x5d\xa8\x0f\x50\xb8\x8c\xbc\x98\xc7\xcb\xde\xa2\x1e\x35\x20\x8f\xb4\x50\xd4\x59\x77\x0c\xf7\xd5\xc4\x9a\xd1\x26\x24\xfd\x2a\xb9\x4a\x54\xa4\xec\x03\x25\x4b\xff\x68\x31\x93\x77\x78\x61\x14\x84\xa8\x3b\x2c\x2c\xea\x63\xc9\x44\xf6\x59\xfc\xb3\x87\x71\xd5\xc5\x04\x6f\xd9\xef\x5c\x76\x32\x14\xf0\x81\xf3\xe7\x7d\x0c\x10\x9b\xf1\x45\x78\x18\x27\x86\x09\x7e\xdf\xf1\x12\xc8\xfa\x4e\x3f\x01\xda\xff\x0f\x96\xc6\x76\xa9\x4a\x23\xf6\x01\xd4\xa1\x0c\x5f\x9b\xed\xd2\x59\x41\x72\xdc\xce\x0f\x52\xa0\xaa\xe5\xa7\xa0\xd9\xf0\xbf\x43\x7e\xda\x15\xdf\xee\xf6\xff\xad\xf2\x53\xd7\x18\x1b\xf2\x53\xdc\x97\xa9\x28\x0d\x13\x26\x5d\xab\xd6\x6b\x1c\xbd\xc9\x83\xc1\x2d\x0a\xfa\x7d\x74\x0a\xc7\xb6\xb5\x8c\x01\x73\x12\xf7\xdb\x0e\x0b\xeb\x26\xc3\x9d\xe1\xcd\x36\xc3\xa4\xd0\x77\x59\xbf\xdb\xec\xd2\x99\x36\x60\x17\x33\x8d\xdf\x2f\xd3\x06\x2c\x15\x28\xde\x60\x4a\x29\xe3\xb5\x60\x23\x57\x40\xe1\xa0\xf5\xca\x93\xe9\xcb\x82\x3e\x70\xc5\x47\xe5\x47\x4e\xfa\x60\x1c\x1f\x18\xf0\x1c\xa9\xfa\x87\xe2\x9e\x14\x05\x7a\x94\xe3\xa3\xe3\x10\x0b\xef\x99\xc5\x63\x21\x49\xd4\x1e\xd2\x71\xf9\x50\x5a\x0b\xbd\xfa\xcb\x07\xe5\x73\x69\x56\xf4\x7a\xec\x4a\x8f\xbb\x5f\x3e\xd3\x16\x08\xf9\x60\xb7\x3a\x21\xee\xe3\x6d\x41\x07\x15\x26\x14\x4f\xb5\xcd\xc7\xa8\x9d
\x11\x21\x55\x7f\xee\xa9\xb3\x22\x53\x95\xd3\x33\x21\x13\x32\x82\x2f\x65\x04\x8a\x2b\x32\x2c\x64\x84\xb5\x09\x69\xbf\x5d\x3c\x4d\xc2\xa6\x6e\x5f\x03\xa2\x16\x2b\xd3\x29\x5d\xf8\xb6\xb2\x44\xc9\x3c\x68\x0f\xdb\x85\x98\x2d\x7d\xec\xca\x4a\x12\xf5\x6e\xa2\x32\xca\x57\xa2\xcb\xec\xbe\x61\x42\xde\x6f\xca\xc7\xd2\x6e\xa1\x4a\xb8\x5f\x49\x9b\xc5\x93\x67\x55\x05\x51\x7d\x55\x58\xa2\x81\xb1\x2f\x4b\x29\xf4\xce\x65\x4d\x04\x1c\xb8\xce\x18\x79\x23\x3f\x8d\xe4\x93\xbc\xfd\x58\xa8\x0b\x47\xcb\x4f\x16\x58\x96\x2e\xea\x41\x74\xb8\x7c\x46\x2b\x42\xa6\x8b\xc1\xce\xb0\x25\xf0\x5e\x88\xa4\x7f\x7e\x60\xf9\xdd\xb7\x19\x5a\xa3\x1c\x49\x84\xd4\x73\xa6\x44\x00\x43\xc0\x14\x34\xe5\x73\xbf\x59\xf1\x46\xbe\x50\x46\x61\xdc\x19\x6e\x12\xf3\xa2\x8d\x3e\x4f\xc3\x36\x8d\x44\xe8\xaa\x91\x32\xa9\x63\x5c\xf5\x9a\x88\x59\x7e\x12\xda\x91\x50\x25\x7b\x86\x44\x8c\x3d\x2a\x7d\xb9\x5f\x05\xd2\x41\xbb\xfc\xf0\x2d\xa2\x9a\x9e\xb2\xaa\xc0\x52\x9f\x63\x59\x7d\x65\x53\x54\x2b\xc2\xc3\xd7\x6b\xe2\x57\x2d\x39\x75\x70\xca\x3a\xc0\x29\xfb\x2b\xc1\xe9\xb5\xef\x27\xe5\xa6\x1e\x85\xed\x9b\x2a\x03\xa6\xfb\x10\x69\x78\x42\x34\xf3\x24\x9a\xb9\xb1\x87\xc9\x6c\xe5\xd5\x52\xb0\x46\x58\x74\x10\x8c\x0e\x6a\x5d\x41\xa9\x3f\xff\x49\xef\xb1\x4e\xcb\x2d\x71\x64\xc1\x84\x10\x31\x2e\xb6\x20\xef\x0b\x52\x72\x25\x23\x41\x2c\x76\x04\x5d\xe4\x40\x95\x75\xe9\x1d\x47\xb2\x52\xb3\xca\x79\xf1\x57\x7c\xc0\x1e\xd6\x7d\xb9\xe5\x07\x70\x8b\xf2\x36\x0e\xfd\x2f\x0e\xad\x4b\xd3\xdc\x7e\xf4\xec\x2f\x1e\xfd\x7d\xea\x27\xb7\x1f\x3d\xff\xeb\x46\x7f\xae\x4b\xf5\xdc\x7e\xf4\xb7\x7f\xdd\xe8\x6f\x55\x6d\x9f\x5b\x0f\xce\x5a\x5d\xab\xff\xb5\xc1\x7f\xf5\xc3\xd9\xf3\x92\x6a\x61\x4c\x64\x64\xd1\x3d\x8b\xfe\x56\x73\xc4\xae\x31\x4f\x59\x2a\x49\x75\xdf\x2f\x26\xb3\xac\xc4\x10\xd4\x92\x8f\xff\x6d\xf2\xdb\xe4\xb7\xc9\xd3\x5a\xe2\xf1\xc1\x84\x2e\x82\xf0\xca\x00\x63\x11\x47\xb1\x4c\xf0\x54\x7c\xa0\xd7\x6f\x78\xb4\xaa\x40\xa9\x67\x8a\xe7\xaf\x61\xd6\x2a\x0b\xe8\xc0\x01\x27\xb2\xdc\xeb\xa6\xf7\xe3\xe3\x52\xb8\x7f\x99\xc2\xa2\x0f\x52\xe9\xb8\xde\x2c\xd3\xdc\x96\x23\x83
\xa5\x52\xd4\x58\xca\x03\x92\x0b\x14\x35\xd8\xf7\x67\x1d\x7e\x33\xb4\xea\x41\xb2\x4b\x58\x1f\x7c\xe0\x40\xe1\xce\x9d\xac\x70\xe4\xe8\x74\x41\xa9\x3b\x70\xac\xd7\x24\xb2\xdc\x71\xd3\xff\xe3\x4b\x2e\x3e\xe0\x04\xdc\x22\x29\x7e\x91\x14\x93\x3b\xb8\x93\xee\x81\x69\xd1\x63\x99\x83\x71\x0f\x30\xc5\xee\x4c\x66\xd8\xfd\x0e\x9d\xd9\xb2\x1e\xb6\xe9\x18\xca\xa7\x8c\xf7\xdb\xb3\x60\x7c\x6d\x8d\x60\x28\x3d\xfd\xc3\x14\xad\x0f\x57\xb9\xca\x1f\x0b\x97\xc4\xef\x43\xd2\x47\x57\x56\xd8\x25\x59\x1f\x3e\x7a\x40\xf1\xf2\x6b\xa3\x2c\x9e\xec\x87\xc2\x2e\xd9\xf3\xe0\x82\x8b\x0f\x16\x5f\xfe\x0d\x22\xcb\xdd\x6f\xd8\x70\x6a\xad\x4f\x39\x11\xcb\x03\x1f\xb0\xee\x45\xb5\xfd\x6f\xdd\x0b\x1f\x79\xeb\x35\x2c\xfb\xce\x8f\x3d\xbb\xac\xd2\x03\xf4\xa1\xad\xce\x55\x8d\x35\x4c\xf5\xd3\xd2\x6e\x2d\x9e\x9f\xeb\xab\x35\xac\x8a\x16\xaa\xc6\x19\x3e\x97\xbf\xd7\x70\xd5\x17\x02\xd5\xb2\x0f\xd3\x3e\xac\xfa\x20\x5a\xea\x5c\xd2\xcb\x24\x5e\xf8\x7c\xe6\x67\xa9\x15\xc4\xf7\xbc\xd8\x4d\xe5\x14\x82\x68\x2a\x7f\x2c\x68\x44\xa7\x7e\x72\x4f\x76\x79\xe4\x87\x4b\x63\x2d\x38\xbb\x1c\x50\xde\x35\xd6\x70\xd9\xa9\x5e\x1e\xb6\x4a\x42\xcd\x82\x85\xcb\xfe\xa6\xe2\xdd\xd7\x6b\xad\x36\x48\x41\x5d\x1d\x7d\xa6\xfd\x8d\x14\xac\xab\x7e\x43\xbb\xee\x0b\xa8\x36\x21\x31\x47\x46\xaf\x4c\xd8\xb0\x36\x61\x77\x8b\x52\xdc\x92\x86\xeb\xee\x9c\x5c\x2a\x9d\x5b\x03\x6d\x58\x6b\xa5\xaa\x13\xcc\x42\x92\x58\x7b\x98\x61\x4f\x05\x12\x42\x3d\xac\x50\x6a\x47\x63\xa5\x6e\xa8\xc3\x43\xd8\x97\xd7\x11\xcd\x0d\x13\x0e\x3b\x24\xa1\x71\xbf\x32\x20\x13\x82\x1d\xfe\x19\xf0\x78\x3a\x0d\x7d\x21\x1f\x0e\x16\x9e\xbe\x19\x06\xd3\x19\x2f\xdd\x37\x17\x6c\xf0\xa8\xb7\xe4\x83\x07\xbd\xa5\x0c\x5d\xa9\x25\x1b\x64\x31\xe7\xf1\xc2\x00\x63\x67\x79\xd9\x4b\xe3\x30\xf0\x7a\xc9\x94\x51\x32\x84\x9e\xfc\xdf\xda\xb9\xff\xc8\x2c\xf1\x75\xbf\xc2\x1a\x22\x4e\x83\xa8\x7a\x02\x5a\x5b\x17\x31\x15\x96\xd0\xc8\xd3\xf6\xea\x86\xf7\x80\x82\xb2\xa6\x3c\xa9\xde\xc6\x35\xf9\x2a\x59\x86\x94\x2a\x77\xfb\x24\x31\xe1\x0a\xb5\xd0\xa6\x72\x50\xad\x8e\xda\xea\xde\xd6\x30\xc0\x6c\xb0\xc2\x20\x0a\x83\xc8
\x2f\x43\x2a\x9b\xdf\xd5\x11\xd4\xbe\x61\x8f\x8f\xfc\x55\x4d\x0b\xab\xb8\x80\x14\x5e\xcc\xa4\xea\x9c\x93\xac\xe1\x74\xbb\xa9\xa6\x46\x46\xd5\x39\x0a\xf5\xd0\xb8\x0b\xc6\x43\x8c\xdc\xd6\xc1\x98\x2a\x7d\x64\xd2\x74\xbf\x85\x43\x21\xb0\xba\xd7\x9b\x5c\x5b\x6d\xa4\x00\x90\x82\x34\xcf\xfa\xa4\x92\x7f\xe5\xa2\x0d\x30\x7f\xf4\xed\x1d\x9d\x80\xe5\xbc\xab\x81\xf6\xc9\x3e\x6e\x05\x6d\x56\x9c\x19\xc0\x49\xdf\x99\x26\xc4\x48\x30\x95\xdf\xb7\xce\xd5\xb8\x24\xc7\x7d\x30\x10\xe7\x40\xe6\x3e\x8f\x4c\x38\xe9\x13\x23\xe5\x57\xa1\x9f\xce\x7c\x5f\x7a\x45\x67\x21\x18\x61\x4c\x3d\x99\x6e\x81\xcc\x31\x43\x98\xa9\x9f\xf8\x49\x12\x27\xea\xd1\x55\x40\x8c\x43\x1a\x84\xbe\xd7\xe3\x71\x4f\xbc\xd3\xdb\x3f\x3f\xef\x4d\x92\x78\x21\x2b\x58\x9a\x2a\x41\xc3\xda\x84\xeb\xca\x57\xb4\xed\x10\x89\x9c\xc4\x72\xc3\x1a\xeb\xfb\xd6\x87\xe8\xae\x11\x06\xec\x1e\x8b\x63\x9e\xf2\x84\x2e\x07\x0f\xad\xa1\x35\x1c\xd0\x70\x39\xa3\xd6\xe3\x81\x17\xa4\xfc\x9e\x9b\xa6\x65\x03\x6b\x11\x44\x96\x2b\xd4\xd9\x0b\x74\xb8\x28\xfb\x40\xd9\x86\xae\xfc\x34\x5e\xf8\x83\x87\xd6\x6f\xd6\x10\xdf\xac\xde\x2e\x5f\x3e\xef\x4b\xa1\xab\x82\x2b\x52\x2d\x3c\x23\x89\xc5\xde\x42\x62\xb9\xcc\x7c\x56\x56\x10\x48\xf4\xaf\x02\xa3\x68\x45\xea\x28\xed\xa8\x2d\x85\x9d\x8b\x0f\x3e\xec\x93\xe4\xf6\x30\xa6\xc5\x2f\x4d\xf5\x59\xe2\xd3\x79\x2f\x2a\x50\x55\x5e\x27\xeb\x4d\x06\x7c\x2a\x2d\xea\xb4\x7a\x94\x12\x4c\x88\xcc\x0b\x28\xe6\xa7\x3b\xd2\x1d\xd4\x28\xc1\x7a\xb3\x1f\xcc\x66\x45\x0a\x7c\x8c\xd6\x70\x56\xdf\x69\x75\x88\x61\xb9\x29\xb8\xe2\x0f\x07\x5e\xad\x99\xc5\x2d\x4a\x22\xf2\x29\x21\xa6\x69\xae\xb5\x50\xf6\x39\x21\x3f\x5e\xd9\x45\xab\xa2\x8c\x68\x8f\x5b\xd4\xa1\x10\x71\x8b\x7a\xde\x38\xf7\x23\x7e\x12\xa4\xdc\x8f\x7c\xa1\x11\xc7\xcb\x54\x9a\xea\xb8\x29\x5a\x44\x34\x0f\xa6\x94\xc7\x89\x95\xa5\x7e\xb2\x3b\xf5\x23\x6e\x05\x91\xe7\x5f\x9e\x4d\x88\xf1\x2e\x09\x3c\x74\x51\xf9\x7d\xf8\xf3\x67\x6b\x77\x33\x9a\xce\x74\x36\x4a\xde\xac\x3e\x11\x4c\xc8\x1d\xb1\x5b\x3c\x09\x5f\xfa\x57\x3f\x7f\x26\xd6\xc2\xe7\x54\xfd\x4c\x67\xc1\x84\xe3\xef\x9d\xdf\x85\x40\x87\xa5
\x8d\x7e\xfe\x8c\x2c\x99\x32\x4e\xfc\xf2\xe2\x55\x24\x10\xc6\x34\x7f\x24\xd6\x32\xf1\xc5\xe0\x07\x72\x37\x48\x19\x69\x30\x4b\xfc\x09\xf8\x8e\x58\x1d\xc8\x9c\x8b\x98\x70\xd3\xa2\xcf\x28\x71\x49\xf6\xf7\xbf\xfb\x16\xf3\x1d\xc7\xc9\x2c\xe6\x8b\x0b\xfa\x1e\x2f\xe8\x7b\x7c\x42\x2d\x2a\x9f\x51\x8b\x8e\x94\xa7\x44\xb6\xb6\x95\xef\x09\x5f\xa3\xc1\x7b\x0d\xee\xd4\x6e\x3d\x76\x49\x2c\x77\x0a\x11\xe0\xc0\x5c\xb4\x7b\x63\x27\x96\xfb\x06\xdc\x33\xf1\xf7\x0c\xdc\x23\xf1\xf7\x48\x74\xb1\xd7\x77\xbe\x44\xa4\x6f\xc2\xbb\x4e\xca\xa3\xdc\xac\x46\x89\x8d\x45\x0c\x30\xdf\xbd\xac\x5e\xfc\x76\xcb\x19\x99\xaa\x9f\x35\xe2\xf6\x11\xe1\x70\x24\x9e\x5a\x54\xa6\x6a\xee\x3b\x67\x7d\xf2\xc3\x9d\xda\xaf\x53\x70\x53\xfb\x8a\x82\xcb\xed\x86\x66\x50\xfa\x15\xf4\x47\xda\x35\x37\xb2\xe8\xda\x96\x9a\x14\x68\x7f\xe4\x4b\xf2\xb6\x0f\xc6\xdf\xf0\x68\xf9\x0c\xe4\xd5\x48\x5c\xb9\x39\xa6\x04\x79\xd7\x87\xc4\x62\x14\x8e\x04\xea\xfb\xa3\xb2\xb0\x88\x5d\x56\x1b\x49\x2c\xfa\xde\xc4\xb4\xae\x13\xf4\xaf\x5d\x8b\x25\x3a\xf4\xc9\x5e\xdf\x14\x8b\x76\x96\x8a\x25\x6c\xa1\xf3\xec\x8d\x3d\x26\x7f\x5c\x4b\x96\x81\x07\x39\x35\x06\xbf\x5e\xaf\xcd\x67\x67\x81\xf3\xe3\x15\x0d\x22\xfb\x47\x10\x05\xdc\x3e\xe8\x93\xb9\x6b\x92\xa1\xb9\x5e\x43\x64\x8d\xc3\xc5\x48\xf7\xdb\x8b\x30\x59\x31\xe6\x9a\xc2\xa3\xe1\x5e\x10\xf5\xb8\x89\x7f\x92\x11\x66\x4d\x36\x1c\xc7\x1f\xed\x93\xc7\xa6\x1d\x91\xe4\x0f\xff\x2b\xf0\x3f\xfc\xaf\xa6\x2d\x7e\x3a\xe2\xa7\xd0\x67\xc6\xe1\x02\xce\x02\xd3\xc6\x5f\xce\x59\xb0\x26\x7c\x16\xa4\xe6\xb3\xff\x1b\x00\x00\xff\xff\x86\x75\x45\xdf\xa4\x82\x01\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\xfd\x09\x7b\xdb\xbc\xb2\x20\x0c\xfe\x15\x9b\xad\xab\x03\x44\x65\x46\xf2\x96\x84\x32\x8e\xc6\x49\xec\xec\x76\xe2\xec\xf1\xeb\xce\x07\x52\x90\x4d\x4b\x06\x15\x08\xf4\x12\x8b\xfd\xdb\xe7\x41\x81\xab\x44\x2a\x39\xdd\xb7\x67\xbe\x7b\xdf\x13\x8b\x04\x50\x28\x00\x85\xda\x50\x28\xae\x8f\x62\x19\xe8\x30\x92\x44\xd2\x7b\x27\x9e\x89\xb5\x99\x56\x61\xa0\x9d\x7e\x56\xb0\xa6\x88\x04\x05\x9a\xde\x2b\xa1\x63\x25\xd7\xb4\xcb\x99\x04\xed\x8e\x98\x02\x9d\xe4\xd5\x66\xa4\xa8\xa2\xc8\x26\x68\xc8\x41\xab\xbc\xa0\xdc\x5b\x06\x8e\x28\x90\x34\x49\x68\x01\x4a\x10\x51\x02\xb5\x05\xa2\x00\xa5\x97\x41\xad\x84\x2e\x88\x06\x0b\xbf\xdc\x81\x26\x71\xa9\x83\x6d\x88\x8b\x0e\xc4\x32\xb4\xff\xb4\xcf\x98\x08\xc8\x7b\x2d\x77\x1b\x12\x5e\xea\x76\x07\x78\xd1\x6d\xbc\x0c\xf0\xbf\x01\x13\x4e\x62\x28\xe3\x52\x46\x26\x26\x41\x09\x99\x5d\x08\x0a\x64\xf8\x32\xcc\xff\x3b\xf8\x05\x84\xc3\x02\x86\x65\x14\x39\x09\x4b\x28\x3e\x82\xb0\x40\x31\x58\x06\xfb\xff\x33\xac\x43\x12\xc0\x32\xde\x65\xc4\x03\x12\x95\x10\x7f\x0c\x51\x81\x78\xb8\x0c\xf9\xff\x9f\x63\x89\x48\x08\xb5\xa3\x29\x0f\x27\x22\xa3\xd2\x70\x9e\xc0\xa8\x18\x4e\xb4\x0c\xfc\xff\x65\x23\x1c\x91\x08\x9a\xc6\x58\x1e\xe4\x78\x81\xcd\x6d\x32\xc6\xa4\xcb\x07\xd2\x1d\x11\xf3\xde\x33\xfd\x10\x5d\x6a\x71\x6b\x5b\x40\x81\xe4\x56\xb5\x0d\x88\xbc\x15\x11\xa5\x86\xfb\x59\x43\x28\x86\xbc\xbd\xd8\x14\xe2\x72\x63\x12\x97\xda\xdf\x14\xed\xa1\x98\xc7\x9d\x65\x08\xc0\x17\x60\x10\x5e\x02\x33\x29\x83\x81\x62\x8d\x76\xeb\x00\x41\xb0\x0c\x8a\x04\x25\x68\x7e\x15\x1a\x14\x64\xf0\xa8\x1e\x1e\x84\xb5\x10\x49\x48\x93\x6b\xae\xd6\x46\x4c\x90\x62\x39\xed\xd2\x8c\x22\x45\x4c\x99\x60\xfb\x4a\xf1\x3b\x22\x29\xc4\xac\xdb\x8f\xf7\x64\x3f\xee\x74\xa8\x38\x8d\xcf\x98\x26\xaa\x13\xd3\x7e\xc6\xff\x13\x0a\xd7\x6c\x56\x81\x54\xc0\xd1\x05\x1c\xc1\xba\x7d\xb1\x27\xdb\x6d\xe5\xfa\x7d\xd1\xe9\x50\x7d\x2a\xce\x98\x72\x39\x28\x66\x5e\xe5\xc2\x6f\x22\xe4\xb9\xbe\x60\x02\x8e\x8d\x68\xa1\x09\x85\x21\x23\x8b\x1d\x64\x9b\xe5\x54\x9e\x25\x14\x56\x0d\x24\x03\x08
\x71\x8a\x8b\xa0\xc0\x59\xb7\xcf\xf7\x44\x9f\x77\x3a\x34\x3e\xe5\x67\x4c\x9f\xf2\xb3\x0c\x83\xf8\x54\x9e\x31\x05\x71\x42\xa1\x79\x58\x2a\x83\x9a\xcd\x94\xee\xf4\xb2\xb9\xd2\xc5\x5c\xa9\xd3\x38\x87\x2b\x4e\xf5\x19\x93\x20\xfe\x1e\x5f\x03\x4c\x20\x30\xc5\xcc\xee\xd1\xa7\xf1\x19\xa8\x7c\xea\xd5\x5f\x42\xda\xe8\xf5\xbb\x7b\x4c\xf4\xc5\xc6\x46\x0e\x48\x2c\x00\xa2\xd0\x5a\x9e\xe4\x55\xc3\xad\x19\xac\x24\x66\xb8\x15\xca\xf8\x3f\x5e\x18\x64\x1a\x1d\x0e\x66\x81\x72\xc8\x71\x2d\xe4\x9c\x80\x66\x93\x30\x10\x38\x82\xda\x7a\xb6\x77\x55\xf4\x2e\x37\x44\x3f\x43\x66\x2f\x6e\xb7\x49\x9c\xe3\x46\xfb\x19\xbe\x3c\x43\xb2\x13\x53\x08\x58\xb7\x1f\xec\x89\x7e\xd0\xe9\x50\x7e\x1a\x98\x75\x0e\xce\xb0\xaa\x2d\x89\xb3\x92\x8e\x30\xb4\x15\xe4\x34\xc0\x13\x0a\xf7\x2d\xaf\x9b\xd0\x42\xfd\xbb\x2a\x53\xf4\x7d\xcb\xeb\x01\xf7\x24\xf8\x9e\x4a\x70\x9b\x5e\xb0\x19\xb9\x2a\x55\x3f\x30\xac\x37\xc3\x4a\xb1\x16\x68\x26\x53\x6c\xfb\x7a\x63\xa3\x4f\x15\xbb\x22\xf2\x54\x57\x17\x38\x6f\x3e\xad\x36\x3f\x3d\xeb\x4b\xd7\xef\x4b\x26\x5d\x9f\x2a\x77\x1a\xcf\x2e\x88\x74\x79\xa9\xa5\xa9\x77\xbe\x92\x55\x9c\x9e\xf5\x95\xeb\xb7\xdb\xda\xec\x62\xb3\x97\x41\x33\xed\xfa\x54\x58\x70\xb8\x84\x2e\x07\xed\x72\x9a\xc3\x3d\x30\xfc\x9a\xc2\x1d\x23\x7a\x01\xb2\xe1\xf6\x19\xec\xb8\x0c\xbb\xdd\x16\xd5\x0e\x40\x30\xe1\xfa\x34\xb6\xdd\xdc\x16\xdd\x80\xa8\x76\x15\x9b\xae\xc2\xa5\x7e\x8c\x70\x28\x96\x77\xb1\xa7\x76\x3b\xae\xeb\x0e\x62\x16\xbb\x3e\xe5\xb6\xd3\xfd\x6a\xa7\x10\x57\x3b\xe6\xa6\xe3\xb8\xa6\x63\x23\x55\xb2\xae\x83\xfa\xae\xdb\x6d\xde\xdc\x3f\x70\xc6\x5d\x9f\x06\x16\x8b\x9b\x65\x2c\x80\x57\x31\x09\x68\x95\x9d\x69\x28\x04\xf8\x01\x31\x44\xe1\xce\x22\xa5\xeb\xb9\xec\x2b\xa2\x0d\x07\xd7\x44\x51\x9a\xe0\x7f\xfd\x12\x28\xf1\x57\xa0\x2c\x1f\x19\x9b\xda\x25\xc2\xd4\x8c\xb1\x1b\x3d\xe8\x7a\xe6\xc7\x9d\x1e\x6c\xf4\xbc\x1e\xf6\xb0\xc4\x7a\x4b\x3c\xaf\xa1\x28\x88\xe4\x2c\x9a\x08\x77\x12\x9d\x13\xd9\x71\xbc\xb5\xbd\x50\x6a\xa1\x24\x9f\xcc\xfe\xed\x50\x50\xe5\x3d\xf7\xcc\xec\x02\x7d\xa1\xa2\x9b\xb5\x03\xa5\x22\x45\x9c\x0b\xad\xa7\x33\xef
\xe1\xc3\xf3\x50\x5f\xc4\xbe\x1b\x44\x57\x0f\xc5\xe4\xea\x61\x10\x29\xf1\xd0\x9f\x44\xfe\xc3\x9e\xdb\x75\xbb\x0f\x2f\x42\xa9\x67\x0f\x9d\x8e\xec\x38\xee\xd5\xd0\x29\x89\xe7\xc3\x05\x9e\x09\x66\x63\x40\xcc\x8e\x70\xd5\xbb\x20\x68\xdf\xf0\x16\xcd\x84\x3b\x8d\xa6\x84\xd2\xbe\x29\xd3\xb8\x6c\x3e\x96\x97\xf8\x5b\x0e\xf5\xa8\xd8\x14\xe1\x88\xf4\xba\xdd\x3d\x4d\x33\xfe\x6a\x17\xff\x18\xfb\xa5\xb0\xde\xed\x87\x23\x22\x19\x63\x2a\xad\x61\xdf\x38\x91\x7f\x29\x02\xed\xac\x33\x7d\x37\x15\xd1\x68\x4d\xce\xe7\x32\x9e\x4c\x8c\xce\x90\xff\xca\x9a\x38\x59\xc7\x0e\xcb\xab\xb7\xdb\xcf\xc8\x0e\x85\xf5\x5e\xce\x0b\xe3\xb5\x50\xae\x49\xb7\xb5\xd7\x6d\xb7\x89\x64\x07\x48\x1d\xca\xfc\x35\x88\x48\x1a\x8e\xc8\xfa\x11\x91\x28\xad\xcc\x3f\xba\xd3\x33\xc3\x4b\xb1\xea\xf5\x33\xf4\x90\xb7\x5c\xb2\x19\x39\xa4\xf0\x71\x49\x87\x48\x6b\x1d\xa6\x9c\xbc\x58\xbc\x57\x19\x0b\xaa\x1d\x5d\x36\x3d\x38\x13\x83\xae\x27\xf7\x14\x92\x95\x99\x8b\x75\xe9\xb6\xd2\x72\xa2\xd9\x2b\xc3\xe9\xcc\xc6\xa1\x74\xa0\xbd\xf4\x85\x0f\xca\xf5\xf1\x85\x79\x0a\x40\xb9\x81\x15\x02\x86\x4f\xa2\xf2\xd2\x6e\xaf\x57\x1b\x5b\xee\x69\xd5\x98\x82\xb2\xe7\x73\x03\x6d\xd0\xf3\x94\xeb\x1b\x04\xba\x56\xeb\x7a\xdb\x30\x4e\x3b\x2a\xba\xd7\x4d\x28\xfc\x6c\xd4\x77\xd2\x4a\xbd\xe6\x5d\xd0\xdd\x7b\x95\x89\xbe\xc6\x1a\xac\xb1\x8a\xdd\xa5\xb6\x38\x1f\xc9\x5e\x77\x70\xa7\x3d\x3d\xb8\xd5\xde\x8d\x36\xaa\xc2\x6f\xd6\x2d\x56\xe3\xb8\x22\xbe\x72\xd1\x95\x97\x3f\xad\xca\xe7\xac\x06\x04\x9e\x2e\xd5\xfa\x54\x32\x28\x64\xf1\xfa\xa4\x8c\xd6\x7d\x92\x53\xa0\x40\x0a\xb4\x2a\xa4\x3c\x15\x67\xd5\x02\x95\xea\x96\xa6\x20\x1b\x04\x4e\xfe\x73\x36\x23\x2f\x4b\x94\xf4\xd2\x82\x37\x74\x34\xd3\x2a\x94\xe7\x25\xb2\xcf\xe9\xa8\xa3\x52\xd2\xf1\xb3\x57\xaa\x6f\x11\xba\xb2\x34\x90\x12\x40\xbf\xa4\xda\x94\x84\x2a\x72\xed\xa2\x6a\x05\x9f\x5f\x4d\xc4\x20\x3b\x86\x5d\xfd\x47\x85\x7f\xa7\xac\xd9\xb9\xe4\x4c\xb9\xc1\x05\x57\xcf\xa2\xa1\xd8\xd7\x24\xa6\x7d\xbe\xb7\xb3\xb3\xf9\x64\x77\x3e\xdf\xd9\xdd\xea\x3d\xd9\xe3\x03\x92\xaa\x74\x9f\xac\x52\x47\xc1\xa8\x79\x5e\xf5\x6d\x47\x9d\xc6
\x9d\x9e\x2d\x64\x9b\x34\xc9\x19\xd3\x65\x14\x4a\xe2\x38\x75\x14\x56\xa0\x79\x7a\x06\x15\x35\xcc\x6a\xbb\x39\x82\x86\x71\x04\x4b\x78\xc6\x9d\x0e\x04\x55\x5c\x83\xf9\x9c\xf0\x8e\x6d\x60\x90\x04\x83\x1e\xa7\xd4\xc8\x53\xe4\x90\x3c\xc7\x4c\x97\x30\xeb\xff\x47\x4a\x78\x86\x96\xb6\x68\xe9\xbf\x46\x4b\xe7\x68\x59\xd5\xdb\xa0\x66\xf6\x5e\x21\xcf\x90\x98\xde\xaf\xd4\xb0\x32\x74\x8c\x0a\x9f\xa2\x12\x33\x54\xe2\x79\x15\x15\x81\x2b\xb9\xbb\xb5\xd9\x9d\xcf\x77\x1e\x6d\x6d\x6f\xed\xf1\xf9\xdc\xe8\xb3\xa7\x1b\x1b\xe2\xcc\xa8\xae\x19\x16\xf1\x02\x16\xf0\xa5\x89\x16\x95\x3b\x9b\x4e\x42\xc3\xe3\x13\x0a\xaf\x9b\x6b\xe1\xd4\x62\xa5\x77\x35\x83\xa9\xd5\xce\xff\x82\x88\xad\x4e\x9b\x29\xed\x46\xad\x8d\xab\x44\xa1\xa9\xd9\x9e\x71\x75\xd4\xf1\x7c\x4e\x4c\xf5\x8d\x0d\x7d\xd6\x11\x96\x24\x04\xcd\x45\x50\x37\xc9\x64\x51\x3a\xfd\x2f\x56\x58\xb0\xff\xbd\x98\xac\x2f\xa0\xd2\x4b\x72\x9c\x28\x7c\x6e\x98\xdd\x8d\xde\x9e\x72\x43\x39\x14\xb7\xc7\x23\x3b\xc5\xdf\x9b\xd6\xa1\xcb\xd8\x42\xd5\x0f\x8d\x4c\x26\xb3\x79\xf2\x21\x1a\xf9\x36\xe1\x33\xfd\x2a\x6f\xcf\xf2\xb2\x8d\xac\x7a\x42\xe1\xeb\x12\x48\x3b\x55\xb9\x21\x12\x8e\x88\xde\xeb\x65\xfc\xb2\x55\x62\x8d\x5d\x40\xa5\x7e\xa3\xb7\x67\xa6\xa5\xc0\x14\xf5\x9f\x4c\x97\x17\x14\x44\x87\xe9\xaa\x06\x8f\x0b\xf5\x6d\x25\xc7\x84\x1f\x8d\x92\x53\x6e\xac\x50\x1d\xe5\x03\x34\x81\xdf\x34\xb7\x7e\xb8\xaa\xf5\x43\x35\xef\x62\xf1\x3b\xae\x2f\xdc\x69\x74\xd3\xac\xbe\xfe\x97\x6c\x94\xba\xea\xbf\x64\xbf\x58\x44\x26\x07\xcf\x48\xaf\x47\xbd\xee\x9e\x6e\xb7\xe5\x5e\x77\x3e\xd7\x46\xdf\xea\xee\xc9\x81\xee\x48\x4f\x5b\x2d\x1c\x7b\xe4\x9a\xcb\x4d\x3b\x3f\x52\x32\x7c\x15\x88\x70\x02\x2a\x7d\x18\x4d\xa2\x48\x81\x4e\x9f\x54\x14\xcb\x21\x88\xf4\x69\x12\x9d\x37\xca\x99\x76\x7b\xd5\xa8\xe7\xf3\x55\xa5\xeb\x8c\x65\xbc\x2d\x96\xec\x4f\xc6\x57\x46\x38\x86\xe3\x77\xf8\x1e\x8b\xb3\xe7\xd0\xd8\xd3\xed\x76\xb8\xc7\xd3\xad\x17\xb1\xb8\xbc\xe7\x14\xed\x07\x4c\x9e\x86\x9d\xce\x19\x63\x2c\x3e\x55\x9d\xce\x59\xbb\x4d\x7a\x66\x06\xa3\x01\xd1\x9d\x0e\x08\xd6\x33\x82\xab\xd3\x01\xe4\xcf\x8c\x91\xdd
\xad\xed\xc7\x8f\xdb\x11\x1d\x2c\x34\xf4\x7a\x34\x67\x89\x4f\x49\x30\x50\xde\x46\x0f\x75\xef\x84\x02\x97\xcd\x4c\x4d\xed\x65\x0c\x7a\x50\xed\x42\x57\x51\xa5\x03\xb3\xf9\xb5\x3b\x8b\xfd\x99\x56\x44\xc1\x26\xa5\x74\xa0\x3a\x9b\xde\x46\xcf\xc3\xa2\x53\x75\x46\xe9\xc0\xf9\xc7\xe8\xdf\xcc\x3c\x0d\x36\x36\x3d\xd5\xe9\x99\x0a\x1b\x46\xeb\x0b\x56\xa0\xb1\xd0\x9b\xa1\xa1\x84\x42\x24\x6b\x79\x5b\x5f\xee\xe5\x8c\x4d\x76\x3a\x39\x11\x96\x61\x48\x6a\x37\xf2\xf6\x63\xc3\xcd\x0a\xcb\x43\x26\xf9\x0f\x0a\xa3\x3a\x8c\x4a\xfb\xbd\x5f\xcc\x4e\x5f\x65\x1d\xc5\x6c\x01\xd9\x8d\xed\xc7\x96\x79\x76\xe7\x73\xb9\xc7\x62\xea\x2b\xc1\xc7\x7d\xc1\xe4\x03\xd1\x89\xb3\xfe\x8e\x89\xb2\xab\xe1\xd7\x0f\xca\x8e\xa1\x5b\x3b\x36\x51\x37\xb6\xed\xc7\xff\x16\xf3\xb9\xf8\xf7\xce\x23\x63\xb4\xec\xee\xd8\xa7\x47\x5d\xd4\x09\xc5\xde\x93\x47\xf3\x79\xaf\xbb\xb9\x27\x52\x74\x34\xeb\xed\x3e\xd0\x1d\xb1\xf1\xf8\x51\x22\x26\x33\xb1\x96\xbf\xd8\xd9\xe9\x57\x5f\x6c\x3f\x2e\x90\x96\xa0\x51\x15\x92\x8c\xfc\x69\x23\xc4\x25\x8e\xa8\x28\x04\x8c\xef\x75\x07\xd9\x6e\xf0\x78\x27\xe7\xb0\x6a\x2f\x48\x77\x44\xb8\xb0\x23\x3a\x1d\xda\x47\xfa\x0f\x07\x44\xb0\x1e\x68\xab\xb8\x2d\xd1\x7f\x48\xdb\x6d\x53\xb9\xa0\x78\x9e\x11\x7b\x3d\x83\x72\xce\x9d\xbe\x74\xf9\x96\xb1\x62\x3b\xcc\xb9\x72\x28\x48\x97\xdf\xa4\x8f\xa1\x43\xfb\x5a\xdd\x65\xc4\x78\xa4\xc9\x89\x38\x3f\xb8\x9d\xa2\x6f\x9f\x26\x01\xd7\xc1\x45\x49\xc9\xbf\xd4\x89\x61\xb9\x13\xd9\xcc\x73\xe3\xc9\xc4\x70\x11\xf7\x2a\x6d\xba\xda\x9f\x88\x72\x05\x38\xeb\x1a\xf5\x0c\x42\x56\x92\x64\x10\xb1\x8d\x5e\x9f\x77\x3a\x7b\xb2\xdd\x46\xb1\x23\x6e\x45\x40\x02\xa3\x23\x46\xeb\xe5\x9a\xfd\x02\xe0\x88\x89\xdc\x7f\x0a\x7e\xaa\x49\x8f\x68\xbf\xbb\x37\x4a\x27\x7e\xc6\xc4\xe9\xe8\xac\xef\x9f\x6e\x6c\x8c\xce\xd8\x6c\x70\xa4\xc9\x8c\x7a\x97\x3a\x89\x33\xe7\xd0\xf7\x18\xc4\x69\xf7\x0c\x84\x5d\x55\xe0\x70\x40\x7c\x4a\x29\x44\xe5\x4e\x73\x2d\xac\x78\xc5\x42\x48\xdd\x56\x25\x3f\x4a\x0c\x12\x78\xb6\x1c\x81\xd9\x59\x59\x3b\x25\xa6\x13\x8e\x5a\x55\xf9\x7c\x26\x1c\x91\xa0\xd3\xf9\x37\x8b\xf3\x7d\xdb\x2f\x1c
\x7f\x5c\x9d\xc7\x57\x42\xea\x59\x36\xc8\x2d\xc8\x9c\xf5\xca\x0c\x52\xe5\xaa\x4e\x5e\xf3\x54\x9d\xf5\x8d\x22\xa9\xce\x98\x30\x83\x15\x38\xd8\xcc\xb3\x69\x87\x2b\xa1\xa8\xbe\xd4\xc5\xe6\x19\x04\x70\x40\x34\xa5\x34\xa1\x66\xf5\xaf\x57\xf3\x0e\x9d\xad\x69\x79\x2d\x83\xca\x7a\xc9\x5c\x27\x0b\xb3\x65\x15\xb8\xb3\xd7\xc3\x74\xcf\x66\x2a\x45\xaa\x78\x72\x08\xed\x62\x50\x5a\x05\x9c\x0d\x64\xb1\x3e\xa5\x50\x5e\x98\x00\x4a\xda\xc8\x70\x99\x0d\x15\x6e\xdc\x2e\x0c\x33\x63\xb8\xc9\x94\x33\xd5\x7a\x20\x8a\x6a\xb9\x65\xda\x5a\xaa\xb7\x05\x23\x4f\xc2\x79\xe6\x16\xbe\x5a\xd5\xf5\x36\x5a\xd8\x17\x9e\x4c\x52\x44\x2f\x9a\x6a\xdb\x8e\x8c\xd8\x49\x28\x4c\x57\x08\x97\xb4\x1e\x68\xac\x79\x2e\xeb\x1d\xb7\x8b\x95\x41\x9c\x35\xfb\x5e\x97\x2b\x43\x7c\xb6\xca\x63\x5a\xd7\x00\x38\x36\xe1\xb5\x4d\x4a\x67\x6e\x0b\x8d\x20\xc0\x66\x41\x43\xb3\xd2\xe1\xda\x52\x43\x08\xb1\x69\xd4\xd8\x14\xa2\x55\x8d\x21\x3a\xab\x63\xb1\x25\xd6\x79\x67\x5e\xbd\xfe\x78\x7c\xe4\x4e\xb9\x9a\x09\xf4\xba\x2e\xb2\xcf\x8f\x31\x19\x93\xcf\x1c\x9c\x4f\x17\xe1\x6c\x2d\x9c\xad\xc9\x48\xaf\x5d\xf3\x49\x38\x5c\x33\x2d\xd7\xd7\x9c\x8e\x74\xaf\xc4\x6c\xc6\xcf\x05\x1c\x49\x03\x83\x22\xc3\xbd\x69\xa4\x04\xec\xf6\x52\x5a\x27\x6f\x41\x89\x77\x69\xad\xd9\x4d\x88\x28\xb8\x2d\x7a\x1f\xf0\x99\x58\xdb\xf2\x52\x9f\xa1\x1f\x45\x13\xc1\x4b\x2e\x43\x35\x78\x15\x13\x45\xbd\x7d\x49\x1c\xbe\xf6\xf4\xf8\xf8\xad\x03\x46\x51\x33\xad\x36\xb3\x56\x32\xbe\xf2\x85\x2a\x1c\x77\x6a\x80\xd5\xe5\xda\xab\xa3\x4f\xa6\xba\xb7\xb1\xd9\xdb\x7e\xb4\xfd\x78\x6b\x77\xfb\xd1\x9e\x6a\xb7\xd5\x5e\xf1\xdc\x6e\x93\xee\x1c\x35\x9c\xac\xab\xf5\x70\x76\x18\xca\x50\x9b\xd9\x9a\xcf\xd5\x7f\xf5\x16\xa1\x61\x35\x8b\xc2\xf6\x02\x0a\x0d\x78\x1f\xbe\x3d\xde\xff\x54\x20\xbe\x9b\xb5\x5a\xf4\x14\x65\xad\xd4\x5a\x28\x67\x9a\xcb\xc0\xbc\xfc\x88\x95\xb0\xa4\xe3\x38\x19\xc8\x8f\x9f\x4e\x5e\x1d\xbd\x28\x60\x3e\xf1\x4a\xb2\x2e\x1b\x8d\x74\x03\x5b\xdf\xbc\x2c\xea\xee\x64\x75\x5f\xc5\xc4\x2e\xa8\x7d\xff\x28\x7b\x8f\xcc\xdb\x0d\x67\x19\x13\x1f\x8c\xa5\xf5\x67\xc2\x41\xd6\xff\xdb\x57\x1f\x4b\x23
\x7a\xfc\xe7\x96\xb7\x32\x6d\x2a\xd7\xf6\x4f\x4e\xf6\xbf\x17\x8d\x7b\x5d\x2f\xb3\xf9\x86\xb5\x6e\x66\x55\x38\x97\xe7\xf3\x75\xa2\xad\x63\x2e\x13\x45\x29\xd0\xe3\xa7\xaf\x0f\x9e\x7d\x5a\xbb\x09\xf5\xc5\x1a\x5f\x1b\x85\x62\x32\x5c\x93\xfc\x4a\x0c\xd7\xfe\x1f\xa7\xa3\x3b\xce\xff\x83\x1d\x5a\x69\x70\x97\x22\x75\xaa\x8b\x93\xc0\x17\x9c\x08\x3a\x10\x1e\x6e\x87\xef\xa8\xbf\xa0\x47\xd6\xa2\xd8\xf3\xac\xae\x29\x5d\x81\x22\x61\x71\x9c\x0b\xc8\x14\x23\x0c\x47\x44\xe5\xc6\x71\x5c\xa9\xb6\xf6\xf6\xf8\xe8\xc5\xc1\xc9\x1a\x47\x58\x6b\x47\x42\x0c\xd7\x50\x9e\xac\x39\x9d\xb8\xe3\xac\xf9\xb1\x5e\x8b\xe4\xe4\x6e\x6d\x26\xc4\x9a\xd3\xc9\xc0\x74\x9c\x35\x21\xb5\x0a\xc5\x0c\x3b\x28\x8d\x26\x6e\x18\xcd\x07\x8c\xa7\x28\x8d\x66\xd3\xfb\xe3\x34\xff\x61\x80\x76\xb6\xf3\x29\xe5\xac\x30\xcc\x03\xbb\x3c\x38\xf0\x0b\x3e\x3b\xbe\x91\xef\x55\x34\x15\x4a\xdf\x19\x35\xe9\xbe\x84\x6f\x70\x66\xe5\x2b\x22\x4b\xcb\xec\xe8\xbb\xe1\x6f\x16\x63\xce\xae\xc8\x31\xb1\x4f\x50\xf8\xdf\x5e\xc5\xe4\x8b\x26\xc5\x90\xb6\xbc\xac\xff\x90\x49\x77\x04\x11\x93\xee\x39\x8c\x58\xb7\x3f\xda\x8b\x32\x4d\x77\x64\x34\x78\x44\x20\x3a\x1d\x9d\xa5\xcb\x53\xed\x5e\xf4\x43\x16\x12\xd3\x59\xa9\xa7\x30\xeb\x65\xdb\x2b\xd0\x5f\x98\x6b\x7c\x7d\x81\x2d\x0d\x93\x10\x69\x8b\x9d\x1c\x2f\x9f\xb5\x60\x66\xb0\xea\xcf\x5c\xbf\x3f\x63\x33\xd7\x4f\x91\x99\x59\x7f\x6e\x38\x22\x0b\xa8\xf8\xec\xca\x00\x04\x3f\x47\xe6\x63\x4c\xbe\x72\x33\x72\xa3\xfb\xa5\x5d\x78\x0b\x8c\x5c\xba\x3c\xe3\xd3\xb6\x46\xb7\xb4\xdf\xa5\x19\x59\x29\x92\xa6\x46\x57\x52\x7f\x38\x4d\x4f\x95\x46\xcb\xcd\xf1\x30\x3d\x9d\xc5\xa0\xba\x88\x1f\xb8\x11\x53\xb8\x88\x78\x00\x1f\xb8\xbc\x34\xa7\x9a\xc4\xb4\x1c\xa0\x53\x0e\x0d\x1a\x93\x77\x1c\x72\x34\x6a\x02\x85\x6c\xb0\x46\x29\x4a\xa7\x2a\x7f\x72\x91\x76\x70\x3b\x15\x81\x0e\xe5\xb9\x11\x62\xb9\xf0\x2a\x0e\xc0\x65\xee\xb5\x5f\x3e\xed\x92\x6e\x0b\x6d\x86\x56\x71\xda\xb4\x24\xb8\xba\x5e\x75\x09\xa4\xcb\x0d\x1c\x97\xf7\x53\xb9\x96\x0a\xaa\x54\x58\xa4\xdc\xbf\xc2\x84\xd7\xbb\x8b\x0c\xdc\x0d\x10\x46\x90\x31\xe5\x94\xc3\x66\x5b\x37\xf3\x64\xc9
\xfc\x88\x29\x67\xa3\x39\x84\x21\x42\x18\xb6\xdb\xcb\xb5\x4a\xb8\x0a\xac\x25\xea\x6a\x6d\x15\xb5\x46\x58\x6b\xd4\x6e\x3f\x33\xb5\xce\x41\xb9\xe7\xc5\x76\xc8\x6b\x5d\x60\xad\x8b\x3a\x58\xb9\xb8\x29\x01\x28\x91\xe0\xb3\x66\xbf\xdf\x7a\xe1\x33\x2c\x16\xa1\xec\x10\x10\x7b\x1a\x03\x80\x0c\x01\x9a\x8e\x31\x18\xe5\x54\x9c\x35\x9d\x10\x1e\x36\xaa\x2c\xa8\x24\x59\x79\x1c\x8e\xee\x08\x2a\x2f\x60\xd8\x21\x48\xda\x71\x9c\xb2\x1a\x73\x24\xeb\x4f\x98\x2e\xab\xef\x4d\x7f\x1f\x57\xf9\x58\x4e\xe5\x19\xb3\xfd\xe8\x54\xb7\x7e\x25\x99\x01\x1e\x4f\x26\xa5\xee\xde\x96\xc0\xde\xb7\xbc\x2e\x70\xa3\x8c\xe7\xc5\x3f\xab\xc5\xbd\x85\xe2\xdf\xd5\xe2\x4d\xf0\x3d\x09\x81\x67\xfa\xb0\xda\xff\xf1\x0a\xed\x7f\x0b\x6b\x0f\xd1\xa0\x80\xa7\x2b\x2a\x6e\x97\x2a\xe2\x48\x3e\xc9\xf2\xc1\xde\x09\x22\x61\xad\x45\x1c\x82\xf0\x3e\xc9\x4e\x27\x35\x43\x70\x96\x2f\xbc\xd3\xb3\x24\xe3\xa8\xaf\x71\x56\x4a\xa1\x26\xcf\xcb\xdc\xe1\xb7\x24\x65\x96\x20\xc9\x5b\x49\x4e\xd2\xad\x5d\xda\xdc\x2f\x53\x24\xa5\x7b\x61\xed\x30\x45\xe1\x35\x1e\x2b\xe0\x79\x59\x65\x34\xe5\x75\x59\x00\xff\xd2\x96\x02\x76\xf3\xdb\x9a\x9b\xf6\x9c\x45\xb2\xf5\x1e\x7c\x91\xec\xf4\xac\x18\xe9\xeb\xcc\x5c\xfe\x22\xd3\x08\x18\x0a\xeb\xef\xd3\x38\x19\xd3\xa2\xdb\x97\xec\x8b\x74\x67\x17\xe1\x48\x13\xda\xa7\xef\x4c\x83\x3e\xc2\x2a\xad\xda\x3b\x1c\x2f\x3a\xd7\x94\x9b\x39\x09\xa4\xd9\x86\x6e\xcb\xec\x8c\xae\x3d\x72\xef\x99\x3f\x79\xbd\xf3\x76\x5b\xb9\xe7\xc8\xb5\x64\x9f\x2a\xf7\x9c\x99\xc7\x10\xb9\xb3\xd9\x73\x76\x7c\x06\x20\x16\xf8\xc4\x80\x33\xd2\x2a\xab\x89\x8e\x28\x83\x3c\xc6\x53\x66\xbc\xfc\x3a\x0a\x87\x58\x35\xc0\xfe\xfd\xca\xec\x18\x60\x12\x70\xbd\x12\x8a\x82\x60\xc7\xe2\x94\x22\xa9\xdc\x8b\xea\x06\x4e\xbb\x1f\x61\xf7\x17\xd9\x3c\x50\xeb\x03\x33\x98\x18\xb2\x43\x9f\x75\xd7\xeb\x19\xdb\xd3\x54\x85\xd0\x53\xee\x79\x02\x59\xdb\x61\x62\x69\xf7\x45\x65\x11\xcb\x96\x60\x79\x11\x55\x36\x7d\x52\xdc\xac\x7d\x7b\xf7\xf6\xa5\xd6\xd3\x13\xf1\x2b\x16\x33\xdd\x5f\xaf\x12\xb4\x99\xaa\x20\x2c\xd4\x9d\xbe\x74\xf9\x70\x78\x70\x2d\xa4\x7e\x1b\xce\xb4\x90\x42\x11\x67\xaa
\xa2\x73\x25\x66\x33\xa7\x22\x99\x32\xc6\xf5\x2c\xba\x9a\xc6\x9a\xfb\x13\xd1\x6e\x1b\xaa\x74\x39\xb9\xf7\x3f\x7a\xd2\x9d\x44\x7c\x28\x86\xe0\x7f\xf2\xa4\xab\x23\xcd\x27\x18\x9d\x92\x10\x09\x31\x7a\xbf\x96\xfa\x11\x4a\x45\xaa\xd4\x09\xbd\x57\xe4\xa7\x24\xb3\x10\xa3\x5a\xea\x5a\xe8\xf0\x4a\x44\xb1\x5e\x6e\x33\x69\x6e\x63\xd0\x5a\x68\x50\xe7\xb1\x23\x82\x49\xb8\x0f\xde\x7b\xc2\x55\x62\x36\x8d\xe4\x4c\x7c\x3e\x79\x0b\xfe\x9d\x77\xef\x7f\xf5\x84\x3b\xd3\x5c\xc7\x33\x08\xa2\xfc\xf7\x27\x71\xab\x13\x08\x02\xaf\x3c\x4b\x96\x07\x84\xb1\x3d\x07\x2f\x4e\xc1\x0b\x97\xab\x4c\xcf\x08\x9d\x7f\xd4\x3f\xd2\xa1\xb0\xfa\xc8\xb2\x70\x72\x3a\xde\x9a\x83\x04\xd8\xdd\xe3\x99\x8a\x12\xa7\xfe\xf1\x50\x9e\x93\x2e\x70\x0a\x61\xe5\x15\xef\x6c\xd2\xbe\x62\xb7\x64\xc2\xcb\x21\xf5\x05\x13\x3f\xd2\x24\x08\x89\xa4\x83\xb0\xe3\x00\xda\xc1\xdc\x0b\x69\x02\x8a\x26\xc5\x79\x27\x11\xee\xb9\xd0\xfb\x93\xc9\x49\x3a\x2f\x2f\x05\x1f\x0a\x35\x23\x94\x82\xff\xa1\x34\x5f\x29\xdf\x10\x56\xb9\xb0\x93\xb4\xb7\xd9\xed\xce\xe7\x5b\xdd\xee\x1e\xcb\x5e\xd1\xdc\x07\xef\x47\xc3\x3b\x26\xf3\xf6\x66\x42\xe1\xa7\x24\xa3\x90\x68\x9a\x9e\x87\x30\x45\x74\x59\x11\x8d\xe9\xe0\xad\x24\xb1\xcb\xa9\x47\x1a\x01\x8c\x49\x14\x62\x64\x17\xba\xcf\x88\x04\xe1\xfa\xdb\x46\x4f\x4b\xac\xdf\x55\xba\xd1\x54\x48\x22\xdc\x60\x0a\xc2\x0d\xde\xc3\x7a\x77\xd9\x6d\x80\x74\xe5\x1b\x55\x39\x78\x6f\xc0\xac\x37\x1f\x7a\x06\x41\x5f\xbb\x7e\xdf\x86\xf2\x49\x77\x26\x74\xba\xfd\xec\x4c\x11\xed\x72\x1b\x72\x66\x74\x86\x12\xba\x77\x53\xa3\x8c\xfa\xdb\xae\x0f\xd2\x35\x86\xdd\x33\x25\x86\x42\xea\x90\x4f\x66\x06\xec\x09\x98\xbd\xea\x06\x6f\x68\xbb\x4d\xa4\x9b\x52\xbf\x29\x79\x63\xb4\x5c\x3c\x09\x4c\xe3\x2b\x84\xeb\x7f\xe8\xe7\xca\xca\x4c\xc8\x21\xb9\x36\xd3\x38\x20\x35\xf8\x38\xcf\x22\xa9\x85\xd4\x1b\x06\x03\x07\xa3\x0d\xc1\xa0\xee\xe1\xaf\xd2\x56\x91\x2e\xf7\x23\xa5\x09\x5e\x89\xa9\x78\xe6\x4a\x2e\x2c\x14\x78\xbe\x27\x5c\x1f\x78\xcd\x5e\x10\x2e\x37\x12\x20\xd7\x7c\xc3\x10\x30\x56\xd9\x48\xd2\x15\x7e\xb1\xcf\x46\x2c\x09\x90\x6e\x70\x6e\xfe\x39\x36\xff\xbc\xac
\x6c\x63\x5b\xaf\xf4\xc6\x22\x99\xcb\x98\xcf\x0b\x2e\x31\xbb\x65\xc6\xe4\x46\x82\x55\x95\x07\xca\x1d\x4d\xf8\xf9\xcc\x33\x12\x60\xad\x4b\x69\x1f\x75\xfc\xf9\xfc\x19\x49\x8f\x08\x43\x76\x9f\x40\xc4\x48\xc0\x34\x41\x45\xdf\xe5\x30\x62\x9c\xcc\x20\xa2\xe0\xb3\x1a\x3e\x52\x8d\xae\xf9\x2e\xb3\xed\xfc\x1d\x43\x6f\x62\x97\xb7\xdb\x84\x68\xa6\xe7\xf3\xfb\x84\x9e\x8a\x33\x16\xbb\x9c\x08\x8c\x4a\x33\x35\xd8\x07\x49\xe2\x52\xb4\x81\x4e\x48\x08\xb3\xd2\xa0\x66\xb6\xaf\x20\x0d\x0f\x8c\x28\x8c\x48\x64\x2c\x0f\x30\xea\x9c\x22\x21\x04\xae\x0f\x31\x89\x8a\xe3\xb9\xea\x5b\xf0\x07\xf7\xd3\x48\xe9\x99\xe7\x27\xde\xbd\x15\x33\xdf\x25\x06\x0c\x65\x7d\x7c\x28\x0d\x48\xb0\xfb\x73\xf4\x8f\xda\x39\x4a\x20\x66\xd2\x0d\x80\x33\xe9\x0e\x21\x60\xd2\x15\x80\xf6\x68\x1e\xc9\xec\x5e\xb0\x13\xb3\x03\x8f\x0b\x07\xfb\x5a\xf9\xa2\x04\x96\x48\xb8\x6f\x79\x3b\xe0\xd7\x90\x8b\x74\x79\xe5\x68\xd7\x6d\x0d\x6e\x09\x07\x81\xaa\xa5\x17\xb4\xdb\xe1\x60\x1f\x6f\x12\x29\x37\x04\xe5\x5e\x9a\xb7\xb7\xf8\x22\x18\x28\xd7\xc8\x51\xf3\xca\x90\x02\x48\xd7\xa7\x56\x19\xfa\xfa\xb7\xca\x90\x72\xcf\xc9\xa2\x2e\xd4\xa0\x50\x8f\xc9\x2f\x09\xd2\xbd\x80\x54\x67\x55\x55\xea\xfb\xb6\xfa\x2a\x08\x6a\xb2\x63\x4f\xc1\xc4\x68\xb3\x85\x62\xf4\x63\x51\x9d\xbd\x32\xe5\x66\x0c\x6f\x56\xea\xb1\xd2\x93\x10\x2d\x38\xc6\xa5\xaa\xc6\x71\x97\x82\xc2\x30\x2c\x51\x29\xb2\xde\x05\xdc\x65\x46\x23\x07\xf3\x8c\x87\xb8\xe9\xb3\xa4\x46\xc5\x3c\x8d\xcf\xcc\x10\x9d\xd1\xad\x03\xdc\x13\xa7\xf1\xd9\x7c\x7e\x1f\x7a\x2d\xb8\xf4\x5a\x95\x4b\x2b\x4a\x15\x9b\x38\x35\x24\x55\x6e\x48\x66\xfe\x25\xe5\x8e\x81\x33\x32\x62\x31\xf8\x4c\xc0\x98\xc8\xc1\x77\x79\x3a\x3a\x73\x85\x67\xff\x8e\x2a\x72\xaa\x38\x84\xf1\xfb\x0a\x23\x7c\x7f\x51\xa3\x26\x4e\x4b\x2c\x45\x1a\xbd\x69\x52\x84\x84\xa2\x3e\xa7\x4f\xe3\x33\x46\x42\xc6\xcd\xf6\x8d\x30\xb4\x88\x96\xf0\x06\x39\x88\xdc\x90\x5d\x91\x10\x22\x37\xa4\x5e\xe4\x5e\xa6\x0f\x97\x14\x22\x9a\x3b\x60\x8b\xc0\x63\xe5\x5e\xf5\x03\xd7\xef\x07\x2c\x70\x7d\x8a\x63\x35\xbb\xce\x8c\x36\xed\xb8\x5f\x71\xf5\x22\x1a\xe9\x9c\xb8\x11\x68\xb8\x9f
\x7a\xca\x95\xf0\xcb\x13\x89\x25\xca\x10\x22\x18\x81\x5f\xba\x09\xa8\xcc\x90\xbf\xcb\x53\x79\xd6\x6e\x3f\x23\x5b\xe5\x5b\x88\xaa\x42\x7b\x58\x13\xb0\x26\xbb\x17\x5e\xac\x40\x79\x0a\xb8\xc7\x55\x02\xdf\x72\x13\x20\x56\x8d\x11\x48\x65\x32\xe1\x2a\xdb\x7d\x1a\x38\x3b\x3d\x83\x80\x21\x64\x57\x41\xc8\x88\x66\x5d\x58\xd8\x24\x76\x41\x66\x42\x7f\xb2\x02\x89\x94\x25\x46\xb6\x73\xa0\x10\xdb\xa5\xe2\x60\x22\xb8\xca\x9a\x29\x74\xb3\x67\xb5\x6c\x9f\x3e\x0b\xed\xb8\xdc\x60\xc1\xb2\xcc\xcd\x80\x34\xd2\x9b\x16\x12\x98\x83\x30\x86\x66\x40\x30\xc6\x34\x0b\xe0\xcb\xb4\xaa\x18\x2f\xd0\xc4\x67\x24\x5f\xa9\xb5\x30\xa1\x70\x6f\xf4\xa4\x40\x85\xbe\xa8\x70\x21\x9e\x99\x35\x09\xc4\xb2\xbe\x8a\x1d\x3f\xe1\x8c\xa7\x07\x5f\x94\x96\xc2\x88\xfa\x6a\xaf\x3b\x9f\x73\xd4\xf2\x02\x41\x14\xf4\x68\xaa\xcd\x07\xaa\xdf\xc0\x85\xea\x3c\x40\x18\x48\x4e\x33\x7b\x33\x54\xcc\x89\xe5\x50\x8c\x42\x29\x86\x85\x4b\x73\x18\x05\x78\x6e\x38\xc8\x7e\x78\x65\x46\x1e\xa9\xcc\x42\xe4\xd3\xa9\x90\xc3\x67\x17\xe1\x64\x68\xa6\xbd\x4e\xee\xda\xfd\x29\x5c\x19\x0d\x45\xbf\x38\xe0\xe3\x4a\x48\x7d\x14\x0d\x45\x76\x72\x6a\x81\x3c\x53\xe5\x13\x54\x7a\x9f\x50\xa3\xe5\xdf\x57\xf8\xcf\x48\xd5\x19\xf6\xe8\x2a\xac\x50\x66\xf9\xec\xe9\x0f\x31\x90\xdd\xd2\xfa\xdf\x67\x1c\x85\xf7\x45\x87\xc5\xae\x3f\x9f\x77\x21\x8d\x63\x8c\x8b\x08\xcb\x4e\x11\xa1\x88\x4c\x37\xf0\x02\x18\x7a\xfb\xb8\x83\x84\xa7\x61\xe4\x71\xa3\xbd\xa0\xf2\x40\x52\x4d\x00\x66\xff\x57\x10\xfc\x3b\x14\x37\xff\x0a\x45\xa4\x8a\x49\xd3\x0e\x47\x97\xc5\xa5\x27\x51\xc4\xf8\x5e\xaf\x43\x14\x76\x4e\x2b\x0b\x74\xad\x16\xda\xec\x18\x71\x04\x57\x9e\x82\x71\x26\xf0\x93\x06\x16\x72\xad\xc8\xa9\x04\x75\x56\xa3\x92\x59\x63\x39\x3b\xf7\x55\xcd\x1e\xa2\x14\x06\xe8\x3a\x28\xd9\x1d\x48\x03\x09\x5a\x8a\xad\xd0\x15\x73\x38\x20\xea\x20\x15\x77\x23\x11\xd6\x95\x6a\x8a\x2d\x59\x06\x07\x71\x1d\xc0\xf2\x9d\xc9\xe4\x2f\x4e\x61\x2b\x00\x81\xd7\x81\xac\x5e\xa3\x4c\xfe\xea\x9c\x76\x01\x2c\x04\x75\x80\x17\x2f\x56\x26\x7f\x79\x96\xbb\x04\x1c\xc2\x3a\xf0\xcb\x37\x2d\x93\x85\x13\xdf\x11\xf8\x30\x83\x09\x5c
\xc3\x10\x5a\x70\x05\x17\x95\x2e\x96\x4a\xeb\x3a\x51\xcc\x07\xcd\x66\x20\xd8\x04\x62\x76\x0d\x9c\x19\xdd\xb3\x05\x21\xbb\x82\x88\x5d\xc0\x63\xc6\x18\x91\x6c\x44\xeb\x2e\x76\x42\xd4\x74\xb5\x93\x44\x69\x20\xe0\xe2\x99\x74\xd2\x7c\x25\xc0\x68\x42\xbc\xeb\x94\xf4\x2d\x0a\x17\x2b\x76\xa1\xc3\x7b\xe5\xca\x30\x5d\x59\x77\xb3\x52\xf7\x7c\x65\xdd\xad\x72\xdd\x9a\x08\xed\x52\xd5\x6d\x53\x55\x41\xe4\xdd\x8f\xb0\x85\x4e\x2a\x6c\xe0\xa6\xc4\xa7\x1d\x23\xef\xa6\xda\x61\x4c\x0e\x9c\xa9\xe3\xc9\x86\xfd\x6f\x26\x01\xdd\x5f\xad\xc1\x98\x5c\x29\x30\xaa\x0d\xd1\x4c\x42\xcc\xce\x23\x8c\x1b\x8a\xf0\xfa\x9e\x70\x5b\xc0\xbd\x78\x70\x4b\x66\x01\xc4\x7b\x5b\x83\xb1\xf2\x6e\x15\xdc\x05\x46\xcd\x16\xc6\x8c\x1f\x13\x15\xa6\x87\x91\x09\xa5\x5e\x7a\x61\xc0\x2c\x47\xca\x41\xee\x14\x8c\x1b\x35\x99\x63\x82\x2e\x28\x8a\xfe\xf8\x84\xc2\x6d\xe3\x9c\x05\x91\x21\x03\x37\x88\x28\x70\x63\x26\xf0\x10\xf8\xb9\xf9\x7b\x5e\x99\x0c\xe4\xba\x25\xbd\xf3\x3e\x29\x5d\x50\xc8\x7d\xf8\x1c\xdd\x36\x2d\x88\x99\x76\x25\x46\x96\x47\x78\xb2\xcb\x37\x9d\x75\xc6\x44\x66\x6c\xaa\x53\x71\x36\x9f\x13\xf3\x87\xdd\x27\xb4\x6f\x56\x8d\x31\x26\xda\x6d\x27\x98\xf0\xd9\xcc\x3c\xc4\x83\x03\x45\x02\x7b\x3f\x3a\x30\x5a\x2b\x47\x5f\xa1\xad\x70\xc4\xaf\x44\x5e\x49\x41\x0c\x97\x92\x70\x33\x4b\xa6\x22\xfe\x2e\xfc\x34\xc5\xa9\xcf\x82\xde\x2f\x4f\xd5\x59\xdf\xfc\xc3\xc4\x40\x74\x9c\x35\xa7\xa3\xbd\x52\xb2\x8c\x67\xaa\x7a\x3e\xd1\xca\xbc\x9c\x79\x10\xa3\xa9\xe1\x8e\xf1\x96\xce\x98\x49\xf7\x8a\x50\x9a\x9e\xe7\x75\xcb\xd5\x42\xe5\x06\x4a\x70\x8d\x7e\x18\xa3\x32\xd8\xeb\x91\xe1\x88\x6c\x63\xb5\xd2\x11\x9c\x74\xc7\x68\x49\x5e\xf6\x4d\x91\x70\x5b\x7d\xba\x74\x5c\x1b\x0f\x62\x76\x1a\x83\x70\x2f\xcf\xbc\x3c\xd2\xe8\x92\xe2\xdd\xbe\x71\x7a\x2a\x7b\x7f\xe9\xc5\x30\xf5\x54\xe6\x59\x27\x01\x7b\xa6\x88\x00\x63\xaa\x8b\xc9\xd5\x4f\x71\x2d\xa4\xfe\x69\x54\x9a\x9f\x4a\x8c\x18\x87\x20\x09\x47\x64\xab\x8c\xf5\xa1\x22\xc6\x8e\xbd\x20\xd2\x3d\xa7\xa0\x40\xba\x43\x0a\x41\xdf\x2e\xa0\x74\x47\x83\x7c\x58\x07\x13\x61\x54\xac\xa3\x8f\x44\xba\x23\xc0\xd8\x83\xc5\x32
\x8c\x48\xe8\x07\xaa\xdd\x76\xb8\xd9\x43\x6e\xd0\x6e\x07\x35\x4e\xc9\x60\x12\x06\x63\x07\x02\x45\x02\x4a\xc1\xa0\x90\xf6\xdc\x2f\x9f\xf2\x0a\x88\x58\xb7\x1f\xed\x85\x99\x2a\x1b\x75\x3a\x34\x32\x95\x9f\x29\xd2\x33\x83\x18\x84\xa7\xd1\x99\x67\xfe\xc1\x13\xdb\x5c\xc1\x0d\x4a\x97\xed\xd4\xd2\x09\xa8\x31\xfe\x74\xd9\xd1\xd8\x37\x6c\xca\x2c\xc4\xe0\x48\xa1\xbf\xd6\x4b\xf7\xb7\x18\xbc\xb2\xcd\xf1\x95\x25\xde\xc1\x65\x5e\x67\xdb\xbe\xf8\x98\xbe\x20\xce\x35\x9f\xc4\x02\xb7\xc0\x7c\xee\x04\x17\x22\x18\xa3\xb6\x6a\x1e\xe5\xa9\x38\x5b\x67\x2c\x46\x5f\x16\x7a\x3e\xca\x87\x64\x47\x0b\x44\x38\xd3\x77\x13\xd1\x78\x55\xa9\x74\x26\xa5\x16\xd4\x30\x5b\xb3\x74\xf5\xa0\x2f\x06\xe8\x02\xdb\xd7\x5a\x85\x7e\xac\x05\xb1\xb9\x10\x5c\x25\xae\xa2\x6b\x51\x7a\x5d\xc6\xe7\xe3\x1f\xe1\x82\x51\x96\x47\xc0\x99\x70\xa3\x3e\x5f\xe8\xe3\xe8\x23\x89\x41\x63\xca\x83\xc5\x7e\x6c\x51\xb9\xaf\x57\x8b\xfb\xd5\x90\xee\xe1\x0c\xf7\x1b\xfe\x42\xde\x51\x31\xdd\xf5\xc2\x25\x1b\x63\x96\x9b\x8d\xc6\x6d\x28\xa3\xfd\xd7\xfd\xe5\xb6\x18\x63\x1c\x8d\x70\xf7\x17\xe3\xfd\x20\x92\x3a\x94\xb1\x48\x32\xac\xaa\x64\x19\x1b\xfd\x20\x60\x6f\x0d\xab\xe1\xb5\xde\xf4\x18\x02\xb8\x53\xed\xf6\xfd\x94\xcf\x66\xe1\xb5\xf0\xce\x23\xc2\xe9\xde\x66\x42\x01\xaf\x3b\x05\xf6\x9c\xa3\x19\xbc\xad\x96\xe9\x96\x5a\xdd\xdd\xdf\x84\x72\x18\xdd\xd4\x39\xfb\x1d\x7b\x3e\x79\x8c\x7c\xc1\xb5\xb6\x4f\x1e\x6a\x71\x9f\x80\x93\x22\xe1\xc0\xfd\xb9\xd0\x5e\x49\x6f\xb8\x53\x6c\xbd\x6b\x64\x73\xe1\xd5\x2d\xa6\xfb\xad\x22\x3e\x18\x1e\x5f\xf2\xa9\xe5\x46\xad\xfb\x0b\x34\xbb\x43\x99\x02\x32\x0b\x59\xd0\xb4\x1a\x3c\x7b\x1e\x11\x45\x91\xeb\x73\x08\x58\x3c\x30\x72\x8d\xbb\xdc\xe3\x6e\x10\x79\x1c\x42\xd6\x33\x1c\x9b\xbb\xbe\xb7\xc5\x58\xdc\x6e\x73\x23\x6c\x22\x46\xc2\x76\xdb\x50\x76\x34\x35\xa3\xe0\xe7\xdc\x62\x0b\x64\x73\xa9\xfa\x39\x35\x55\xa7\x0a\xf9\xd7\x73\x31\xe2\xf1\x44\x13\x0a\x3e\xed\x0b\x16\xb9\x97\x7d\x7b\x11\x2f\x1b\x41\x11\x60\x25\xa8\x31\x9b\x03\x6a\x23\xac\x97\xe3\x73\xfb\xa3\x8d\x8d\xbe\xa9\x73\x3a\x3a\x33\xd5\x22\x16\xb9\xd3\x24\x22\xa8\xb8\x65\xa2
\x64\xe6\xfe\x62\x12\x66\xa5\x23\xd8\xca\xe6\x3c\xcd\x6f\x0a\x1e\x67\x1e\x9f\x2e\x2d\x67\x5f\xfa\xad\x16\xad\xca\xfb\x96\xa7\x40\x79\x1a\x66\x9e\x00\x9d\x9a\x16\x10\x67\x36\x46\xe1\xb8\x4e\x2d\x23\x28\x5d\xeb\x3d\x2e\x41\x0b\x47\x04\x6f\x4b\x64\x70\xa5\xd1\x2e\x8c\x12\x82\x01\xf1\xeb\x8c\x59\xfa\xef\x19\x3e\x33\x9f\x6f\xe2\x8b\xb2\x3b\xe6\xb7\x22\x1a\xba\x20\x30\xb8\x85\xd5\x3b\x9a\x0c\xdb\xfd\xcb\x2c\x0e\x46\x0a\xe7\xe9\x38\x52\xe3\x52\xba\xc6\x76\x93\xee\x10\x84\x27\x60\xe4\x19\x51\xe1\x7b\xd2\xf5\x93\xc4\x12\x4d\x2f\x49\x9d\x63\x3c\x75\x8d\xed\x94\xbc\x4c\xd2\x9d\xd8\x78\x6c\x88\x58\x90\xc7\x87\xb0\x88\x31\x96\x0b\x81\x51\xbb\x1d\x99\x55\x1c\xb1\xe0\x34\x3a\x33\x25\xa7\x11\x6e\xfe\xd1\xc2\x89\xa6\x91\xd5\x63\xda\x37\x3f\x94\x11\xda\x7d\x1b\xa5\x53\x5d\x3f\x77\x0c\xca\x1d\x83\x6f\xd6\x10\xdb\x75\xf7\xfc\xfc\x8a\x13\xce\x57\x0f\x04\xf8\x34\x0f\x08\xcc\x90\x9d\x19\xf1\x0d\x13\xa6\xdc\x4b\xb8\x66\xeb\x3d\x18\x9a\xee\x50\x9e\x0f\x8d\x3c\xbf\x66\xeb\x5d\x58\x12\xea\xb3\xc1\x8c\x9d\xce\x60\x68\x84\xfa\xcc\x2e\xf7\xd0\x08\xf5\x21\x1b\xba\xe3\x9c\xc7\xb5\x98\x4a\x41\xb5\x9a\x41\x4d\x06\x13\x76\x3a\x81\x96\x01\x35\xb1\xa0\x5a\x06\x54\x8b\xb5\xdc\x71\xee\x0c\x6c\xb7\xb3\x98\xea\x75\xc6\x26\xd9\xdd\x93\x45\x6a\xf0\x08\xb9\x1e\x34\x19\xf6\xdd\xbe\xde\x2b\xf2\x46\xd8\x68\x0c\x79\xaa\x8d\x34\xc3\x08\xbb\xe5\x50\x0c\x32\x83\x09\xf5\x66\x8c\xb1\x09\x9d\xcf\xb1\x9f\x4d\x10\x30\xb1\x53\x6c\xe6\xdd\x58\x3b\x1a\x44\xa7\xb7\x14\xc3\x84\x8b\x20\x5d\x8e\xb1\x21\x3c\x5d\x83\x2d\x74\x78\xf3\xa5\x90\x28\x04\xf7\x34\xdf\x21\x70\xb2\x18\x3b\xba\x54\xe3\x79\x56\x63\xcb\xc3\x63\xba\x0b\xec\xe7\xa2\x71\x9b\x60\x00\x35\xfb\x64\x28\x65\x08\xca\x68\x28\x57\x29\x4e\xdb\x20\xe0\x2a\x8d\x99\x66\xca\x0d\x8b\xe0\x99\xf2\x48\x32\x2a\xda\x01\x01\x17\x94\x96\x7d\xdd\x25\xac\x62\xbb\xb7\xb3\xc8\x20\xc3\xfc\xd2\xb0\x9c\x4c\xd0\x95\x31\xe0\x25\x0c\xb8\xb1\xbe\x73\xfb\x1e\xd9\x5e\x09\xfd\xd2\xad\xe9\xaa\x90\x35\xfa\x76\x35\xa2\x0f\xef\xe2\x1b\x3d\x68\x9d\xb1\xc0\xe8\x6f\xdd\xfc\xd7\x56\xfe\x6b\x1b\x7f\x99\x9a
\x41\x49\x21\x08\x99\x3c\x0d\xce\x20\xb2\xb9\x4d\x42\xc6\x58\xd4\x6e\x17\xba\x90\x69\x59\xd2\x85\x82\xf9\x3c\xd5\xae\x74\xbb\x4d\x48\xcc\x42\x6a\xe4\x34\xe1\x2c\xa2\x6e\x0b\x03\x8c\xf2\xac\x14\xf3\x39\x21\xc2\x68\x4f\xf7\x09\x3d\x0d\xce\x58\x64\x87\x58\x79\xa7\x07\xa9\xf6\xa6\x07\x8e\x93\x29\x6e\xda\x74\xb2\x65\xdf\x5a\x2e\x8b\xc6\xdf\x69\x70\xe6\x8e\x20\xca\x18\xaf\xb7\x7c\x97\xfb\x34\x38\x33\x60\x8c\xe4\x45\x21\x72\x6f\x05\x88\x99\x3e\x33\x46\x33\x42\xd3\x33\x04\xd4\x30\xa2\x2a\x76\x23\x9a\xe4\xc1\x80\x76\x7a\xf0\x8f\xac\x8c\xc2\x37\x7c\xd3\x2f\xe7\xbe\x29\xc2\x68\x96\x04\x87\x61\xc6\x86\xc1\x0b\x23\x67\x8b\x8b\x6e\x3c\x0f\xa2\xda\x0b\x06\xb8\xdc\xbb\x20\xe0\xfe\xda\x0b\x21\xf4\x82\x8d\x30\xa1\x5e\xb0\x17\xa6\x54\xf2\xc8\x16\x05\x20\x3c\x5e\xd2\xa3\x22\x16\xec\x85\x83\xc0\x0b\xb3\x20\x4a\x1b\x3d\x69\x79\x64\x7c\x3a\x3a\xeb\x1f\x1b\x45\x81\x9f\x8e\xce\x40\x43\xa7\x63\x2f\x5a\x5a\x1f\x5e\x89\x88\x9f\x97\x70\x2e\xe7\x62\x01\xce\xcc\x2c\x99\x5f\x99\x2a\x6f\x86\x31\xca\xf9\x38\xf8\x2c\x8b\xdb\x84\x19\xeb\xc2\x84\x75\xe1\x9a\x89\xfe\x6c\x6f\xd4\x6e\x4f\xf6\xfc\x34\x12\x60\xc8\xc8\x33\x16\x9e\xce\xce\xa8\xcb\xa1\xc5\xc8\x21\x8b\x4e\x27\xf8\x70\xc5\x9e\xb9\x3e\x5c\xb0\x43\xd7\x37\xdc\x7f\xb8\xce\x58\xcb\xb6\x99\x9a\x06\x9d\xde\x19\x9c\x9b\xca\x9d\x1e\x4a\x87\x29\x35\x45\x37\x6c\xea\x72\xb8\x63\x53\xd7\x87\x31\x33\xea\xe1\x8d\x29\x3c\xc7\xc2\x5b\x76\xee\x72\xd8\x67\xe7\xae\x0f\x07\x6c\xc8\x18\xbb\x35\x85\x07\xed\xf6\x98\x1e\x2b\x72\x05\xfb\x10\x43\xa7\x73\x4d\xe1\x97\xc2\x6c\x79\x43\xb8\x80\x89\xd1\xea\xae\x3b\xec\xca\x7a\x37\xdf\x67\x25\x77\xb6\xe6\x75\x87\xdd\xd9\x92\x59\x87\x6d\xc2\xa4\xc3\x36\xad\x72\x62\x00\xd3\xeb\x4e\x27\x83\xd5\xca\x60\xe5\x3d\x5d\x97\xe1\xce\x3a\xac\x57\x6d\x3d\xa6\x79\x5f\x57\x79\x5f\x69\xed\x63\x45\xee\xe0\x22\xc3\x76\x19\x87\x5e\x3f\x0b\x05\x5a\x9f\xce\xe7\x37\xeb\x8c\xdd\xa6\x77\x60\x16\x61\x2e\x62\xb7\xd0\xc7\x7e\x73\x1f\x9b\x89\x65\x47\x38\x9e\x32\x2e\xf9\x88\x3a\x30\xe9\x74\x70\xcb\x98\x55\x4f\x17\xfc\x59\x86\x42\x69\xdd\xed\x52\x2f\xb7\xb7
\x6d\x0b\x62\x39\x84\x23\x76\x34\x9f\x9f\x9e\xf5\x53\xb4\x4b\xe4\x72\xe8\xfa\x90\x2a\x5e\x47\x14\x3b\x26\xdd\xbd\x6c\x4f\xcd\xe7\xdd\xbd\x20\xff\x7d\x44\xd3\xad\xf3\xd8\x6c\x9d\x1b\x2f\x86\x5b\x2f\x80\x3b\xef\x28\x3d\xb9\x7a\xa9\x98\xf3\x53\x4c\xae\xbe\xee\x3e\x7d\x5b\x4a\xce\xf9\x4b\xd5\x1d\xb8\x1b\x49\x89\xd1\x30\x41\x26\x63\xd2\x23\x96\x7b\xe5\xc5\xb0\xef\x05\xec\x3e\xf0\xba\xf0\xdb\x13\x60\x5e\xcc\x72\xc7\x73\xaa\x8f\x98\xf6\x2c\x40\x55\xdc\x98\xbc\x81\x1b\xe4\x87\x34\x19\x84\x84\x42\xe0\x06\x6c\x33\x3d\xb3\xaf\x28\x38\x81\xfb\x1b\x04\x84\x10\xb8\xca\xd4\x52\x2c\xb6\x60\x03\x77\xe6\xce\xd8\xfd\x8d\x17\x5a\x08\x49\x86\x7d\xe7\xa5\xca\xbc\xb0\xf9\xc8\xde\x57\x24\x95\x15\x48\xd9\xb8\x78\x1e\x0b\xc6\x2d\x6a\x19\x22\x41\x15\x11\x01\xdc\xfd\x0d\x01\xc4\xa9\x12\xf0\xdb\x58\x59\x4f\x20\x36\x13\x1c\xc0\xbe\xe1\x4d\xc9\xfb\x2a\x0e\x96\xd9\xa7\x22\x26\xaf\x9f\x1d\x00\xe0\xc4\xdc\x07\x5e\xaf\x34\x77\x61\x59\xbe\x7e\x29\xb1\xa6\xf5\xd2\x61\xfc\x82\xc3\xd7\x32\x43\xb4\x20\x2d\xaf\x8f\x5c\x1b\xaf\xd4\x37\x22\x38\xbb\x4c\x6c\xb8\x55\x2b\x5b\x04\x9f\x7e\x31\xd8\x68\x77\x0c\x91\x3b\x83\x90\xe6\x9b\xf2\x31\x96\xde\x47\xae\x66\x0a\x22\x37\x66\x61\xdf\x2a\x8b\x91\x3b\x73\x6f\xfa\xdd\xbd\x59\xae\x5c\x5a\x44\x66\xd0\xcd\x1c\xc7\x19\x8c\x27\xf5\x30\x26\x06\x86\xc1\x60\x42\xef\x27\xee\xbe\x3b\x63\x2a\x85\x3d\xf9\x13\x64\x0b\xba\x0c\xd0\x10\x24\x31\xa3\xee\x74\xf0\x64\x98\xe0\xb0\xe9\xbf\x73\x22\x8d\x91\xd2\xaf\x99\xb6\xa3\x36\xba\xe8\x75\xc1\xe1\x87\x4c\x97\x75\x5d\xab\xbd\x16\xc7\x1f\x30\xb4\x13\xdc\xe9\x41\x00\xaa\xc6\x13\x65\xf7\x51\x8b\x69\x57\xa4\x1a\x96\x72\x83\x8b\x70\x32\x3c\x8a\x86\x62\x96\xcb\xa8\x0b\xd6\xed\x5f\xec\xb5\x32\x69\x77\x91\x09\xa8\xa9\xb1\x30\xd9\xf5\xa0\x75\x7a\x71\xe6\x99\x7f\x5c\x1f\xce\x59\xa7\xc3\x3b\x64\x6a\xcf\x79\x90\x32\xf7\xd8\xa8\xdd\x1e\xed\xb1\x73\x4c\xd4\x26\xc9\xd5\xe9\xc5\x19\x4c\xd3\xb5\x3f\x87\x90\x82\x9d\x83\x85\x19\xc8\xa7\xa0\xcf\xd9\x79\x7e\xd7\x2f\xc9\x6c\x3d\xe8\x82\x72\x7d\x28\x27\x91\x7c\xad\x96\xce\x64\xd0\x33\x98\xa9\xdb\xd2\x23\x25\x62
\x84\x77\x0a\xef\xf9\x96\x00\xbc\x5b\x74\xb7\xa0\xd2\xad\xca\x4a\xf7\xa2\xeb\x45\x03\x67\x2f\x14\x89\x31\x5b\x14\x63\x4c\x62\x82\xa5\xc2\x21\x5a\x0a\x5c\x7e\xa1\x2a\xf7\xb2\x54\x29\xbc\x7d\xe9\x00\xb6\xea\x88\x29\x8e\x3f\x21\x66\xcf\x90\xe6\x69\x3f\xae\x59\xd0\xf9\x9c\xd4\xbd\xb6\xbe\x9c\xc5\xc5\xef\x8b\x76\xdb\x58\xab\xb2\xdd\x5e\x38\x56\x8d\x41\x96\x52\x5b\x61\xec\xc0\x0c\x94\x1b\x2f\x5c\xc7\x4a\x9d\x7a\x6e\x6c\xca\x29\x2c\x86\x1e\xc8\x0c\xe8\x73\xae\x39\xe9\x42\x9e\x32\xa0\x5c\x3b\xb7\x21\xec\xdc\xbb\xb3\x45\xcb\xa1\x0e\xf5\x41\xdd\x4b\xf7\x92\x29\x77\xe6\xd5\x15\xb1\xfb\x4b\xcf\x0c\x61\xea\x29\x37\x4e\xb2\xae\x77\xbd\x72\xf4\xdc\x2c\xcd\x77\xa9\xdd\x10\x63\xdd\x33\xe7\x91\x9d\x11\x59\xda\x17\xa7\xda\xbd\x2e\x14\x47\x99\xdd\x1f\x28\xd4\x2e\x82\xf0\x28\x2a\x8d\x95\x86\x82\x99\xa6\x7d\x91\x0b\xbc\xb4\xa7\x50\xce\x84\xd2\x4f\xc5\x28\x52\x82\x3c\x53\x24\xc6\x00\x7b\x37\xa6\xc0\x17\xfb\x79\xe2\x21\xcf\xb0\x3d\xd0\xc2\x63\x51\x3e\x20\x2f\xa1\x6d\xe6\xd9\x4a\x01\xed\xee\x97\x2d\xa2\xb5\xae\xb1\x01\x5c\x85\x0e\x9e\xc6\xc6\x81\x3b\x63\x76\x9b\xb8\x37\xf9\x92\x3d\xae\x23\xd7\x22\x1d\x81\x99\xc8\xe5\x70\xdf\x2c\x08\xb5\x14\x82\x9a\x7b\xad\x9f\xa7\x51\x03\x87\x8a\x9f\xa3\xfb\x3a\xcf\x3d\x5a\x9e\xa6\x4c\x17\xcf\x22\x52\xf7\xfb\x11\x5a\xb4\x56\xe4\x0d\xb8\x3b\xf3\x9e\x29\x62\xa4\x9b\x99\xb9\x72\xec\x9a\x76\xef\x6c\xce\xa7\x7c\x28\x85\x1b\x93\x69\xf7\x36\xbd\x76\x93\x2f\x4a\xe9\xf6\x4d\x7c\xca\x8d\xe6\x1c\xb8\xfb\x10\xb1\x4d\xf4\x7f\x04\x83\xd0\x76\x15\xa6\x5d\xf5\x17\x16\x30\x82\xca\xa2\x07\xae\x3a\xa3\x89\x68\xb7\x31\x0e\x42\x94\x02\x86\x70\x76\x16\xee\x0a\x2a\x77\x46\x24\xed\x0f\xad\xb3\xcd\x7b\x46\x7a\xdd\xb2\x93\xf6\xb3\x4a\x03\xdf\x6d\x5e\x5d\x43\xe0\x9f\xee\xa6\x22\x23\x84\x91\x31\x4f\xb5\xb8\xd5\x69\x68\xa5\xd5\x57\xd6\x1b\xaa\x3a\x0e\xed\x2f\xe6\xa5\xe4\x99\x83\x78\xb6\x2a\x24\xb8\xaf\xd8\x15\x19\x93\x73\x05\xb1\x2b\xf9\x95\x80\xd8\x45\x3b\x13\x53\x14\xa5\x8a\x89\xab\xf9\xf9\x11\xbf\x12\xae\x8e\xde\x46\x37\x42\x3d\xe3\x33\x41\x68\x7a\x62\x2a\x17\xe5\x8c\x28\x3c\x4b\xc2
\x3a\x07\xaf\xc8\x67\x45\xc2\x53\x71\x46\x8d\x95\x97\x1f\xa2\xfb\x0a\x38\x28\x08\x2a\xe1\x23\x0a\x24\xe8\xd2\xb1\x31\x06\x5d\x86\xa0\xdc\xe0\xdc\xfc\x73\x6c\xfe\x29\x85\x6d\x62\x02\xc4\x2c\x2c\x22\xf8\x00\x9c\x85\x38\x3f\x10\xb0\xcf\x8a\x14\x7b\xee\xab\x5a\xb8\xa0\x6f\x67\x2a\xc6\xdc\x8a\xec\x27\x9e\x97\xd0\x3e\x67\xaf\x8d\x9a\x1b\x58\xa9\x12\x30\x0c\xc1\x4b\x4f\x0d\xbf\x57\x22\x06\x2c\x9a\xa2\x8a\xa6\x68\x44\x33\xce\xd0\x34\xb6\xe7\xbb\x76\x5b\xb9\xef\x08\xe6\x32\x45\xa4\x43\xb3\x81\x74\xa8\x27\xc6\xa8\x0b\x15\x46\x21\xc3\xc8\x0c\x20\x6a\x1e\x40\xa0\x18\xef\xdb\x51\x04\x76\x14\xbe\x22\x8e\x69\xea\x50\xd2\xa2\x44\xb9\xfe\x07\xb3\xfb\x7e\x2a\x32\x32\x42\x26\x32\x63\x8b\x60\x84\x7a\x20\x8c\x98\x86\x40\xb1\x2e\x84\xe8\xae\x09\xde\xb6\xdb\x24\x43\x82\x85\xf8\x86\xda\xc1\xc3\x87\xfa\x48\x21\x65\x23\x80\xf7\x65\x78\x85\x7e\xe7\x43\xc5\xaf\xc4\xa0\xf6\x6d\x25\xd8\xa9\x14\xe6\x25\xa1\x27\xb6\x1e\xee\x76\x69\x29\xc4\xe8\xab\xb2\xa1\xc0\x82\xe8\x2c\x62\xbb\x74\x2f\x86\x13\x7a\x1f\xa3\xae\x12\x0f\xba\x1e\xf9\x60\x96\x18\x4c\x55\xe8\xe5\x6c\xa2\xca\xb1\x34\x93\xa0\x06\x04\xeb\x98\x5d\x6f\x73\xcc\xf6\x28\xf5\x50\xc5\x8e\xdb\x6d\x0b\x24\x66\x9b\xe5\xdd\xf9\x4d\x15\xd1\x02\x97\x91\x99\x9a\x49\x14\xe0\x88\xdc\x0b\x23\x71\x5d\x3e\x9f\x3f\x23\x3d\xda\x14\xcd\x32\x26\xd3\x08\x2e\xa2\x4a\xa4\x1b\xbd\x57\xed\xf6\x45\x38\xd3\x91\xba\x73\xad\xcb\x5f\x12\x9b\xd4\x13\x47\xfa\xa3\xf1\x50\xba\x1e\x5a\x06\xca\x98\x2e\x1f\x35\xd7\x02\xcf\x30\x1c\x28\xc1\x85\x37\xaa\x31\xc7\xc7\x6a\xa0\xa9\xc0\x6f\x82\x7b\xbf\x78\xbc\x52\x3e\x2d\x49\xa0\xe6\xb4\xc6\xab\x06\x56\x83\xd4\x8c\xac\x8a\x40\x6b\xb7\xb3\x5f\x50\x57\xcd\x9e\xf2\x0c\xec\x1f\xef\x8d\xaa\x44\x2c\x54\x35\xc7\xe7\x92\x54\xc7\x58\x32\x5d\x24\xbd\x3f\x91\x44\x60\x64\x5c\xce\xb7\x97\x8e\x8e\x34\xc8\xea\x29\x15\x66\x0c\x2b\x2f\x6c\xed\xf9\x94\xb6\xdf\x7e\x68\xce\xea\x92\xde\xef\x2f\x5d\x47\xd0\x74\x70\xa4\x89\x76\x39\xe6\xf7\x28\x85\x11\x28\x5d\x0e\x91\x5f\xbc\xae\xf3\x41\x95\xc7\x87\x31\x28\x2c\x9b\x3e\xf7\x5c\xe8\xf4\x34\xf9\xe9\xdd\xab\x21\x5e
\x7f\x20\x72\xf0\x36\x1d\xb6\xf7\x53\x92\xab\x28\x4f\x0c\x82\x3c\x5f\xeb\x6a\x78\x73\x29\x8a\x50\x2f\xb0\xa1\x4c\x04\x9e\xaa\x33\x42\xe1\xf7\xaa\x00\x67\xcd\x6a\x42\xbb\xb4\x3b\x0b\x54\x34\x99\xd8\x9c\x3b\xbf\x93\xc5\xd8\xd0\xea\xc8\x30\x1a\x54\x13\x5a\xba\x55\xa6\x57\x04\xab\x34\xa3\x9b\xf6\xfa\x56\x8c\x8c\x09\x97\x3d\x7e\x8a\xa6\x4c\xaf\x1e\x84\x5c\x9d\x97\x6b\x55\xe1\xff\x5c\x91\x57\x52\xed\xed\xd5\x65\x27\xcb\x4a\xff\xfd\xef\xd5\xa5\xa6\xb8\x29\x16\xb4\x4a\xfb\x96\x36\x66\x42\xbf\x92\x5a\xa8\x6b\x3e\x29\x17\x9e\x48\xa2\xf1\xb6\x4e\x53\xa4\x6d\xde\x48\xa6\x37\x38\xd0\xae\xd1\x10\x6b\xe0\x1a\x02\x0d\xa1\x86\x48\xc3\x48\x83\xaf\x61\xa6\xd9\xf2\xd4\x17\x71\x9b\x30\x69\x28\xdf\x4c\xcb\xaf\x1b\xca\xb7\xd2\xf2\x61\x43\xf9\x76\x5a\xde\x6a\x28\xdf\x49\xcb\xaf\x34\xbb\x6f\x79\xbb\x09\x5c\x68\xf6\xa7\xd8\xb8\x7b\x1e\x78\x1a\xbe\x7a\x0a\xfc\x91\x27\xe0\xc8\x0b\xe0\xd8\xe3\xf0\xde\x8b\x21\x78\x8d\xb9\x5e\x60\xaa\x57\x64\x29\x23\x66\x66\x31\x7b\x8b\x66\x43\xb8\xd1\x8c\x88\xf2\xdd\x97\xd2\xf5\x08\xe5\x06\x98\xdc\x70\x08\xbc\x49\x1c\xdc\x92\x73\x0d\xd2\x6d\x0d\xb4\xc7\x31\xcc\x83\x9b\xa5\x28\x17\x72\xc0\x3f\x06\x70\x4c\x6d\x5e\xa9\x1e\x85\x3b\xcd\xba\x30\xd6\xec\x02\x6e\x35\xdb\x84\xfd\x3a\x84\xd1\x4f\x63\x8f\x83\x37\x36\xd1\xe4\x6e\x55\x53\xaf\x1a\x5d\x72\x08\x31\x93\xc0\xd9\x2d\x2a\xe1\x3e\x68\x37\x80\x5b\xb2\x8f\xe3\x00\xed\x0a\xcc\xcb\x1b\x83\x62\x1c\x34\x13\x66\x72\x0e\xea\xd6\x62\x0d\xdb\x34\x4e\xda\x98\x8c\x35\xd8\x14\xb7\x36\x89\x57\xcb\xf0\x53\x78\x56\x05\x85\x2a\x50\x9e\x8e\x89\x49\xf8\x0b\xa0\x32\x83\xa7\x68\x02\x87\x9a\x7d\x84\xa3\x3f\xd0\xea\x25\xd2\x4a\x2f\x81\x8f\xd5\x8a\x78\xc8\xe4\x97\xef\xbf\xc9\x52\x26\x84\x4b\x9d\xc0\x2b\xdd\x98\x36\x11\xc3\x5a\xe1\x6d\xed\xd4\xc8\x04\x7e\x6a\xf6\x13\x7e\x2f\x37\x97\xde\xe2\x22\x15\x77\xee\xd7\x2e\x75\x3f\x33\xdf\x02\xcc\xf1\x3a\x44\x6a\x12\xd9\x55\xfc\x31\xf9\x89\xcb\xe4\xfa\x34\xf7\x59\x48\xa3\x23\x31\x91\x47\x73\xac\x2d\x5a\xf4\x47\x98\x60\x33\xb3\x64\x6c\xf5\xb8\x54\x3d\x31\x4b\x7c\xdc\x38\x50\xe5\xb6\x06\xd2\x53\x2e\x4f
\x28\x3c\xd5\x4d\x37\x67\x6a\x26\x41\x93\x31\x39\xd6\xd0\x82\x31\xf9\x6d\x3f\x6e\x82\x01\xec\xf0\x69\x65\x57\x97\xda\x33\xeb\x60\x83\xf7\x13\x0a\x27\xf5\x13\x5c\xb5\x66\x12\x78\x5e\x5b\x6d\x4c\x9e\x9a\xf9\x1a\x93\xa9\x86\x8f\x1a\x3e\x19\x94\xa6\x1a\x4e\x34\x1c\x6a\xe2\x8c\xf8\x64\x26\x1c\x6a\xfe\x2f\x81\x97\xcd\x58\xd9\x0c\x6a\xbf\x34\xfb\x06\xef\xff\xb0\xeb\xd6\xf5\x62\xb2\x63\x81\x19\xe8\xed\x8e\xc3\x2f\x48\x64\x89\x8f\x2b\x3b\xec\x4b\xc3\x0e\x7b\xaf\x61\xac\xd3\xad\xf3\x5a\xd7\x46\x3d\x87\x23\x22\x8a\xf0\x72\xe1\x72\x0c\x42\xc2\x43\x20\x9e\xbd\x0f\x18\x77\x39\x9e\x94\xe1\xfb\x30\x7b\x1f\xb1\x10\xef\xb0\x85\xf6\xfd\x28\x7b\xef\xb3\x51\xf1\x61\x11\x83\x75\x0c\xe6\xdf\x00\xff\x8d\xf0\xdf\x91\xcb\x61\xa7\xdb\xdd\xd3\x03\xc4\xd2\xa0\x63\xf3\x74\x78\xfb\xe4\x75\xca\x4b\x3a\x3d\xf0\xed\xf4\x36\x82\x52\x0d\xa5\x6a\xf1\x75\x35\xe7\xee\xbb\x15\xec\x3a\xef\xbf\x6b\x19\xc5\x8b\xca\xca\x96\xf3\xf7\xdf\x92\x77\xba\x49\x2e\x23\xab\xb1\xf9\xd9\x73\xee\x45\xe1\xb3\x6e\xc8\xc8\x99\xf2\x9c\xd8\x13\x70\xe3\x69\xd0\x46\xd6\x1c\xe2\x51\x09\x8a\x97\xef\x8d\xd4\x75\x43\x3e\x6b\x50\xee\x2d\x28\xd7\xfc\xbd\x41\x3f\x9e\x21\x7f\xff\x10\xe9\xff\x83\xae\xde\x3d\x90\xf9\x49\xca\x92\x98\x2f\xdd\x9a\xbb\x45\x27\x82\x06\xc1\xa4\x7b\x83\xb1\x95\x71\xb1\x9c\x2f\x34\x7c\x37\xac\xce\x3f\xa4\x10\x10\x8b\x40\x7e\x4e\x63\xf7\xe8\xd7\x15\x7b\xd4\x1f\xe0\xbc\x8d\xed\x9e\xf6\x8c\x6e\xf3\xad\xb1\xba\x66\xd8\x1f\x8a\x01\x6c\xf6\xd5\x90\xb3\xce\xb5\x3f\xf8\xb1\xdc\x34\x8d\xfa\x33\x0c\xb0\x6e\x8c\xf9\x38\xbe\x19\x9c\xb3\x7b\x31\xf0\xa6\x11\x87\x31\xf9\x61\x30\x20\x46\x59\xac\x9b\xac\x9b\x22\x5d\xee\x29\xce\x86\x99\x3f\x33\x7b\x46\x3a\xc7\x80\x2e\x53\xff\x90\x12\xcc\x44\x4a\xcf\xf0\xea\x7f\x86\xbe\x14\xec\x12\x54\xc9\xd3\x56\xf3\xd9\xa9\xd5\xeb\x82\xce\x49\xff\x10\x33\x80\x14\xfb\x55\xe3\x7e\xd5\xc5\x2e\x3c\x24\x01\x44\x74\x90\xa1\x88\xe4\x69\xb6\x09\x84\xe9\xc2\x9d\x51\xaf\x95\x6d\x91\x56\x92\x80\x16\x2b\x38\x62\x2d\x4b\xf5\xdb\xed\x75\xe9\xfa\xae\x3f\x48\x25\x21\x5a\x31\x09\x08\xc1\xc6\xe4\x83\x86\x0b\xd3\xed
\x9b\x3f\xfd\xa3\x04\x71\xf8\x44\x28\x3d\x73\x28\x68\x41\x9c\x51\x38\xd1\x42\x39\xd4\x3e\x9d\xab\x28\x9e\x9a\x07\xc4\xc4\x09\xe2\x99\x8e\xae\x5e\x98\x97\xa1\x3c\x77\x72\x56\x7d\xa8\xc9\xa5\xb1\x6a\x6c\x23\x25\x02\x11\x5e\x5b\x20\xcf\x35\x71\x66\xe1\x44\xc8\x40\x0c\xb3\xe7\x50\x5e\x84\x7e\xa8\x8b\x17\x3c\xd0\xe1\x35\xf2\x76\x88\x0d\xf6\x15\xca\xf0\xff\x9b\x97\x08\x22\xe6\x13\x7b\x26\x19\x15\xb2\xbc\x95\x9f\x9f\xf1\x45\xfa\x5a\x5a\x3c\x32\x42\xba\x2a\x56\x8f\x82\x93\xa5\x3c\x3b\xd2\x14\xb8\x19\xc3\x0f\x3b\xb7\xe9\xd8\xcd\xec\xe2\xbb\x58\xe0\x6b\x31\x0c\x35\x8e\x37\x10\x8d\xac\xf1\x3e\x18\x7b\x12\xbe\x59\xde\xa4\x4d\x2f\xa1\x58\x56\xc7\xd0\x29\x98\x67\x45\xed\x16\x5f\xb4\xa9\xa6\x69\x57\x03\xe5\xf5\xba\x9b\xdb\x0f\x88\xda\xc0\x02\xda\xa9\x34\xec\xd1\x0d\xcc\xe7\xdd\xd9\xdd\xd9\xd9\xda\x4d\x20\xaa\xeb\x2a\x14\xa5\xab\x9b\x6a\x8f\xed\x3c\x6a\xb7\xb7\x1f\xef\x31\x95\xc0\xe8\x8f\xf5\x9f\x3c\xda\x63\x98\xdf\x8e\xf5\x36\x37\x13\xf0\xff\xa6\x83\x27\xdd\x76\x7b\x77\x07\x3b\x98\xd5\x6e\x90\x91\x69\x30\x9f\xfb\xf6\x8f\xf3\xd3\xb1\xb9\x47\x22\x81\x97\x00\x27\x82\x1d\x90\xd3\x63\xe2\xb0\xff\xe5\xc0\x26\x85\x63\xe2\xac\xff\x2f\x07\xb6\xf0\x17\x73\xa0\x6b\x5f\x31\x07\x7a\xf4\x8c\xc2\xb5\x60\xbf\x60\xd8\xb0\x11\xaf\x05\x48\x70\x1c\x9a\x40\xab\xae\x46\x91\xdd\x06\xae\x1a\xca\x33\xfd\xf6\x42\xa0\x7e\xdb\x4b\x60\x2a\x56\x24\xb7\x2c\x3e\x8f\x44\xe1\x7c\x05\x95\xa4\x80\x8b\x8f\x4d\x50\xb8\xf9\x5b\xb8\x77\x62\xc5\xd5\xac\x7b\xfe\xdb\xd0\xde\x37\x4f\x80\xef\x7b\x1a\xfc\x2b\x2b\x1d\xc7\x88\x7f\x37\x81\xdb\xa6\x6e\xd6\xc6\xe4\x46\xc0\x58\xc0\x3e\xb9\x13\x86\x1d\x5f\x19\xc3\xe9\x37\x72\xe8\x00\xe5\xe4\xbe\x60\x5c\xc2\x81\x60\xf5\x69\x09\x6a\xb6\xfa\x2d\xd9\x17\xc6\xc8\xc2\x7c\x0b\x85\xb3\xf9\x90\x28\xd8\xe8\xd1\xc1\x98\x4c\x05\xac\xf7\x60\x4c\x6e\xcd\x42\x09\x4a\x3d\x2c\xda\xa4\x83\x5b\x72\x2e\x60\xbd\x0b\x5d\x30\x23\xaa\xc6\x79\x62\x74\xa7\x51\x7c\xae\xcc\x8f\xab\x0e\xce\x8e\x51\x9e\xbd\x6a\x2b\x83\x7e\x67\xb1\xa9\xca\x9a\x65\x8d\xcc\xec\x3c\x6b\xa0\x9f\x03\x83\xd6\x85\x30\x46\x50\x75\xda\x4a
\x66\x08\x85\xa3\xca\x42\x87\x85\xb1\x1a\x31\x09\xa3\x7a\x09\x6b\xe7\x27\x4a\x53\x3d\xf7\xaa\x36\x0a\xce\x0b\x7e\x7d\xce\xf5\x69\x6e\xae\x70\x34\x57\x8c\xa2\x3b\x22\xf8\xe5\x97\xb4\x61\xec\xb6\x32\xdf\x79\x5c\x30\x42\x84\xa1\xe7\x73\x0e\xb1\xeb\xd3\xc4\x96\xa5\x27\x3d\x6e\x50\x32\x86\xd3\x5a\x63\x82\x8c\xd2\xf5\x29\x04\x38\x27\x97\x8d\x84\x72\x4b\x8e\x04\x1c\xda\x8f\x25\x25\x14\x3e\x0a\x76\x29\xe0\xd5\x0a\xf2\xdd\x2c\x93\xef\xdb\xba\x6d\x51\x31\xd9\x72\x01\x90\x29\xf6\xa4\x8b\x29\x33\xd2\xc9\x2a\xc6\x6b\x9e\x08\x67\x31\x75\xf3\xa8\x63\xde\xb7\xd6\xd7\x98\xbc\x12\xa0\xc0\xa8\xe7\xc6\x00\x28\xdb\x62\x59\x4d\x16\x27\xe5\xb9\x5a\xef\x61\x92\x0c\x0a\x3f\x4b\xc4\xb0\xf2\xe3\x92\xb7\xe4\xad\x99\x84\x31\xc6\x3d\x26\xf0\x5b\xb0\x9f\x02\x8e\xff\xbe\xf5\xb9\xed\x14\x9d\xa0\xf0\x54\xb0\x63\x01\x9f\x56\xcc\x62\xb7\x3c\x8b\x27\x0d\x14\xfb\x49\xd8\x5c\x0f\x96\x6f\x19\xab\xad\xae\xe2\xba\x4c\xe0\x65\x5d\x81\x83\xbc\x38\x81\x5f\x82\xc5\x12\xde\x2f\xb3\x7b\x8e\x77\xb1\x02\xfc\xa8\x50\xc8\xd6\x5f\x8a\xd2\x19\xd2\x32\x7d\xdf\x90\x5f\x02\x78\xba\xff\x33\x86\x22\x31\xfb\x49\x85\xa0\x55\x41\x91\x87\x44\xd7\xf1\x86\x80\xa6\xbb\x3b\x4c\x37\x77\xbc\xb0\xaf\x91\xd5\x89\x62\x53\xc3\x97\xda\x19\x7a\x2f\xc8\x89\xf5\x66\xc3\xeb\xda\x95\xfa\x2d\xc8\x01\x39\x1d\x93\x8f\x02\x9e\x0a\xd2\x12\xa4\x4b\x29\x7c\x11\x64\x88\xb9\x6e\x8d\x42\xf0\x51\x40\x56\x7c\x85\x2f\xe1\x99\xa8\x61\x0d\xce\x3f\xff\xd8\xc9\x5c\xa8\x90\xaf\x41\x37\xc9\xc0\xfd\x05\x24\x1b\xbc\x90\x7f\x70\x8a\x9e\x99\x11\xbc\x13\xec\x2d\xbc\xa8\x1d\x67\xfe\x41\x0c\xf8\x2c\xd8\x3b\xf8\xde\xb8\x99\xe5\x5e\x6f\xa0\xbc\x5b\xf2\xd9\xcc\xf2\x0b\x81\x69\xd2\xd0\x28\xfa\x9b\x16\x5d\xd8\x48\x99\xc0\xd7\x4a\xf5\xd2\x71\xde\x0a\x1b\x8a\xff\x27\xdc\xcf\x47\x62\xa9\x32\x2e\x53\x69\x4c\x62\xb0\xd8\x9b\x2a\xa9\xa0\x01\x8d\x4e\x3d\x63\x36\x35\xec\x93\xaf\x96\x83\xd1\x04\x7e\x08\xb6\xf8\xc1\xb8\xf4\xf8\xbd\x19\xf3\x38\xc5\xfc\x8f\x58\x2f\xe3\x2b\x8c\xdd\x49\x6d\xc6\x14\x78\x23\xd8\x0f\x01\x32\x5e\xad\x9b\xa8\x86\xf2\x4c\x37\xd1\x75\xe5\x6b\xd2\x6d\x0d\xa4\x4d
\xcf\xe9\xa9\x34\x4d\x27\x88\xb8\x56\x77\xc8\x98\x6f\x7a\xec\x4d\x14\x4d\x33\x95\x23\xaf\x5d\x96\x2b\x32\x97\x2b\xb9\xdc\xc1\x78\x61\x1f\xf3\x61\x05\x7d\xbc\x0c\x95\x4f\x0d\x8e\x1d\x9b\x04\x46\x9f\xc7\x4f\x52\x99\x27\x85\x69\x74\x34\x33\x86\x40\x68\xa6\x23\x8e\xff\x03\xdf\xd7\x3e\x11\xb1\xe5\x9f\xe9\x31\x12\xf0\xb8\x59\xb5\x89\xe3\x7a\x93\x6c\x4c\xde\x08\xd0\x31\x28\xe4\x09\x78\x55\x1b\x6f\xcf\x6c\x6c\x26\x10\xc6\x2c\x88\x21\x8a\x59\x18\xc3\xa8\x11\x36\x4a\xc3\x57\x3a\x93\x86\x7e\xed\x62\x54\x55\xde\x54\xd3\x9d\xd5\x56\x35\xb2\x2b\xe3\xe2\x4f\x2c\x17\xc7\x73\xf0\x9c\xdf\x32\x55\x49\x1a\xf5\xdf\xc0\x6c\xe7\xf3\xee\x9e\xd5\xd5\x6a\xf0\xf1\xe3\xb2\x9a\x9e\xa0\x3a\xc7\xff\x8f\xf9\xb3\x3d\x99\xb7\xe1\x82\x09\x4c\x70\xce\x1f\x25\x70\xdd\x34\xcd\xeb\xe8\xe1\x34\x2f\xdc\x56\x42\x61\xb8\x62\xa9\xaf\xe3\x6c\x2d\x5a\xf1\x8a\x73\x8b\x32\xcd\x87\x76\xf4\xf6\xde\xa5\xa1\xde\x43\x12\x9a\x89\xa1\x85\x52\x2d\xd2\x24\x11\x3c\xd3\x21\x35\x70\x2f\x4e\xf2\xca\x9b\x94\x5a\xcd\x43\x75\x7a\x86\xa6\x3b\x3d\x10\xac\x07\x31\x33\xa6\xad\xd9\x1d\x81\x0d\xd9\xb4\x95\xc2\x94\xec\x45\xa7\x52\xc5\x50\xf1\x55\xcc\xc6\x64\x14\x43\xf1\xcf\x53\x41\x02\x41\x81\x70\xcd\xee\xf9\x0b\x6f\x12\x43\x70\xe1\x91\x58\xb3\xfb\xe0\xc2\x9b\x09\x08\x6e\xbd\x28\x06\xff\xda\x9b\x89\x84\xba\xc1\x85\x79\x11\x6b\x37\xb8\x35\xef\x62\xed\xfa\xd7\x49\x0d\xb9\xe0\x88\xb9\x29\x2d\xf4\x73\x1c\x8c\x2a\x8d\x7c\x6d\x61\x99\xb9\x76\xf9\x8b\xcc\x37\xc3\x32\x65\xdd\x27\xad\xd8\x14\x05\x17\x60\x35\xf3\x54\x2d\xef\x61\xe6\x60\xb3\xfe\x78\x77\xb7\x54\x4d\x15\x24\xda\xa9\xd6\x02\xc1\x72\x76\xae\x17\xec\x86\x31\x19\xc6\x80\x48\x07\xb7\x4b\x14\x98\xa2\x96\xdb\x00\x02\x5d\x93\x14\x7e\x0b\x82\xfe\xb1\x3a\x23\xbc\xc4\xd5\xac\x18\x96\x86\x37\xcf\x62\xcc\x33\x0f\x13\x61\x04\x3e\x11\x9a\xfd\xcb\xf9\x17\x20\xa7\x40\xdf\xc9\x77\x41\x7a\x46\x40\x92\x1e\x35\x02\x86\x54\x54\x82\x6e\xa6\x2d\x08\x6d\xd5\x05\x1e\x83\xd0\xf0\x5a\xa0\x97\x96\xc2\x45\xcc\x48\x60\x0f\x4b\xba\x49\xdd\x7e\x3b\x24\x2f\xec\x3d\x6e\x4c\xa7\x35\xc8\xd4\xc4\x2e\x48\xea\x2d\x6e\x3a
\x7b\x64\x36\xad\x6c\x86\x20\x93\x60\xe1\x2a\x09\x16\xfe\xe7\x96\x47\x00\x31\x0b\x50\x7a\xa5\xc2\xe1\x3f\x34\x3e\x96\x4c\x0e\x23\x2f\x62\xd7\x5a\x1b\xe7\x8d\x1b\x5a\x19\xdb\x78\x35\xf7\x3d\xcf\x77\xfc\x5d\xbc\xd2\x79\x6c\x6c\xdc\x18\x14\xfc\x4c\x95\xbc\x1b\x43\x4e\xe9\xbb\x31\xf9\x51\xcb\xff\x54\x4c\xd2\x33\xb1\xd8\x66\x59\xc2\x55\x2d\xd7\x2d\x44\x2e\xa6\x10\xc7\x6a\xd2\x68\x68\x06\xa3\x71\xbd\xc4\x4d\x5b\x64\x88\xfc\xa9\x6b\x61\x60\x8e\xc9\xa5\x40\x44\x2f\xd1\xec\x30\x7f\x34\x1e\xf9\xfc\x01\x21\x6c\x7c\x8c\x9a\xec\x19\xe2\x74\xdb\x34\x4b\x76\x21\xff\x66\x74\xfd\x3f\x4f\xe8\xdf\x8e\x2d\x9d\x56\xe0\x67\x38\x14\x9e\x4e\xdc\x7e\x05\xc9\xe5\x1c\x2f\x19\xf8\x69\x5c\x01\xbf\x70\xd7\x33\x0f\x38\x46\x15\xe0\x80\x9c\xca\x33\x0a\xfb\xe4\x2e\xce\x07\xbd\x78\xc3\xaf\x5a\xf1\xb6\x5c\x31\x3b\xe0\xcb\x6b\x5e\xa6\x83\xd5\xf6\x4f\x5c\x3c\x15\x30\x6e\xc9\x38\xce\x20\x50\xcc\xe3\x90\x24\x20\x1a\x97\xac\x95\x80\x4a\x97\xe9\xa0\x69\x99\xca\xa2\x8b\x97\x44\x57\xce\xc2\x79\x99\x85\xe3\x86\x7b\x45\x62\x4c\x58\xb0\x97\x39\x47\x8c\x3c\x8b\x51\x3e\xc7\x85\x5f\x04\x65\x9a\x91\xcf\x39\x98\x66\xb1\x56\x16\x67\xbc\x2a\xce\xd0\xa5\x12\xff\xa5\x1d\x7c\x43\x0e\x62\x2b\x0f\xca\x6a\x8b\x99\xa4\xc3\x98\x3d\x8b\xeb\xec\xa1\xb5\xd4\x63\x98\x7e\x8b\x0c\x7f\xa9\xd4\xd8\x82\xa3\x54\x7e\x3e\x15\xe4\xad\x4e\x0d\x2c\x12\x69\x76\xef\xf7\xbc\x13\x41\x48\x68\x7f\x3a\x89\x03\xc1\xa5\x77\x15\x43\xf0\xd4\x73\xc0\x81\xe0\xd0\x3b\x44\x29\xea\xdc\x3b\x10\xbc\xf3\xba\x09\x75\xfd\x1e\x35\x95\x42\xed\x06\x97\xa6\xde\x89\x20\xe6\xf7\x53\x6a\x6a\x9b\x5f\x87\xa6\x81\x7d\xeb\x5f\x53\xd3\xac\x86\x16\xe5\x22\x2d\x76\x17\x48\xae\xb7\x48\x59\x9b\x49\x82\x1d\xbd\xc3\x5d\x7f\x13\xc3\x7b\x41\x22\xdb\x05\x3e\x46\xd8\xf5\x0d\xd9\xcf\x4b\x7a\x34\x7d\x19\x21\xae\xf6\xad\x41\x34\x42\x30\x56\xf2\x50\x0a\x97\x8d\xb6\x42\x7a\x2a\x8c\x66\xc2\xc7\x3f\x58\x24\xaf\xfe\x60\x91\xbc\x8d\x9b\x5d\x9f\xd6\x41\x59\xf6\x4d\xfe\xac\xc5\xe9\x96\xbc\x8d\x2b\x8a\xac\xef\xd3\x04\x7e\x2f\x4b\x82\xdc\x77\x54\x93\x4c\x3f\x63\x36\xfd\x52\x4e\x44\xcd\xb2\xfc\x32
\x1c\x1d\x45\x63\x8d\xf9\x51\xcb\xa7\xfb\xd9\x72\x64\xb7\x12\xca\xd5\x7f\xc7\xf6\x9e\x6e\xf9\x74\x9f\xc2\xf1\x32\x5e\x69\x5f\x24\x73\x60\xb6\x60\x88\x09\xa8\xbb\x66\xe0\x3d\x9b\xc0\x32\xff\xe4\xae\xdb\x1a\xe0\x37\x06\x7e\xc7\xa8\xf5\xb4\x28\x7e\x15\x46\xdb\x1c\x3b\x4f\x9b\x80\x8f\xc9\x71\x5c\xfd\x66\x79\x06\xe7\x85\x86\x9f\xb1\xfd\xac\xbd\x05\x84\x9f\x7b\xff\x64\xf6\xc6\x54\xc3\xd3\x98\x1c\xc5\x86\x18\x28\x9c\x34\x0b\x5e\x4b\x14\x36\x4e\x20\xa1\xf0\xbc\xa9\xe6\x3d\xdf\xf7\x30\x0e\xc1\xc6\x40\xf2\x96\x77\x8b\x21\x00\x31\xf9\x14\x53\x38\xd6\xa4\x45\x53\x0b\xed\x65\x85\x2e\x30\x46\x38\xbf\xfa\x01\x71\xf1\x51\xcd\x5a\x43\x4d\x81\x31\x9b\x20\xce\x4e\x23\x7f\xc5\x0d\x27\x36\xe9\x09\x99\x14\x37\x8e\x61\xbf\x2f\x63\x78\x1e\x57\x8e\xc9\xf0\x21\x88\xae\xae\x84\xc4\x03\x1d\x0a\xef\x63\x7b\xfe\x56\x67\xff\x90\x0b\x23\xa5\x2e\xb5\x51\xbc\x2e\x75\xfe\x1f\x6e\xcd\x37\x4b\xdd\x57\x8e\xe3\x28\x7c\xa9\x47\x32\x16\x14\x5e\xc7\x0c\xdf\x62\x8e\x66\x87\xc2\xbb\x98\x91\x91\x66\x28\xd4\x3e\x68\x18\x6a\x78\x1f\x9b\xe1\x7c\xd0\x70\xa5\xe1\x75\xf6\xfb\x5a\xc3\xaf\xec\x77\x4b\xc3\x97\xec\xf7\x44\x03\x17\xe9\xef\x99\x06\x91\xfd\xb6\x86\x55\xcd\xc0\xac\x7c\x4a\xe8\x59\xc9\x98\x54\xd5\xb3\xdf\x3a\x56\x41\x14\x4d\x60\x64\x8f\xfd\xe3\x34\xe8\xe7\xf3\x0a\x73\xec\x35\x48\x98\xda\x0f\x37\xc1\xf7\x55\xca\xd0\x7d\x30\xf2\x14\x04\xd2\x93\x10\x28\x4f\x43\xf0\x02\x33\xf0\xc1\x87\x98\xcd\x24\x7c\xad\x65\x12\x63\xf2\x21\x86\x7b\x7e\xe3\xad\xf7\x80\x6f\x79\xeb\xbd\x04\x7d\x3a\xdf\x62\xf6\xd0\xfd\x9f\x0f\xe1\x47\xcc\xae\x25\xbc\x69\xc4\xee\x80\x8c\xc9\x17\xdc\x40\x98\xcc\x9c\xd7\xf3\x46\xbf\xdd\x26\x0e\x7a\xe0\x5c\x3e\x9f\xe3\xf1\xad\xb1\x3e\xc6\xda\x1a\x2e\x1c\xad\x06\xea\x19\xc9\xcd\x57\xc7\x2e\xb4\xbc\x8d\x5e\xf9\xdc\x07\x86\x9e\x00\xe1\xa1\xc0\xd4\x4d\x6d\xc3\x11\xd9\xc0\x04\x1b\x6e\x6b\x3e\x37\x32\x3e\x7f\x23\xcc\x1b\x61\x90\x4a\x1f\x87\xf6\xc5\xb0\xf0\x87\xdf\x10\xc5\xa1\x00\x96\x66\x71\x12\xee\xb0\xef\x33\xe1\xe6\x79\x27\xb1\x5a\x17\xfd\x67\xc2\x0d\x00\x1f\x7b\xc0\x5d\x1f\xb8\x1b\x00\x77\x87\xc0\x5d\x41
\xb3\xf7\x06\x9a\x8f\x7a\x5b\x76\xa0\x90\x3a\x7e\x20\x62\x46\x9b\x18\xb1\xd8\x7e\xd5\xa8\x82\xe5\x02\x4a\x01\x84\x90\x76\x6b\xb1\x8b\x28\x8c\xd2\x94\x19\x0b\x68\x99\xf2\xb4\xeb\x0c\x43\x61\x94\x16\x33\x82\x1c\x29\x4c\x24\x07\x23\x5c\x49\xc1\x6b\xe4\x4f\x4d\xc4\x5e\x0a\xdf\x54\x08\x62\x08\x62\x9a\xc7\x13\x71\xcc\x34\xe6\xdb\x6f\x58\xe3\x47\xb5\x87\x78\x12\xbd\x18\x27\x16\xd3\x45\x59\x73\x43\x34\x4f\xbd\x0c\xb7\x44\xd8\xc9\x0f\x28\x84\x8b\x7a\x26\xf6\x6d\xea\x29\xbc\x32\xb9\xa8\x02\x94\xc0\x04\x05\xa0\xd0\xb2\xbe\xb8\x6e\x7c\x16\xf5\xbc\xaa\xbd\x11\x53\xbb\x02\xd5\x65\xaf\x4e\xaa\x99\xd2\x84\x02\xe7\x8b\x61\x7c\x1b\x3d\x9b\x1d\xba\xdd\x4e\x7f\x0d\x4b\xbf\x85\x91\xb9\x59\x77\xe6\x11\x29\xd1\xfe\xe0\x85\xbd\x3d\xc4\x83\x7f\xd1\x0f\x70\x6e\x43\x9c\x5b\x1b\x2f\xe9\xe3\xdc\x56\xd1\xb2\x2a\x61\x90\x53\x09\x7e\x04\x26\x00\x85\x59\x29\xb2\x85\xef\xe2\xc2\x63\xc2\x94\x24\x4d\x34\x81\x1d\x71\xcc\x01\x10\x30\x8e\x1d\x71\xa4\x4d\x22\x18\xc7\x21\x0e\x29\x8c\x0c\xf1\x80\xcf\xf8\x9f\x36\xc1\x02\x16\xa9\xd1\x0a\x86\xce\x63\x83\x45\x54\xa1\xc0\x11\x22\x92\x1f\x0e\x42\xf0\x7f\x34\x8f\xc3\x6c\x1e\x87\xcb\xf3\x68\xa6\x01\x53\x57\xe1\x4e\xf6\x61\xc6\x0c\xd2\x13\xb3\xb7\xe1\x7a\x69\x6f\xf7\x70\x5a\x7c\x7b\x4d\x6f\x71\x46\xb5\xcd\x95\x90\xcf\x68\x9a\x21\x32\x9d\xd1\x72\xc3\x00\x3b\x0f\x59\xe0\x0e\x31\x61\x8e\x00\xbc\x71\x09\x18\xcd\x06\x33\x36\x42\x14\x46\x88\xc2\x68\x69\x66\x03\xd7\x87\xa0\x98\xd9\xd0\x2c\x8d\x1b\x40\x68\x40\x96\x76\xb2\xa1\xf9\x68\x09\x97\xf2\xac\x86\x7c\x85\x63\x2f\x9b\x3e\x6e\xa6\x8e\x9b\x69\xab\x06\x84\xda\x4b\xed\xad\x76\xdb\xfe\xe0\xe5\xb7\xc3\x74\xfe\xb3\xa7\xc2\xdc\x0c\xb8\x11\x7c\x98\xb2\xa0\x38\x43\xac\x7d\xa9\x92\xf2\xa8\x75\x85\x8b\x66\x24\x66\x70\x35\xe4\x89\xdc\x2a\xfa\x2b\x1a\x29\x79\xad\x70\x17\xf9\xb8\xfa\x41\x4e\xee\x71\x96\x1f\xbe\x70\xcd\xf0\xba\xc9\xb0\xae\x21\x9e\x1d\xbe\x60\x07\x61\x06\x3e\x62\x61\x79\xd5\x34\x87\xd0\xe5\xf9\x42\x45\x9c\x84\x66\xef\x44\xf9\x5a\x04\x71\x65\xb0\x29\x23\xe7\x78\x77\x8b\xfe\x45\x51\x10\x27\x30\x5a\x8e\xe3\xae\x89\xdb\x0d
\xe2\x9a\x83\x70\x85\x99\xf1\x15\x8e\x5c\xd9\x91\xbf\xc2\x9b\x87\x7b\xdd\x62\x1e\x79\xb6\xd6\xe5\xe1\xdb\x0f\x03\xe2\xd4\x84\x66\x6a\xc2\xac\x2c\x32\x53\xa3\x8a\xa9\x89\xb2\xa9\x19\xb1\x68\x61\x6a\x22\xd7\xfc\xcf\x87\xc8\x0d\x60\x4c\x46\x86\xed\x46\xc8\x5a\x9a\xa6\xc7\x12\x69\x5a\x95\x2f\x4d\x51\x73\xf1\x98\xf8\xe6\x9d\x4f\xc2\x92\x30\x47\x6a\xc7\x23\x87\xfa\x09\xcc\x5c\x7b\xf7\x7f\x35\x6f\x87\xf6\x33\xf7\xe9\xfc\x94\xe9\xb1\xb2\x77\x90\x2d\xb5\x52\x85\xc3\xd2\x65\xb6\x2d\xd1\x46\x1a\x26\x09\xc9\xef\x30\x66\xb4\x35\xc0\xe9\xd2\x39\x1d\x71\x43\x07\x01\xa5\x5e\x79\x7e\x34\xcf\x87\x95\x4e\x41\x40\xcb\x13\x49\x61\xb6\x3c\xce\xcc\x0c\xc2\xfa\xaa\x90\x78\xda\xe0\xa8\x0b\x89\xa7\x17\x98\x61\x16\x94\x6f\x44\x8f\xb6\x12\x6f\xd2\x2c\x53\x15\x49\x4f\x1f\x74\x91\xf0\x51\x60\x22\xd6\x19\xc7\xb7\xde\x2d\x89\xcd\x2f\x81\x9f\x40\x49\x28\x5c\x57\xb7\x74\xf5\xdb\xd3\x43\x11\x44\x43\xf1\xf9\xe4\xd5\xb3\xe8\x6a\x1a\x49\xcc\x62\xd9\xf0\x15\x6a\x18\x36\x5e\x72\x38\xd2\x99\xf3\x76\x60\x35\x79\x2f\xf5\xa7\x65\x61\xdd\xad\xe6\xe9\x7a\x13\x83\xc3\x9c\x34\xdf\x9e\x36\x0a\xae\x36\x5a\x6d\xbb\xbd\x8e\x7f\x2b\xf1\x17\xa8\x0a\x5d\x73\x34\x21\xcb\x5e\xdf\xdc\xb4\x2e\x1f\x01\x5e\xf3\x52\xd2\xf7\x94\xd1\xe2\x61\xf1\x84\x03\x87\x21\xc7\x8f\x7a\x2c\x04\x1a\x5f\x35\xe0\x09\xa2\xce\x1c\xac\xf0\xf2\x3c\x45\x6c\xc6\x16\x95\xab\xf3\x10\xc2\x70\x64\xb5\x76\x6d\x14\x64\x6d\x47\x85\x59\x65\x6b\x82\x41\x8e\xb4\x0d\x07\x6e\x7a\x36\x6b\x41\xa4\x0d\xe4\x6d\x01\x11\xe9\x1c\x3e\x74\xcc\x6c\x8f\x8c\xe9\x40\x84\x99\x3f\x8c\x9f\x10\x2e\x1f\x08\xd7\xf7\x04\xa5\x60\xe6\x00\xaf\xb0\x07\xd7\xd4\x6d\x0d\xc2\xd8\xc3\x10\xdd\x16\x87\xd0\x6c\x75\x03\xa4\x6d\x3f\x08\x43\x0d\xa8\x63\x78\x6b\x2f\x9a\xc1\x05\xaf\x09\xb5\xcb\xa3\xfd\x3f\x69\x18\x93\x1f\x31\xf4\x00\x5f\x7d\x8b\xe1\x6b\x4c\x9c\x7f\xfe\x19\xa0\xd1\x29\x5d\x7e\x4c\xed\xb1\x9f\x3f\x38\x26\xa9\x3f\xdf\x06\x9d\x8e\xc9\xe7\x18\x0d\x75\xb4\x59\x2e\x35\xf5\x8e\x89\x19\xc7\xa5\xa6\x60\x86\x75\xc5\xe1\x5d\x0c\x27\x44\xc2\x3d\x3f\xf6\x2e\x35\xf0\x91\x87\x69\x17\xaf\x3d\xed\xfa
\x49\x75\x03\xbc\x88\x3d\xe1\xf2\x04\xa6\x3c\xbd\x29\x74\xce\xb3\x30\xb9\x1b\xfb\x6b\x27\x81\xbb\xba\x91\x5c\xa0\xfc\x59\xca\x0f\x91\xe5\x5b\xb0\xdf\xa8\xe6\x78\x27\xa2\x7a\x6d\xdc\x98\xb3\x95\x82\xcd\xa2\xe0\x71\xa5\x60\xab\x28\x78\x52\x29\xe8\x16\x05\x3b\x95\x82\xec\x33\xba\x6b\xe7\x7c\xe1\xfb\xb5\x37\x7c\x51\x3f\x9f\xf2\x24\x81\x71\x9d\xc5\x58\xba\xa1\x75\xdb\x50\x9e\xdd\xd0\xda\x6f\x28\xcf\xfc\x6c\x07\x0d\xe5\xd9\x0d\xae\x67\x38\xc9\x9b\x09\x1c\xf2\x55\x86\xf5\x42\xb4\xa1\xb1\x3a\x8d\x15\x71\xc4\x99\x94\x70\xd9\xc8\x61\x84\xb1\xf5\x1f\x0a\x69\x2f\x67\x7c\xac\xb7\x8e\x13\x78\xc5\xd9\x11\x27\x63\x72\xc9\x61\x13\xb6\x36\x29\x85\xb7\xf8\x55\x8b\x9f\x9c\xed\x93\x43\xa3\xee\xbc\xe2\xf0\xd6\xfc\x47\xe1\x37\x67\xd7\x70\xbc\xdc\x63\xd9\xa3\x6d\x18\xd4\x6f\x0e\x5b\x9b\x20\xed\xc7\xba\x8c\xc4\x42\xde\x96\x0e\x44\x1b\x92\x4b\x3f\xb5\x2a\x8a\xcb\x20\x78\x38\xd1\x97\x4c\x80\xb2\x2e\xe8\xa7\x0d\xf6\x3c\x4f\xe0\xd3\x9f\x50\x38\xe2\x44\x3d\xdc\xda\xcc\x39\x9e\x2e\x4e\xc8\x32\xdc\x5c\xde\x97\x66\x47\x1a\xc6\xdf\xa2\xa0\x18\x86\x71\x9e\x70\xa6\x24\x3c\x6f\x9c\xd4\xee\xde\x2b\x7c\x1e\x48\xcf\x30\xbf\x97\x9c\xfd\x80\x5f\x0d\x88\x66\x21\x3c\xef\x6b\x05\xbc\xd1\xd8\x53\x6c\xb7\x36\x1f\x28\x57\x80\x60\x27\xe9\x42\x6c\x6d\x82\xde\xe8\xe1\xa7\x39\xe4\xe0\x8b\x61\x66\xe7\xd4\x53\xee\x39\xde\x90\xf9\xc4\x8d\xd5\xe9\x16\x8c\x1a\x97\xe9\x17\x27\xca\x1d\xd1\x8e\xe1\x2c\xcf\x39\xec\x80\x78\xf0\x8a\x53\x30\xac\xa3\x50\x68\xca\x35\xd3\x65\xc5\x62\x0a\x5f\x38\x1b\xc1\xeb\x26\x1f\x46\x89\x67\xab\xbd\x6e\x31\x97\xef\x39\xac\xf7\xe0\xfe\x1c\xdd\x20\xfa\xe1\xd6\xe6\xbc\x0b\x23\x2f\x4b\x44\x6e\x59\x08\x70\xef\x96\x7c\xc1\x31\x61\xac\x5c\x1a\xe1\xa7\x36\xcc\x20\xf1\xd8\x01\x89\x83\x83\xa0\xd9\xe1\xc3\xbb\xda\x09\xeb\xee\xc9\xdc\x2b\xfb\x5f\x5b\x9b\x78\xd2\xfc\xc5\x68\x1d\x72\x43\x97\x7c\xa8\x37\xe4\x35\xde\x1a\xdc\xd0\xa6\x07\x09\x2d\x10\xf9\xf8\x7f\xf2\x84\xc2\x8b\xba\xe5\x5a\x97\x6e\x2b\x81\xcf\x4d\xeb\x9e\x6d\xfa\x2c\x7e\xef\xfb\x8a\x8a\x95\x40\xbf\x0f\x2b\x2a\x56\xc2\x82\xbf\xfe\x81\x17\x7d\xe3\xec
\x39\xfc\xa8\xa5\x34\xbc\x5e\xd4\xe4\x05\xee\xf4\x12\x8a\xa7\xcf\x09\xbc\xe1\xec\x1c\xe4\xe2\x77\x62\x16\xee\x67\xf5\xf6\x98\xa5\xf0\x42\xf7\x2a\x3c\xbc\x6a\xa3\x87\x24\x88\x37\x5b\x74\xbe\x5b\x41\x33\x6e\xc6\xa0\x82\xe6\x73\x5e\x19\xa0\xea\xdb\x32\xc4\xa6\x57\xd4\x7b\xc3\xf1\x70\x54\x05\xd0\x85\x1f\x46\x95\xdf\xe8\xa5\x21\x6d\x22\x60\x2f\x20\x0e\x6a\xf7\x5a\xc7\x71\x12\xe0\xc1\x7f\x10\xaa\x6f\x84\xe9\x52\xbc\xfe\x31\xf9\x44\xe4\x69\xf7\xcc\xc8\x60\xfb\xa9\x9a\x1e\x35\x12\xd6\xbe\xed\xc8\xd3\x5e\xa9\x68\x93\xa2\x04\x4e\x20\xa8\xc5\xc9\x4a\xea\x7f\xe4\xda\xda\xda\x9a\x93\xea\x0a\xff\x48\xa3\xb5\xd1\x04\xc2\x80\x1d\x4a\x88\x9a\xe6\xc1\xf9\x47\xfe\x23\x89\xd3\x89\x03\x22\x3b\x3d\xda\x71\xe8\x9a\xd3\x09\x02\x32\x0a\x52\x4f\xec\xa8\xa1\x4b\x3f\x40\x6e\x96\x80\xbf\x0c\x7a\xd5\x41\x4b\xe6\x28\x28\x6c\xd3\xd2\xd1\xa6\xbd\x53\xcd\x03\x12\xe7\xfc\x54\x96\xbf\x82\x6c\x63\x81\x8a\x34\xff\x99\xff\x8f\x8c\x04\x51\x46\x7f\xb3\xa1\x53\x8a\xd2\x76\x7b\x4c\x44\x00\x7e\x6c\xf6\x24\xc1\x94\xd0\x10\x58\x7a\x12\x03\xc7\x75\x3a\xb1\xe7\x9c\xfe\x0b\xbf\x76\xfe\xaf\x33\xc7\xde\x02\xc4\x40\xb3\x9a\xbb\x9b\xf6\x1c\xc8\xda\x3b\xce\xa9\x9d\x2b\x97\xd3\x8e\x73\xe6\xf4\x4b\x70\xc3\x55\x50\x36\xbd\x34\xce\xdb\xe5\xf6\x6e\x8a\xd5\x3b\xa3\x42\x87\x1e\x31\xa3\x49\x0e\x9c\x4f\x17\x62\xed\xf5\x2c\x92\xee\x73\xd4\xfe\xdd\x48\x8a\xe3\xd1\x1a\xd7\x6b\x97\xb3\x48\x3a\x9d\x4c\x2d\xfb\x82\x37\xa0\x3c\x67\xa9\xaa\x43\x3b\xce\xda\x88\x87\x13\xfc\xa6\xfb\x9a\xbe\x10\x6b\xa3\x68\x32\x89\x6e\xec\xb7\xa8\xe3\x80\xfc\xe0\x24\xa2\xa6\xd6\x0d\xbf\x9b\x79\x4e\x7f\x81\x8c\x0c\xe9\xe0\x80\x46\x30\x26\x3a\x80\x28\x80\xc8\xa8\x9a\x92\x71\xa6\x59\x84\xa7\x5b\x01\x53\xcb\xca\xb1\x73\xc2\xe5\x5a\x28\x75\xb4\xc6\x6b\x46\x80\x9f\xc7\x97\xd1\xda\x34\x9a\xcd\x42\x3f\x9c\x84\x3a\x14\x33\xa7\x63\x07\xdd\x3c\xbe\x75\x87\x56\x4e\xd9\x7c\x5c\xfc\x19\x2e\x49\xb6\xf8\xa8\xbe\x3a\xef\x55\xe4\x4f\xc4\x95\xed\xc8\x0c\x1b\xd3\xef\x34\x41\xee\x38\x9e\x19\x2a\xee\x19\x6f\xb9\xed\x79\x78\x2d\xa4\x85\x80\xf5\x1c\xda\x21\x41\x40\xc6\x24\x0c\x60
\x1b\x66\xa6\x7d\xfa\xda\x37\xfc\x68\x16\xb0\xa9\x84\x49\xc0\x66\x01\x79\xa9\x29\x5c\x07\x6c\x28\x61\x58\xc7\x01\xf3\xd9\x9e\x04\x30\x26\xd7\x01\x94\xae\x65\x37\xd7\xbf\xe7\x53\x4f\x03\xff\x61\xd8\xf8\xc8\xf2\xf1\xab\x80\x5d\x49\xb8\x08\xd8\x8d\x84\x69\xdd\x2e\x2d\xe9\x87\xe7\x0d\xe5\x4f\x20\xc0\xf2\x9b\xa6\xf6\x3b\x70\x8e\x15\xee\x1a\x2a\x64\x07\xb9\xe3\xc0\x88\xe0\x9d\x04\x6e\x2b\x63\xa8\x7e\x16\x67\x4c\xae\x82\x9a\xf0\xaa\x31\xb9\x08\x20\x86\xa5\x48\xd9\xbb\x80\x04\xd5\xd8\x22\xac\x79\x13\x90\x03\x72\xca\xe1\xdc\x14\x9f\x19\xf3\xa5\x62\x6a\x4c\x91\x79\xe1\x17\xfc\xbd\x3b\xfb\xc3\xa0\x87\x91\x22\xc1\xaa\x10\x1b\x5c\x91\x5b\x72\x9b\x2f\xcb\x38\xa0\xb8\x04\xf6\xda\xff\xc1\xaa\xc5\xf9\xea\x69\xf0\x63\x4f\x81\x9f\x5e\x32\x7d\x16\xa4\x86\xce\xa1\xe1\x0e\x0b\xc3\xae\x1e\xf7\x3b\xb1\x9c\xaa\x28\x10\xb3\x99\x18\x3a\x5e\x31\xf6\xae\x3d\x08\xc8\xee\xcf\x95\x4a\x7a\x69\xc9\x2c\x9e\x4e\xd5\x52\xbb\xcd\xa5\x23\x82\x69\x40\x9c\xcf\x72\x2c\xa3\x1b\xb9\xa6\xef\xa6\xc2\x5b\x73\x3a\x18\x38\xf1\x2c\xa0\x70\xd4\xb0\xb2\x8f\xc0\xb7\x17\xf6\x03\x76\x4b\x86\x01\x14\x17\xfb\x9e\xde\x39\x70\x14\x10\xd3\xd8\x96\x64\x57\x00\x97\x0b\x34\xd7\xc2\x81\xc3\x00\xee\x02\x72\x10\xe0\x09\xea\xc7\x80\x45\x0d\x2e\x59\xfc\x24\x50\x3e\xa7\xbf\x0c\xc1\x1f\x78\x21\xf0\xd7\x5e\x0c\xfc\xbd\xa7\x52\xfa\xbf\xf0\x04\xf8\x37\x1e\x07\xff\xce\x8b\xc0\x7f\xee\x61\x38\xe4\xab\x0c\x4f\xc9\xaf\x84\x03\xcf\xb0\xcf\x9a\x91\xf1\x6d\x5c\x21\x0a\x6f\x03\x36\x8d\xe1\x67\xc0\xde\x0b\xf8\x5d\x2b\xe5\x7e\x9a\x0d\x9f\x85\xd8\x3e\xb6\x21\xb6\x34\x81\xe3\x80\x69\x09\x4f\x83\x45\x67\xac\xfd\x2a\x66\x96\xaa\xf5\xe1\xe9\x3f\xb3\x5b\x3f\x3a\x7b\xe8\x6a\x31\x43\xc7\x4d\xee\x22\x48\xb3\x4d\x75\x64\x91\x6d\x81\x59\x35\xc1\x8a\xf9\x4f\x01\x7b\x16\xc3\x49\x2d\x4e\xf8\x19\xf3\x9e\xd5\xd5\xea\xe3\x94\x2b\x91\x7f\x46\x88\xdb\xee\x12\x78\x6e\x48\xf1\x6d\x95\x14\xc3\x11\x79\xb2\xf7\x42\x94\xd0\x3b\x09\x88\x73\x70\x3b\x15\x81\x16\x43\xc3\x3c\xaf\xa2\x99\x5e\x7b\xb2\x36\x0c\xcf\x43\x3d\x83\x35\x3f\xd6\x6b\xe7\x91\xb6\x92\xc4\x36\xa4\xe9\x78\x9e
\x06\xc4\xe9\xba\x86\xb4\xf2\x71\xb9\xad\xc1\x49\x40\xfe\xf5\x4a\x5e\xf3\x49\x38\x5c\x1b\x4d\x22\xae\xbd\x35\xe7\x5f\x1d\xd9\xf9\x97\xf3\x2f\xea\x3d\x15\xe4\x38\x20\x3d\xb1\xf5\xc0\x3a\xa3\xe0\x9b\x20\x9f\x02\x12\x61\x40\xe5\xcb\x80\xbd\xd5\xf0\x2b\x60\x7f\xf8\x88\xd7\x4b\xa3\xbb\xec\x76\x1f\xa8\x07\xbb\xdd\x07\x3d\xb1\x65\x7e\x13\xbd\xc1\x29\x3e\x18\xe0\xa2\x13\x9b\x2d\xfc\x3e\x68\xf8\xb2\x40\x17\x75\x8a\x8a\xfa\x06\x82\x6d\x6f\x61\xb6\xee\xed\x1d\xc6\xf4\xa0\xe7\x75\x21\x66\xa2\x1f\x17\x19\xf8\x3a\x9d\x22\xe5\x6c\xb9\x71\x9c\xe6\xf8\xdc\x7e\x3c\x9f\xef\x3c\xda\xe3\xa5\x85\x57\xac\xd7\x7d\xa0\x3a\x7c\x63\xfb\x71\x9e\xbe\x13\x3f\xdc\x82\x31\x35\xb6\xa7\x0d\xe5\x29\x9a\xc0\x97\x12\xb2\x65\xb1\xb1\xbc\x80\x36\x10\xd4\xf0\xc1\x8c\xa3\xbe\x0f\x48\xf3\x22\x84\x52\x8b\x73\xa1\x16\x96\x41\x95\xb2\x6d\x54\x48\x00\x17\x5a\x1b\x75\x61\x15\x09\x54\x56\x2e\x81\xd7\xb5\xb4\x6b\xe0\x66\x58\x0c\xf9\x9d\x67\x21\xa0\x9a\xfa\x2e\x60\x6f\xe0\x45\x5d\xab\xf5\x31\x79\x67\x64\xae\xa4\xed\x36\xc1\xdf\xbd\xae\xb1\x32\xe6\xf3\xb4\x04\x9f\x68\x02\x9f\x6b\x95\xf2\x8d\x2c\xf9\x3f\x51\x0f\xb7\xe7\x5d\xba\x41\xd4\xc3\x5e\xb7\x3b\xef\xd2\x8e\x79\x83\xbf\x12\xf8\x5e\xb3\x35\xb2\x98\x1e\x5e\x3e\x11\xb3\x2b\x9b\x1b\xa6\xaf\x03\xc2\x33\xf2\x5f\xee\x7c\xab\xb7\xb3\xb5\x2b\x76\x1f\x10\xb1\xd1\x7b\xf2\xa8\x4b\x41\xb3\xc7\xbb\xdb\x62\xe7\x01\x21\xf1\xde\xd6\x7c\xbe\xfe\x22\x20\x82\x0e\xf8\x46\xcf\xe3\xb4\x43\x3e\x9b\xa7\x8d\xcf\x01\xc1\xca\x85\x03\xed\xa9\x20\xb2\xa3\x3a\x9a\x26\x99\x1f\x2c\xce\xbf\xdd\x9a\x56\xd9\xea\xed\xf1\x01\x22\xe3\xa9\x4c\x70\x14\xf9\x42\x37\x9f\xec\xf1\xf9\x7c\xf3\x09\x63\x8c\xb7\xdb\x69\xaf\x59\xed\xcd\xdd\x47\x8f\xb7\xc5\x0e\x5d\xc8\x54\x5a\x81\xb8\xd3\x7d\xf2\x68\x37\xaf\x93\xe7\x3a\xdd\xea\x96\xea\x3c\x7a\xf4\x68\x57\xec\x2e\xe6\x6e\xac\x80\xe9\x75\xb7\x76\x1f\xe7\x75\x76\x6b\xc1\xf4\xb6\xba\xdb\xbb\x05\x3e\x8f\xea\x01\xed\xec\x6e\x95\x90\x7e\x5c\x5f\xe9\xf1\x56\x6f\xf7\x71\x5e\xe9\x49\x6d\x77\x9b\xdd\x27\x4f\x76\x36\xf3\x4a\xbd\x6e\x2d\xa8\xcd\xad\x9d\xc7\x8f\x4a\xb5\x7a\xf5
\xb0\x76\x37\x77\x77\x8a\x69\xea\x6d\xd6\xc3\x7a\xfc\x78\xc7\x4e\xe6\x82\x94\x2e\xef\xd1\xab\x48\xea\x0b\xdc\xa1\xb1\x31\x86\x70\x97\x1a\x2d\x6b\x29\xde\xbf\x51\xbf\x7c\x4a\xb2\x8f\x34\xa6\xa1\x8d\x5f\x02\xb2\x4d\xe1\x77\x40\x9c\x0d\x87\x96\x5e\x6e\x96\x5f\xe2\x33\xa5\xf0\x61\x85\xe6\x2a\x1f\x10\xc3\x6c\x3b\xa8\xb2\x7e\xad\xd9\x38\x69\xbd\xec\x82\x5c\x05\xe5\xf2\x3f\x36\x0e\xfe\x17\x72\x00\xc4\xe1\x93\xb3\x8c\x97\x57\x87\xac\x57\x42\x16\x2a\xfd\x58\x98\x6f\xb5\xad\xe7\x62\x0e\x82\x80\xda\x78\xfb\x33\x5a\xaa\xfd\xa6\x2e\xa6\xb6\x9b\x60\xb3\x1f\x16\x8f\xc5\xb9\xfe\x10\xfc\xa1\x79\xcf\x36\xef\xd8\xe6\x75\x55\x36\xd2\x2a\x66\xb6\xcf\xfe\x30\xb0\xa2\xfc\xa9\x20\x13\xf2\xcb\xa8\xa4\xdd\xec\xff\x6d\x98\x26\x5e\xee\xfb\x1e\x50\xf8\xd6\x60\xa5\x3f\x8d\xe1\x6b\x60\x6f\xaf\xd5\xe8\xa1\x96\x4b\x7d\x5b\x14\x16\xd3\x0a\x9b\xd6\xa9\xba\x68\x94\x69\x14\x12\xa8\x35\xbe\xa9\xed\x70\x95\x7f\xc8\x9e\x7c\xb9\xf6\xde\x11\xba\x59\xc2\x18\x31\x93\x61\x83\xed\xb1\x99\x6a\xa0\x2a\x64\x17\x12\x74\x5d\xb5\x35\xfc\x46\xe1\x9b\x00\x64\x68\xe5\x88\xa8\xad\x65\x4d\x86\xf3\x80\x5c\x62\xa0\xaf\x0a\xe1\x48\xdb\xc8\xfb\x04\xe2\x90\x15\xca\x6a\x3c\x73\xe0\x32\x48\xb5\x57\x21\x87\xb3\x7d\xed\xc0\x8f\xec\x45\x3c\x35\xd3\x31\xac\xbc\x9b\x69\xae\x74\xb5\xda\x28\x94\xe7\x42\x4d\x55\x28\x35\xea\xa1\xf6\x6d\x96\x37\x63\x86\x3a\xf2\xab\x5c\x47\xe6\x52\x46\x1a\x13\x62\xce\x1c\xd0\x21\x6a\xcf\xfb\x64\x3f\x00\xe7\x5c\x48\xa1\xb8\x8e\xd4\xe7\x93\xb7\x0e\x08\x5b\x74\xa9\xd3\x76\x13\xee\x8b\x49\xd1\xe4\x2e\x20\x1f\x03\x9a\xff\x1f\xf0\x6c\x58\x69\xfe\x0f\xd3\x69\x1c\xd2\x05\x64\x1c\x78\x15\x34\xc2\x6b\x59\x85\x3d\xa8\x9b\x52\xeb\xc5\x0c\xc3\x55\x31\xa1\x1f\x63\x24\x18\xef\x55\x5c\xca\x21\x15\x35\x35\xc9\x4e\x2b\x32\x37\xe5\xa8\x81\x2c\xb2\x23\x11\xbf\xa1\x3c\xb3\x48\x67\x61\x7a\xe4\x31\x09\xd3\xe0\xc3\xeb\x5a\xda\x48\x9d\x4c\x09\x0c\x1b\x01\xfa\x9e\xa3\xc5\xad\x76\xb2\x74\x78\x21\x3b\x97\x70\x15\x1a\xed\xf4\x22\x5c\x7d\x8f\xe4\x2a\x24\xf7\xfe\x07\x63\xb0\x6c\x7b\x84\xb3\x18\x86\x21\xa9\xb3\x80\x5b\xa1\xbd\x91\xf7\xa1
\x72\x28\x9a\xce\xe2\x28\xfd\x7e\xb6\x99\x49\x1b\x63\x4b\x21\x08\x3c\x05\xc1\xd4\x93\x10\xbc\xf1\x2e\x35\x04\xef\x3d\x0d\xc1\x89\xb7\xde\xcb\x7c\xe2\x09\x85\x69\x98\x26\x73\x38\x6f\x5c\xa8\x1b\x72\x11\x82\xf3\xe2\xe0\x93\x03\x2d\x90\x30\x0d\xed\xde\xbc\x09\x6d\x00\x70\x2b\x24\x68\x0d\x3b\x42\xa9\x48\x19\x72\xa6\x36\x1c\xf8\xae\x61\xb6\x32\x2f\xf2\xf8\x0f\xcb\x77\x1b\xae\x90\x30\x44\xa5\x49\xfe\xf6\x43\x76\x2c\xe1\x20\x64\x6f\x25\x3c\x0b\xd9\x41\x68\x34\xf3\xc3\xb0\x29\xa9\xe6\x98\xec\x87\xb5\x11\xac\x21\xb1\xf7\x36\x6d\xe6\xa8\xa3\x4a\xdf\xf6\xda\x4c\x3d\x08\xf5\x37\xa0\xc7\x24\x4d\xa9\x96\x80\xc8\xba\xb8\x0c\xd9\x57\x09\x1f\x97\xa7\x3d\x75\x67\x64\xab\xfc\x5c\x12\x84\x7c\x19\xa6\x1a\xbc\xb1\x64\x17\xa7\xa6\x84\xc4\x61\x58\x2f\xb6\x48\x9a\xdc\xe9\x63\x68\xd3\x64\xd9\x04\x4f\x47\x21\x19\x6b\x0a\x07\x21\x69\x21\xf0\x22\x57\xd2\xdb\x4a\x1f\xe5\xd1\x74\x0d\x0a\x3f\x1b\xe9\x05\x51\xb0\x97\xa8\xfa\xdf\xa5\xfb\x89\xcf\xc6\xec\xde\xf7\x9e\x85\x10\x78\xaf\x42\x18\x7a\x6f\x43\x10\xde\xcf\x10\x46\x9e\xaf\xad\xe9\xf9\x3b\x84\xe3\x10\x9e\x86\xec\x9b\x24\x8e\x69\xe0\x50\xf8\x14\xb2\xa7\x12\x4e\x1a\xbb\x79\x6a\xa6\xf5\x53\x68\xcc\xd8\xf4\x9f\x83\x10\x24\x85\x8f\x18\xc5\xbc\xbf\x5c\xf0\x2a\xa6\x36\x81\x19\x85\xe7\x2b\x90\x3f\x09\xd1\xe9\xff\xc2\x10\xdc\x25\x4e\x77\x5d\xb2\x8e\x94\x27\x0c\xee\xaa\x9b\x75\xc9\x9d\x9d\xe9\x81\x4e\x9a\x88\x79\x4d\xdc\x06\x42\x0c\xc5\xd0\xa9\x6a\xe0\xce\x91\xd0\x37\x91\x1a\xaf\xd9\x5d\xb4\xa0\x6a\xe3\xc9\x7c\x1c\x10\x82\xc1\x09\xd4\xf5\xef\x5c\xff\x2b\xc5\xcf\xc1\x2a\xf3\x10\x44\x70\x13\xda\x34\xd4\xb9\x06\x9e\x1d\x39\x64\x4e\x50\xe7\xb3\x14\x99\xc5\xa6\xc4\x6c\x1a\xc9\x99\x58\x1b\xa9\xe8\x6a\x8d\x4f\x43\x14\xe0\xee\xe2\xc9\xb4\xf3\x8e\x4f\x46\x91\xba\x12\xc3\xb5\x58\x4d\xd2\x3a\x49\x82\xbe\x6d\xea\x8d\x43\x7b\xc5\x83\xc2\xcb\x5a\xb6\x29\x07\x8e\x56\xb1\x70\xbc\x34\x75\x5f\x02\xbf\xea\xeb\xb9\xad\xf9\x3c\x0b\x8a\x1e\x48\xf4\x6f\xbc\xaf\xad\x29\x64\x4d\xb4\x4b\x02\x5f\x6a\x2b\x67\xe9\x03\xdf\x87\xf0\x8d\x13\xd9\x71\x98\x63\x0d\xcf\xf0\x7f\x27\xc1\x9c\xe9
\x69\x90\x45\xc5\x50\x3c\x6f\x4d\x93\xcc\xbd\x6b\x18\x94\x9f\xc0\x8b\xb0\xf6\x10\xc8\x3f\x2c\x3e\x9a\x18\x8c\x3b\x24\xbb\xf0\x80\xb1\x18\x4f\x39\x7c\x34\x98\xbf\x0e\xed\x0d\xd1\x77\x21\x48\x41\xa4\xfb\x8d\x52\x7b\x8b\xd4\xaa\xfa\x1d\x65\xfe\x24\xf0\xb9\x8e\x49\xa6\x76\x29\x51\x15\xb7\xa7\xb6\x51\x3d\x79\x60\xd1\xf7\xc6\x4d\x80\x13\xf1\x19\xf9\x44\x2b\x4d\xe1\xd0\x58\xf7\x98\x64\x69\x1b\x1a\x06\x3b\x42\xcf\xc9\x7b\x0c\x31\x3c\x46\x43\xf9\xc8\x1a\xca\xaf\x31\xc6\xf0\x2b\x84\xcb\xe7\x3b\x9f\xe2\x7c\x52\x38\x45\x5f\x89\x5c\x4c\xd5\xb5\x70\xe7\xf6\x85\xb6\xd3\xf5\x22\x9d\xb6\x23\x0d\x1f\xc2\xca\xf5\x09\x85\xa7\x3c\x11\x1b\x93\xef\xb5\x6c\x7a\x4c\xbe\x84\x99\xf6\x49\x13\x78\x49\x42\xb0\xc9\xa3\xf2\xf4\x65\x70\xa4\xc9\xcb\xd0\x62\xb6\xde\x03\x9b\xf0\xec\xb8\x9c\xcf\x6c\xa1\x86\xc8\x6a\xa4\x0e\xda\x4a\x71\xd7\xde\xe2\x33\xc5\x85\xae\xf5\x2b\xc4\xdc\x1d\xc7\x59\xe6\x35\x40\xff\x75\xbe\x8c\xdd\x3d\x3c\x99\x19\x38\x83\xec\xb0\xa2\xed\x40\x44\x3d\xc7\x49\xe0\x5b\x93\x24\xb1\x15\x1f\x3a\x78\x8b\xb0\xd0\xf6\x6c\x0f\x33\xa7\xf3\xd5\xf4\x79\x56\x74\xf2\xdc\x60\x78\x1e\x82\x36\x0a\x21\x0f\x2d\xc7\xfc\xf1\x9f\x41\x5f\x0d\x34\x4e\x81\xbe\xf9\x83\x82\x26\xa3\x06\x0d\x22\xab\xa0\xa2\x65\x9a\xb3\xa7\x86\x23\xa4\x37\x6e\x63\x5a\x8f\x91\xe0\x8e\x90\xde\x82\xd7\xf8\x21\x85\xaf\x7f\x4f\x09\xab\xa9\x40\xba\xef\xff\x48\x07\xf1\x6a\x3a\xe0\x59\x71\x4a\xad\x86\x0a\x82\x1a\xca\xd0\x15\xca\x08\xf1\x61\x31\x23\x1f\x51\x4c\x00\x7a\x8a\x2d\xf3\xc5\xb8\xac\xf2\x42\xac\x22\x21\x1d\xfd\xef\xb0\x46\x8c\xc8\xc3\xdc\x9b\xb8\xf3\x0c\xdf\x45\x67\xb6\x7d\x3c\xd6\xc4\xc6\x14\x52\x38\xd2\x94\x96\xed\x3a\x0a\x22\x4a\xb5\xcf\x38\x4a\x35\x70\xde\xb0\xe4\xd9\x31\x55\xd0\x50\x9e\x51\x44\xd8\x50\x9e\x85\x39\x45\x0d\xe5\x59\xa2\xea\x51\x43\x79\x6e\x53\x44\xec\x48\xc2\x2c\x62\x42\x11\x67\x2a\xd4\x2c\x9c\x69\x9c\xfd\x83\xdb\x29\x97\xc3\xfd\xc9\xc4\x01\x3f\xa2\x30\x89\x9a\x18\xe6\x09\x51\x70\xcf\x5b\x5e\x1e\x80\x77\x12\xc3\xa7\x18\x0c\x65\xa6\x69\x43\xae\xeb\x70\x30\x12\x65\xe0\xdc\x67\x8b\x06\x78\x8e\xff\x02\x19\x9e\xa4
\xb4\xe3\x24\x0e\x2e\x61\x0b\x27\x72\x3b\x81\xab\x08\x8d\x8f\xa8\x36\xfe\x56\x32\x33\x96\x69\x23\x8e\xa8\x5b\xa5\x5a\x1c\x66\xd2\xae\x82\x69\xbe\x1d\xb9\x7c\x1d\x75\xc1\xcf\xb8\xe8\xd2\xda\x4a\x12\xb8\x89\xd8\x77\xb8\x8b\xd8\x57\x18\x37\x62\x24\xf7\x7a\x03\xc7\xc9\x52\x05\xa5\x22\xe7\x36\x62\x9f\x61\x3f\xfa\x83\xf7\xff\x9e\x1f\x7b\x1c\xf8\x67\x4f\x01\x1f\x19\x3b\x8b\x7b\x1a\x7c\x61\xec\xa2\x6b\x7b\x5f\xea\x20\x6a\xbe\x2f\xf5\x52\x90\x98\xce\xe7\x63\x72\x1b\x81\xf3\xff\x71\xcc\x5e\xae\x9e\xd2\x70\x36\x26\x77\x11\x38\x9e\x93\x26\xfc\xe0\xe9\xc9\x3d\xc7\x93\xfb\x6a\xe5\x2c\xb5\xef\xfb\x80\x8c\xc9\x77\x01\x41\x07\xb9\x43\x16\xca\x10\x2e\x25\x97\x8e\x58\x98\xed\xdc\x23\x4d\x26\x64\x3f\x42\xed\x74\x1c\xd9\xaf\xd1\x45\xe9\x09\x65\x39\xda\x34\xad\x14\xc3\xa5\xce\x4b\x29\x3c\x8b\x9a\xf2\x11\xbf\x14\x44\x2c\x0e\x2a\x4e\x07\xf5\xd0\x81\x2c\x7b\x8f\x5f\x93\x9c\xe1\x86\x1c\x58\x7c\xbe\x0b\x1b\x34\xa5\xf0\xc6\xfa\x38\xc2\xa7\x52\x48\x3a\x56\xc3\x18\x57\xfb\xad\x57\x0a\x87\x51\xfd\xbd\xaa\x97\xc2\xf0\xb9\x2a\x36\x22\xc5\x66\xe0\xa4\x57\x91\xaa\x29\x94\x8b\xd8\xb3\x67\xa6\x1b\x8c\x4c\xfd\x2e\x20\xee\xa0\x8c\xc6\x34\x07\xe3\x08\x62\xb4\x9d\xaa\x55\xed\x0c\xa1\xc1\xb7\x4c\x79\x16\x19\xb5\x88\x8c\x4e\x91\xf9\x1f\x4e\x1a\x38\x58\xc9\x31\x57\xca\x4d\x71\x58\x46\x46\x74\x7a\x66\x2f\x59\x54\x44\x39\x61\x72\x5a\xd1\xa0\x82\x66\x61\xed\xe6\x1f\x93\x9b\x08\x9c\x0b\xad\xa7\xde\xc3\x87\x0e\xa0\x2e\x7a\x14\x41\xd7\x4e\xfd\x23\xc3\x02\xbc\xa2\xce\xac\x52\xa9\x67\x2b\x3d\xc6\x4a\x46\xb9\xfe\x18\x31\xad\x89\x33\x8a\x02\xbc\x2c\xfa\x2a\x62\x3f\x14\xbc\x8d\xd8\x0f\x09\x3f\xeb\x56\xc5\x8e\x0d\xf9\x56\xf0\xda\x3b\xd2\xe4\x3a\x22\xda\xe5\x2d\x5a\x4a\x85\x7f\x4c\x4e\x88\x46\xbe\xd6\x32\xb6\x63\x64\x7d\xa0\xaf\x22\xd0\x6e\x30\x86\x97\x06\x54\x44\x84\xf5\x57\x9e\x84\x70\x28\x48\x2b\xa2\xf0\x31\xca\x84\xde\x86\xcf\xd5\xc6\x15\xd7\xc1\x05\xaa\x6b\x69\xe2\x86\xdf\xcb\xab\x52\x4e\xdf\xad\xf2\xfd\x95\x46\xfe\x60\x98\x32\xaa\x8b\xd6\x1d\x92\x16\x76\xd3\x38\x3f\x8c\xaa\x3c\x8e\x9a\x7d\x03\xbf\xa3\xfa\x34\x2a\xc8
\x6f\xac\xc8\x7a\x1a\xb1\xb7\x91\x31\x96\x3f\xd5\x6f\xa8\x94\x37\xea\x65\xde\x78\x4b\x7e\x46\x18\x77\x76\x42\x04\xdc\xf3\x9f\x9e\x76\xb9\xe1\x69\xc2\xe5\x3f\xc1\x0a\x84\x28\xbd\x2a\x28\xcc\xec\x0e\xcc\xbf\xde\x4b\x62\xfe\x18\x25\x24\xc6\x54\x0e\xb4\x7c\xff\x3c\x2e\xe7\x1c\xae\x85\xff\x22\x24\x31\x2d\x75\xf1\x3a\x84\x43\x4d\x62\x6a\xbb\xc8\xc1\xe5\x96\xa6\x59\xc7\xbc\x35\x0e\x77\xf1\xa8\x28\xab\xa1\x4a\x35\x16\x98\xfa\x31\x11\xe6\x35\xc6\xb0\xae\x96\x83\xfe\x63\x6f\xe1\xa8\xb2\x1a\xd1\xb5\x76\x40\x4e\xad\x4e\x89\x67\xf6\x67\xb4\x4e\xf7\xcf\x4c\xa5\x17\x02\xde\xd9\x24\x7b\x36\xc2\x0d\x1c\xdc\x67\x44\xba\x5f\xad\x60\x7d\x8e\xc2\xf1\x71\x02\x2f\xeb\x36\xd9\x3a\xda\x6c\xbf\x1a\x77\x00\xc4\x76\x0f\x7c\xf5\x08\x46\x96\x3d\x86\x97\x86\xa4\x51\xd7\x72\xa8\x77\x48\x04\x2c\x62\x4b\xd3\x63\xdb\x54\x66\x3b\x68\x11\x94\x37\x8d\x6e\xda\x2a\x71\x79\xab\x3c\x4f\xb7\x0a\x6a\x7e\x1b\xfe\xdd\xc6\x28\x14\x93\x61\xb1\x4d\xde\x37\x4e\x32\xc6\x30\xfe\xc9\x72\x4b\xff\x7c\x59\x86\x52\xb9\x21\x81\x31\xaf\x18\x7f\x63\x6f\x3f\x6c\xf4\x40\xe1\x7e\x5b\xbe\x03\xa1\x8a\x1f\x09\x85\xd7\x75\x53\xda\x0c\x3a\xbd\x28\x83\x31\xd1\x79\x27\xa0\xd3\xd0\x3c\x23\x01\x96\x3b\xd4\xc5\x0f\x63\x92\x37\xce\xc7\x17\x4d\x6e\xc9\xeb\x28\x0d\xff\xc4\x8b\xe7\x0d\x72\xa8\xbb\x97\x7f\x26\xe4\xd8\x7e\x3a\xb2\xc0\x58\x95\x71\x5f\x17\x65\x39\xaf\xfb\xf9\xab\x54\x0d\xe8\xd9\x6b\x1c\x34\xbd\xda\xd7\xb7\x7f\x55\x92\x32\x0a\x53\x94\xb9\x8a\xb2\x2a\xa5\x78\xc0\x18\xbf\x0e\x50\x6c\x86\xd8\xc5\x64\x47\x2e\x3f\x2b\x7f\x0a\x3e\xed\xaf\x90\xd1\xc2\xf5\xed\x85\xd5\x52\x53\xa3\x79\x04\xf8\x3f\x6c\x9e\xde\x35\x54\xf9\xe6\x2d\xc3\x69\xb7\xf3\x9f\x19\xd0\x10\x81\x46\x2c\x74\x7d\x18\xb1\xc8\xf5\xf3\x9b\x9a\xbe\xcb\x61\xc2\xfc\xf2\x37\x08\xc6\x1a\x6f\xfb\xe1\x8f\x28\xfb\x31\xca\x7e\xcc\xa0\x27\xb6\xf6\xe4\x60\x4c\xde\x45\xa0\x36\xb6\x61\x42\xbd\x5b\xf2\x22\x02\x69\xa4\x24\x3e\xe3\x77\x08\xb2\xa9\x5a\x5a\xe5\x03\x72\x2a\xd2\x51\x64\x4e\x81\x84\xc2\xe7\x15\xdb\xe0\x45\x54\xa8\x8d\xdf\x9b\x85\x9c\xae\xe3\x2b\xd6\x05\x63\xc4\xbc\xd9\x92\x9f\x23
\x10\x1d\xa3\xf3\x7c\x89\x40\x6c\x20\x81\xda\x5d\xf8\x61\xb9\xfb\x85\xeb\x72\x19\xe8\x1f\xdc\x3a\x78\x6a\xfd\x25\x69\xa5\xf5\x43\x62\xd3\xe3\xb5\xdb\x86\xb3\x58\xf9\x83\x7e\x96\x88\xbd\x87\x6f\xb5\x5a\xc2\x2d\xf9\x1a\x15\x5f\xa0\xf8\xd1\x38\x1d\xf6\x1e\x0f\x7e\xb6\xc0\xda\xe1\xb5\xca\xcf\x61\x25\xf4\x3a\x15\xb0\x20\x98\xcd\xec\x19\x5b\x6f\xf2\x8f\x88\x84\x98\x1e\xe2\x7d\x64\x23\xa5\x7b\x46\x0d\xfc\x16\xd9\x0c\xa7\x9c\xd9\xb8\x93\x80\x11\xbc\x09\x60\xb4\xc2\x87\x9b\xf3\x2e\xdd\xe8\x41\xc8\x30\x2f\xc4\x98\x7c\x4f\x75\xd9\x26\x78\x1c\xe1\xd9\xe4\x54\x11\xfb\xc1\x49\x48\x0d\x01\x3e\xe4\xe0\xb3\xe8\xa1\x80\x19\xcb\x5c\xdb\xb8\x5a\x1f\x0c\xf3\x4c\x33\xd3\x73\x78\xc9\x89\xbb\x63\x5a\xe2\xc1\xd6\x7b\x0d\xbf\x34\x7e\x69\x22\xe7\xc1\xd1\xa0\xf7\x70\xeb\x01\x19\x75\xfc\x0e\x89\x36\x66\xf4\x61\x44\xbd\xae\x61\x8a\xa3\x15\xb1\xe9\x19\x5f\x90\xc5\xdd\x11\xee\xfa\xf3\xf9\x22\x2f\xb8\x2e\x7d\xe5\xa3\x9c\xd8\x32\xdf\x49\x59\x72\xa4\xd0\x68\x2b\xf7\x92\x05\xa0\x58\x04\x9a\xbd\x24\xda\x08\x13\x6e\x44\x7e\xf6\xb5\x73\xb4\xdc\x75\x29\x16\xde\x2a\x35\xa6\xba\xc6\xaf\xc2\xa8\x51\x1d\x51\x4c\x89\xa4\xee\x65\x14\x4a\x82\xd9\xcb\xf5\xa8\x29\xce\x3b\xcb\x7f\x64\xfe\x2a\xa3\x98\x1c\xe6\xb7\x4b\x46\xe4\x96\xc8\x91\x5d\x06\xbb\xb8\x29\xf7\x14\x75\xb3\x84\xb7\xe5\xca\xc0\x68\x6e\x1d\xd6\x90\x55\x36\x9c\x8e\xbb\xb9\xf3\xe0\x85\x20\x63\xa2\x47\xd6\xf0\x7c\x40\x7a\x1b\xa8\x9c\xc7\x7f\x89\x73\x37\x43\xb9\xe7\xdd\x12\x81\x50\x8c\x2a\x10\xe5\x76\x2c\x1f\xb1\x3b\x08\x46\x7f\xa7\xb8\x3d\x2a\xab\x33\x25\x31\x2d\x4a\x62\xda\x1a\xf3\x81\x55\x85\xfe\x5a\x64\xf7\x17\x15\xc3\x5f\x65\xc5\xed\x65\x49\x31\x7c\x09\xfe\x63\x54\x01\xfd\xc7\xa8\x02\x1a\xbd\xe3\xcc\x6c\xac\xae\xd1\xb3\x57\xe9\x82\x35\x20\xe3\x1c\x62\x45\x0f\xf4\x1f\x2f\xc0\x5b\x52\xf5\x7a\x8b\xca\xe0\xf6\x62\x8d\xb7\x78\xb1\x8f\xf7\xf0\x1b\x14\x45\xbd\x9d\x85\x7a\xfe\x78\x11\xd2\x92\xe2\xa9\x16\x6b\xec\x96\x6b\x70\x96\x0d\xc8\x0c\x97\xe3\xd7\x11\xbb\xde\x4b\x41\xb8\xcb\x5f\xd2\x81\xd1\x70\x8d\x9d\xbe\xe6\x80\x7d\xc1\x5d\xde\xf5\x90\x67\xf7\xba\xf0\xc5
\x28\x62\x7c\x04\xf1\xc8\x56\x87\x82\xc5\xd7\x9a\x5c\xc7\x66\x0a\xb9\x99\x9f\x04\x9e\x0b\x0a\xcf\x34\xe1\x6e\x70\x95\x9e\x9e\x97\xc6\xda\xa0\xfe\x66\x67\xac\xd1\x88\xbd\x91\x30\x1a\xb1\xa6\xae\x32\xe7\xa8\xec\x38\x0f\x97\x82\x00\x28\x4d\x20\x1a\xd5\xc5\xa9\xe6\x5e\x2f\x4a\xc1\x1f\xa5\x69\x4f\x67\x4d\x7b\x05\xc3\x5a\x21\xb8\xb1\x67\xe7\x93\x11\x9b\x48\xd2\x7b\xd8\xa5\x70\x5d\xcb\x36\xc6\x64\x36\x32\xf2\x67\x1b\xec\xa1\xd3\x36\x6a\x60\xc5\x05\xd1\xd3\x8d\xd3\x7f\xfe\x39\xbb\x4f\x08\x7d\xd0\x19\xb8\xf0\xcf\x3f\xff\xfc\xf3\x3f\x5b\xf3\xff\xf1\xcf\x3f\xb3\x33\x87\x1a\x5e\x3b\x19\x81\x6e\x9c\x5c\xe9\x06\x32\x81\x6f\x9c\x38\xff\xfc\xe3\x50\x7b\x2e\x97\xdd\xdd\x48\x60\xf8\x97\x3b\xb4\x5b\xcc\xf8\x16\x66\x63\xe1\x6e\x6b\x60\x09\xed\xc2\x43\x89\x70\x3d\x02\xf3\x96\xd3\x84\x7a\x62\x85\x31\xf3\xd2\x73\x1c\x40\x52\x19\x1b\x2b\x76\xdb\x73\xd0\x93\x17\xdc\xa0\x53\x2d\x23\x20\xe1\xfa\x17\x66\xae\xa7\xde\xa5\x06\x5f\xd9\x4f\x53\x66\x3b\xde\x1f\xd9\x1d\x9f\x2d\x60\xbe\xe3\xff\x7a\x2f\x14\xfb\x98\xf7\x97\xf0\x8b\xff\x8c\x5e\x46\xdf\xcd\x93\xce\xb7\x13\x43\xfd\x31\xa5\xe9\x58\x28\x6d\xdc\x6a\x06\x45\x85\xc1\x02\xcd\xfb\xde\x9f\x2e\xef\x67\xab\x80\x2e\x8f\xc1\x1f\x7b\xeb\x3d\xc8\x40\xd6\x73\x52\x7f\xe4\x21\x2b\xe7\xa9\xdd\x84\x81\x0c\x74\x39\x5d\x62\xc9\xc0\x6c\x35\xd1\x7b\x1a\x4e\x04\x5d\xab\xf4\x5d\xd5\x52\x39\xaa\x04\xad\x11\x44\x36\xd0\xe8\xa2\x11\x58\x7a\x3f\x1f\x41\x4d\x47\xab\x72\x5e\x62\x1e\x04\xfe\x11\x42\xf3\x67\x8a\xe9\x50\xf8\x2b\xc0\x1b\x28\xe7\xe0\x9b\xa7\x08\x66\x2c\xee\x38\xff\xe3\x61\x7a\x58\xd2\x6f\x3c\x35\xc6\xb9\x33\x12\x26\xb2\xb9\x8e\x8b\x79\xb6\xd7\xa4\x26\x18\x7c\x7a\x5d\x3e\x40\x4b\x61\x4d\x72\x58\x76\x45\x24\x9b\xe0\xb5\x9c\x13\x12\xc0\x7d\x70\xe5\x5d\x8d\x6c\x66\xb7\xa7\x46\x25\xff\xb6\x6a\xa3\xf2\x1f\x09\x1c\x68\x6a\x3f\x08\x47\x21\x55\xb5\x56\xa9\xae\x6b\xb7\xa4\x15\xc0\x1b\x7b\x07\xca\x92\x6b\x6c\xef\x16\x38\x09\x2a\xb2\x07\x86\x50\x75\xf4\x87\x4e\xb1\xf4\x40\xc3\xeb\xb0\xee\x00\xde\xb2\x67\x6b\xce\x3c\x46\x32\x41\x0c\x0b\xa2\x1b\x87\x44\xd3\xfc\x53\xe1\x25
\x56\x21\x22\x08\x16\x33\x28\x1d\x93\x18\xdf\x2e\xd1\xd9\x5d\x48\x26\xae\x4d\xa0\x91\x90\x8a\xb7\xcb\x2e\xcb\xb5\x11\x7b\x53\x6f\x02\xfc\xa3\x77\xed\xfa\x55\x8f\x89\x99\x98\x21\x1b\x93\x93\x08\x34\x04\x14\x5a\x6c\x4c\x26\xe6\x77\xb4\x04\x69\xec\x45\x31\xf0\x09\xde\x92\x8f\xcc\xa6\x0a\x06\xbe\x17\x47\x06\x36\x3e\xc4\x91\x17\x02\x7f\xe5\xb5\x4c\x47\xc3\xdc\xc5\x86\x65\x63\x12\xa5\x9b\x3e\x88\xe0\x4d\x88\xf9\x40\x43\xb4\x94\xa9\x57\x94\xf1\xac\xec\x5b\x5a\x06\x45\xd9\xc8\x96\x8d\x46\x44\x94\x35\x94\x12\x3f\x50\x76\xc7\x2a\xbb\x63\x67\x76\xc7\x6a\xb8\x7f\x9f\x67\x7a\xb4\x1b\x75\x51\x73\x6a\x6a\x77\x5c\xd7\xae\xd7\x5d\xa0\x7b\xff\xe9\x22\xdd\xdb\xb8\x86\x2b\xb6\x4f\x3e\x45\x30\xb3\x19\xc4\x21\xa2\x70\xc1\xae\x0a\xfa\xcb\x26\xf5\x95\xd7\x62\x57\x2e\x07\xff\xa9\x67\x39\x75\x36\xde\xd0\x8e\xf7\xa2\xe0\xcd\x06\xea\x94\xed\x93\x60\x94\x43\x0d\x68\xff\x82\x4d\x97\xa1\x7e\xf4\x86\x6c\x8a\x58\x15\x00\xa3\x05\x80\x76\xd3\x9d\xb3\x7d\x32\x2c\x00\x8e\x0c\xc0\xf3\x25\x80\xfe\xb9\x77\xbe\x00\x6e\xb4\x00\xee\xf1\x22\x3f\x98\x2c\xce\xcb\x13\xec\xf0\xa6\xec\x18\x2b\x48\x0b\x93\x5d\xdf\x80\x72\xf9\x98\x0e\xc6\xe4\x62\x94\x3d\x78\x63\xd2\xca\x1f\xc0\xdf\xb2\xbc\x79\x16\x91\xf5\x1e\xad\x5e\xaa\xbb\xab\x87\x6c\xc4\x2e\xe1\xec\x98\xf8\x70\x47\xa9\x8b\xf9\x71\xb8\xeb\x0f\x90\xbd\x64\xf7\x66\x39\x4a\x60\x4a\x31\xb5\xf8\x96\x77\x97\x53\xef\x2c\x22\x77\x68\xaa\x45\x20\x23\x38\x08\xc9\x5d\xea\xd2\x4a\x28\x9c\xd7\xf1\xeb\xd2\x41\xdd\x4d\xa3\x9e\xd3\xf2\x76\x2b\xdf\x38\xaa\x65\xfc\x47\x92\xac\x0e\x45\xfd\x28\xab\xa1\xa8\xf7\xd6\xfe\x86\xf1\x88\x1d\x49\xb8\xad\x05\x7a\x37\x22\xf6\xb0\xd8\x5e\x30\x1a\x8f\x0c\x85\x6f\xdb\x53\x5b\xbc\x28\x98\xbe\xf3\x0f\xd3\x03\xe3\xd9\x89\x38\x17\xb7\x0e\xf8\x91\xa9\xf9\x3d\xfd\x16\xc7\x7e\xa3\x14\x3a\xe1\x44\x3e\x44\x74\x0e\x6a\x11\x90\x09\x3c\x5b\x65\xb7\x2e\x7e\xf3\xb2\x23\x17\xb3\xd4\xd9\xbc\x47\xc2\xf5\xaf\x31\xc3\x6f\x51\x53\xd8\xcc\xa5\xa0\x98\x02\x6d\xaf\xc6\x1f\x2e\xe3\x59\x24\x45\xcd\x27\xf2\x19\xaa\x91\x30\x26\xfb\x23\x38\x18\x19\xd3\x71\x57\x6c\xa7\xd7\x19
\x8f\x46\xb5\x99\x49\xf6\x8d\xe9\xd6\xdb\xde\xee\xd2\xce\xa3\xde\x93\xed\xdd\xc7\xa0\x19\x51\x7b\xdd\x81\xda\xe8\x6d\xef\x76\x9f\xec\x7a\x8a\x3e\xc4\x5f\x8f\xe6\x5d\x10\x2c\x7d\xfd\xe8\x81\x86\x98\x11\xb1\x41\x04\x96\xe2\x0d\x0e\xf1\x70\x6b\x77\x67\xd3\xde\xeb\xb0\xaf\x9f\xec\xce\xbb\x94\x9a\xd7\xf3\x2e\x70\x26\x36\xc8\xd6\xee\xce\x83\xb8\x43\xe2\xf4\xfa\x47\x9c\x5e\xff\x40\xbf\xc5\xce\x03\xde\xd9\xa4\x0f\x7b\x3b\x5b\xf3\x2e\x84\x2c\xe8\x90\x60\xaf\xd7\x1d\x6c\x79\x1b\x4f\x32\x26\x7e\xcf\x9f\x7b\x7c\x83\x90\xde\xce\xd6\x83\xc0\x54\xde\x31\x3d\xf7\x80\x6f\x7a\x21\xf8\xaf\xbd\xb8\xb3\xdd\xed\x3e\xd0\x1d\xb2\xb9\x17\x0e\xba\x5e\x8f\x26\x09\x5c\x36\xae\xf1\x91\xd9\x3a\x87\xa9\x09\xec\xf2\xe7\x09\x85\x8f\x2b\xd4\x92\x77\x01\x6c\x6e\xdb\xd9\xcd\x9b\xc1\x6e\xd7\x86\x09\xae\x6e\xd7\x13\x5b\x76\x45\xf0\xd3\xb8\xab\xeb\xee\x76\x0b\xf0\x18\xfe\xb7\x5c\x3d\xd5\x3c\x16\xf0\xdf\x5c\x54\x69\xba\x0b\x2a\x78\x6f\xc1\x10\xdc\x5c\xbc\x42\xb2\xa0\x3e\x6f\x2f\xc8\xa6\x9d\x05\x99\xb3\xbb\xc0\x32\x1f\x2d\x5c\xee\x78\xbc\x28\x6b\x9e\x2c\x5e\xd9\xe8\x75\x17\xf5\x80\x5e\xcf\xd0\xfb\xef\xbf\x98\xa2\x9c\xc8\x7b\x62\x0b\x27\xea\xf8\x2f\x17\xda\x7f\x6d\xd8\xf9\x0a\xa6\x56\x49\xe5\xf0\xc9\x18\x93\x4f\x47\xd0\x85\x16\x85\x93\xba\x2d\x9f\xcd\xf6\x9e\x1c\xdc\x92\x93\x11\xc8\x7f\xff\xbb\x07\x2f\x89\x32\xe4\xd1\x6b\xcb\xc1\x4b\xa2\x41\x51\x4f\x53\xcf\x58\xb1\xcf\x57\x68\xd1\x27\xd6\x91\xe2\x38\x18\xea\xb7\xa2\xaf\x97\x64\x4c\x9e\x8f\x40\x6e\xbc\x10\x46\xf5\xb2\x9f\x39\xb2\x1b\xfd\xd7\x0a\xf8\x2f\x0d\x7c\xa7\xeb\x40\x9c\x65\x14\x78\xdf\x60\x91\xfe\x1a\x81\x21\xf5\xe3\x11\x7c\x1a\xd9\xa8\x87\x0d\xa7\x83\xaf\x37\x57\xdc\x9e\x2d\x74\xbf\xde\x82\xd6\xb7\xb9\x40\x8a\x5b\x0b\xa4\xb8\xbd\x40\x8a\x3b\x0b\xa4\xb8\xbb\x40\x8a\x8f\x16\x48\xf1\xf1\x02\x29\x3e\x59\x20\xc5\x5e\x77\x91\x16\x7b\x4b\xf9\xbb\x7b\x9b\x49\x42\xc6\xe4\x67\x36\xe6\xca\xa0\xc7\xe4\xb2\x98\x8b\x4f\xa5\xd7\x1f\x8b\xd7\x5e\xe9\xf5\xdb\xfa\xd7\xbf\x8b\xd7\x6e\xfa\x7a\xcb\xe8\x6d\xc5\xeb\x1f\xff\x5f\xf2\xbe\x44\xb9\x6d\x1c\x6d\xf0\x55\x14\x8e\x76
\x06\xdc\x7c\x62\xe4\x5c\x9d\x30\xcd\x52\xf9\x90\x63\x27\x8e\x9d\xc4\xb9\x7b\xf2\xa7\x00\x92\x92\x68\x51\xa4\x42\x82\x94\xed\x44\xfb\x1a\xfb\x40\xfb\x62\x5b\xf8\x00\xf0\x10\x49\xd9\x99\xee\xf9\x77\xaa\xb6\xba\x2b\x16\x49\x10\x00\x81\xef\xc6\x77\x18\x6b\xf8\x20\x60\xee\x32\x80\xf9\x04\x5e\x4f\x4c\x78\xd1\x0d\xd8\x8a\xb7\x12\xee\x44\x9d\xb9\x40\xac\x65\x96\xce\xc8\x45\x44\xd0\x57\xc8\x84\x64\x6d\x9a\xf0\xc7\x57\x81\x0b\x85\x0b\xf2\xab\x89\x73\x1c\xc1\xf3\x2d\xb0\xf3\x8a\x43\xee\xca\x5c\x2e\x26\xbc\x17\x13\x7c\x3e\xc1\x53\x38\xe5\xae\x75\x7c\x60\x7c\x35\x31\x1c\xe5\xf3\x16\xc0\x95\x1e\xee\xaf\xcf\xce\x95\x8b\xbb\xf2\x09\x78\xb3\x05\x1b\xeb\xf9\x52\x3a\x18\xa1\xd0\x0a\x37\x1d\xe4\x8a\xcc\xcd\x68\xc6\x73\x08\x77\x0a\xa9\x41\x1d\x7f\xa7\x06\xcc\xc9\x8b\x09\x5c\x4e\x80\xf8\x4e\x62\xe2\x49\xbb\x10\x17\xca\x40\x92\x0f\x78\x54\xb3\x92\xb7\x75\x10\x0a\xde\xa4\x63\x79\xd3\x4d\x7c\xaa\x02\xb1\xe7\x78\x7f\x5f\xdd\x57\xa9\xaa\xd5\xdd\x5d\x25\x88\x78\x86\xcc\xfe\xf5\x6a\xa2\x32\x82\x4d\xc0\xb7\xe8\x07\x2c\xa0\x8b\xc9\x9f\x27\x60\xd0\xe5\x32\x0c\x5c\x8c\x43\xb9\x87\xf9\x12\x00\x73\x1d\x0c\xa5\xdb\x78\x69\xca\xba\x24\x9f\x27\x18\xad\xfd\x5e\x66\xd0\xfd\x34\x71\xdc\xce\x70\xee\xd2\x3a\xb5\x6b\x67\x40\xf7\x6d\x1f\xe8\xd8\xe6\x40\x3f\xd8\x14\x68\x5f\xac\xf1\xca\x4e\x80\x5d\xd9\x2e\xb0\x03\x1b\x6b\x3b\x7d\xe9\xde\xca\x1f\xf4\xb3\x78\xf9\xa1\x78\xef\x50\x6e\xce\x4b\xb4\x8c\x3d\x58\x43\xc4\x74\x98\xba\x16\xbb\x5e\x4e\x54\x64\x8b\x12\xce\x8a\x48\x9c\x6a\x80\xf8\x97\x89\xac\xb5\xa0\xdf\xee\x08\xf1\xc1\xc0\xa0\x5a\x4c\xfb\x4d\x41\xfd\xfe\xe5\x32\x48\x7e\x35\xa0\x7f\xe9\x47\x5e\x10\x4d\xff\x44\x34\x7f\x7b\xcc\xbb\xca\x4a\xa0\xa3\x7f\x04\x40\x14\xab\x51\xc0\x4c\x79\xa7\x84\xae\xfd\xce\x58\xa8\x96\xb8\xa7\x12\xc2\x4f\x5d\x12\x31\x9c\xcc\xa7\x49\x19\x92\xc4\x59\xe7\xc9\x5c\x3b\x1a\x19\x90\x54\x7c\x1b\x51\x81\xe2\xa0\xcd\xa9\x3e\x24\x4c\x1a\x2d\x7c\xd6\xe1\x55\xf8\x54\x69\x13\x59\x6b\x03\x77\x6e\xa3\xe5\xf3\x93\xf8\xf3\x79\x74\xdf\x1e\x0a\xa8\x12\xf2\xfb\x7a\x0d\x94\x39\x3f\xa8\x6b\xdf\xd9\x81\x8f
\x68\x0e\x9c\x88\x7f\x4f\xc5\x3f\x98\xeb\xee\x35\x86\xdc\xbc\x90\x49\x79\xda\x7a\xef\xbd\x25\x94\xc1\x8f\x53\xa1\x08\xdf\x19\x9a\x70\xa6\x7f\xbc\xd6\x3f\x0a\x67\x1b\xb4\xb8\x64\x4c\x99\x60\xd6\x10\x74\x7c\x8e\x76\x52\x8c\xd9\xf6\x14\x6e\xbf\xb5\xa5\x70\x9b\x30\x15\x0d\xc5\x3a\x7a\xd7\x6b\x95\x76\x3c\x7f\xa2\x9e\x87\xac\x8b\x5e\x1f\xb9\x04\x85\xa3\xbb\x67\x32\xa6\xdb\x84\x9c\xa9\x0a\xbd\x2b\x03\x1e\x0f\x1f\x3e\xf1\x1f\x21\x3d\xf2\x0c\xc0\x30\x68\xbc\x98\x19\xf0\xe0\xb1\xfa\xbd\x30\xa4\xf6\x20\xa8\xa1\x81\x32\xd6\x57\x13\xbc\xd6\xe5\x8d\x2c\x9e\x04\x0b\x62\xae\xa1\xdf\xfe\xfc\xf7\xa1\xb2\x32\x7a\xac\x5d\x17\xd4\xca\x0c\x45\x5f\x62\x99\xec\x94\x62\xb2\xd3\x8a\x0a\x4f\xef\xf9\x3f\x87\x23\xa1\x3d\xb8\xf2\xb7\x79\x97\xf0\xbb\x46\xcf\x30\x4d\x3b\x03\x14\x0c\x7d\x90\x81\x6d\x67\xc4\x30\x40\x7e\x3a\xe4\xcc\x94\xe9\x0c\x16\xcc\x79\x3d\x81\x19\x53\x8e\xb0\xcb\xd6\xc5\xfd\x66\xcf\x18\x82\xde\x7a\x0d\x53\x01\x77\xbb\xf6\x92\x11\xc3\x30\x05\xc5\x54\xbf\xae\x8a\x5b\xe3\xe2\xd7\x07\xb4\x27\xf5\xed\xbe\xa0\xa2\xf2\xe6\x1a\x56\xa2\x83\xcf\x98\xe6\xfe\xa1\x6e\xc9\x0e\x8b\xc7\x57\xac\x3d\x73\xd5\x1c\x1d\xab\x3f\xc9\xb5\x38\x2c\x51\xee\x5d\xab\x09\xef\x92\x7c\x99\x40\x02\xb2\x2e\x62\xd3\x3c\xc9\x37\x45\x33\x01\xf2\x3b\x9b\xd6\x79\x89\x07\x9b\xd4\x0d\x13\x68\xa2\x2a\xde\x3e\xd3\x62\x72\xe2\x2b\x05\xe2\xaa\xef\x44\x5d\x5c\x7d\x29\x56\xce\xb9\xdc\x86\x26\xbd\xb7\x64\xca\x40\xad\x34\xd7\x0b\x1d\xa9\x75\xd6\xd1\x09\x7d\x46\x30\xea\xdb\xd4\xcb\xbe\x10\xcf\x42\x06\xe2\x2e\x7a\xfb\x88\xe5\x3f\x8a\x49\x22\x8b\xc1\x33\xcc\xad\xfa\x59\x9d\x3a\x5d\x31\xf4\x96\x66\x78\x00\xa2\xb6\x68\xc1\xb0\xc8\x12\x86\xb6\x75\x22\x12\x62\xd1\x60\x3c\x91\xe9\x14\xc7\x88\x44\xb8\xe2\x86\x21\xfe\xbf\x83\xe5\xac\xf7\x25\xa0\x18\x86\x98\xbb\xf8\x77\x6c\x1f\x09\x16\x53\x82\xc5\x98\x89\x41\xf1\xe6\x1a\x0e\x3b\x47\x53\x31\x86\xd1\x66\x88\x21\x9c\xb6\x01\xab\x3c\x93\x15\x2f\x18\xe7\xb3\x38\x0b\xbd\x5e\x14\xf3\x1e\xf3\x7b\xfe\x62\xc9\xaf\x0c\xf1\xfd\x87\x0c\x0e\x7d\x62\x7c\x4c\xe2\x68\xda\x3b\x3e\x3f\x7b\xf2\x78\xb8\xd3\x9b
\xc4\xc9\x82\x72\xc3\x84\x4f\x2a\xe1\xc3\x45\x2b\xda\x1e\xf9\x02\x5f\x23\xd3\xdc\x32\xc2\x71\x86\x71\x41\xe7\x6d\xdc\xa4\x59\xa8\x4c\x05\xa1\x3e\x2b\xfd\x8e\xb4\xe8\x2d\x33\xfc\x8b\x81\xb8\x8a\x52\x9d\x93\x08\x7c\x99\x63\x55\xe8\xd7\x4d\x00\x44\x7a\xf1\x19\xd4\x91\xd9\x26\xae\x9c\x33\x98\x73\xb8\x14\x7f\x3b\x45\xda\x2f\x93\x52\x02\xbd\x60\x32\x77\x2c\x1c\x67\x42\xe0\x3b\xc4\xc3\xc0\x93\xe6\x46\xc9\x91\xcf\xc8\x29\x42\xe8\xa9\x80\x6f\xe5\x9a\x4a\x65\xf6\x62\x56\x7e\x2e\x57\x99\xca\x54\x46\xde\x32\x2e\xe0\x58\x10\x67\x79\x5a\x26\x74\x58\x5f\x2d\xf1\x3e\x8d\xfe\x81\xab\xab\x52\x85\x2d\x69\xca\xe5\x1a\xa3\x1f\xef\x37\xd6\xe9\x7e\xbd\x7d\xa9\xf1\x98\xa4\xc8\x89\x59\x6b\xc3\x75\x9b\x00\xcf\x91\x0a\x27\xaa\x6a\x1b\x5f\xb7\x89\xd1\x05\xb8\x99\x4d\xf8\x3c\x23\x99\x6e\x33\xa9\x3a\x2d\xab\x44\xde\xe2\xe3\xa8\xda\xd6\x15\x89\x74\xd1\x07\xa0\x6a\x73\xaf\x9b\x9b\x8b\x15\x8b\x3f\xc8\xcd\xdd\x95\x49\xe6\xfa\x32\xce\x64\x5f\x66\x00\x59\x61\x88\x09\x1d\xeb\xa1\x2e\x32\x12\x92\x6f\x0c\xb6\x06\x00\xbf\x25\xfb\x92\xd0\x44\x02\x51\x31\xa3\x51\x56\xc8\xbf\x89\xc0\x50\x7f\x5d\x03\x06\x54\x7b\x8e\x99\x00\x0a\x0c\xda\x14\x4f\x10\x3a\xc4\xce\x67\xf8\x63\x4e\x4e\x18\x88\x9f\xe0\x6a\xb0\x39\x63\x2a\xff\xd6\x1e\x73\x26\x11\xbc\x63\x0e\x8b\xe0\x6d\x17\xd2\x63\x6d\x25\x8b\x5e\xdf\x25\xd1\x00\xab\xcc\xba\x76\x82\x25\xbe\x12\x59\x35\x1d\xd8\xc2\x4e\x64\xe9\xf3\xc4\xc2\x84\x8b\x07\xcc\x89\x23\x38\x62\x8e\x1b\xc1\xf7\x66\xb7\xc1\x84\x5c\x92\x23\x06\x3b\xc3\x1d\xb4\x78\xfc\xfc\x89\x97\x8f\x9f\x4a\xfb\x87\x46\x1e\x2c\x0a\x86\x4f\x1e\x3e\xc0\x5a\x4a\xaa\xdd\xc3\x47\x78\x35\xe2\x77\x77\x6c\x8e\x5a\xd5\x01\x93\xc5\x93\xd4\x2a\x1e\x12\xb1\xa6\xa3\x41\x66\x67\x65\x76\x7f\x13\x5e\x77\x52\x35\xd9\xed\x63\x1c\x7e\x34\x27\xdf\x19\x60\x9f\xe8\x82\x26\xe8\xb1\x8d\xf7\xb4\x6f\x24\xdb\x5a\xe6\xce\x47\xdf\x1e\x5f\x9a\x4f\xdb\xea\x20\xde\x19\xca\x74\x4b\x99\x4c\xa9\xad\x80\xbb\x42\x6b\x0e\x09\x07\x5d\x0a\x55\x16\x58\xe3\xe6\xef\xc5\x4b\x51\xa5\x18\x65\x40\x5c\x3c\x74\x66\x40\xb1\x6e\x8a\x09\x2f\xb6\xf2\xb1\x7a\x2d\xfe
\x4a\x44\xc0\xab\x6e\xcc\xd5\xb5\x58\x28\x7a\x69\xbd\x66\xe0\x22\x5a\x88\xaf\x0b\xaa\x39\x57\xe5\x97\xed\x92\x17\x0c\x28\x02\x83\x45\xaf\x07\x24\xb8\x8b\xb5\xd2\x23\xa0\x96\x6b\xea\xfa\x6f\x16\x83\xa0\xbd\x14\x28\x85\x44\xb7\x72\x2b\x6d\x56\xe4\x83\x58\x7e\x59\xf4\x40\xcc\xaa\x4a\x07\x7e\xb4\x2d\x2e\x45\x0b\x7d\x41\x13\x60\xe2\xec\xb9\x04\x23\x46\x70\x74\x89\xd9\x55\x2a\x30\xc1\xc4\xeb\x9b\x3d\x14\x2b\x1d\x93\x89\xa0\x85\xb8\xd6\x81\x92\xe1\x9e\x33\x67\x4e\xba\x92\x1c\x3b\x3f\x68\x6a\x0b\x1a\x67\x02\x7d\x2e\xd0\xf9\xa5\x20\x2e\x27\x42\x7a\x78\xa7\xef\x7f\x12\xb7\xce\xa9\x09\x6e\x60\x27\x40\x1f\xc9\xfb\xeb\xcd\x8c\x4d\x12\x3a\x9f\x14\xf5\x54\x2b\xe2\x0d\x56\xcf\x4b\xee\xee\x3c\xab\x02\xf2\xce\xfd\x21\x56\xbe\xa3\xe6\x08\x17\xce\xb7\xdc\x40\xa8\xf0\xef\xd0\x09\xe3\x1d\x53\xb5\x76\x21\x12\x9f\x27\xda\xef\xec\xb4\xb6\x17\x68\x76\x49\xf6\x18\x3c\xd9\x7c\xe3\xe9\x93\xd6\x17\x52\xfd\xc2\xfd\xca\x0b\x21\x79\x55\xb6\x78\x2e\xfe\xf9\x24\xfe\x79\x09\x67\x24\x81\x21\x96\xf0\x52\x93\xef\x6c\x89\x7d\xee\x0c\x8b\x05\xc0\x74\xbd\xc8\xb7\x84\x24\xcd\xe0\x8c\x99\xf0\x9e\x39\xb5\xf4\x2b\x47\x54\x6c\xd0\xbf\x52\xa7\xf5\x5a\x9a\x1e\x85\x58\x6e\x9a\xf0\x99\x35\xeb\xfe\xa9\x7f\x68\x06\xc3\xb6\xe0\xff\x4a\x8a\x99\xbd\xb6\xc2\xeb\xbd\x05\xa6\x68\xc2\x32\x9e\xa2\xa7\xf7\x0c\x0e\x33\xd3\x54\x85\xe1\x13\x5d\x4e\xf3\x30\x53\xc5\xf5\xde\xb0\xa2\xbc\xda\x67\x66\xc2\x21\x23\xa5\xc4\xe4\x65\x09\x5a\x61\x0a\x91\xc9\x34\xe1\x23\x53\x01\x74\x9f\x3a\x09\x1e\x9e\xd9\x7d\xb3\x09\x77\x22\xc5\x48\x84\x70\xa3\x12\x32\x70\x8b\xae\xed\x8f\xcc\x5c\x97\xd6\xb7\x2f\xbf\x20\x5e\xcf\xc9\x27\x06\x17\xb8\x53\x52\xc6\x4e\xd6\x6b\x78\xd9\xde\x01\xfd\x20\x59\xe7\xbe\x14\x8f\x56\xc8\x3a\xe9\x18\x63\x34\xaf\x24\x03\xed\x3f\x2b\xad\x42\x95\xae\x77\x51\x0c\xd7\x37\xb8\x90\xc5\xf1\xe2\x0d\x03\x8a\x02\x38\x5e\x9d\x68\x6e\x98\xa1\xa8\x9b\xc8\xc0\x87\xe7\x1c\xbe\x30\x70\x51\xcc\xc6\x66\xa7\x0c\xeb\xba\x43\x94\x56\x63\xf9\x0e\xa4\xae\xb1\x9f\xf8\x94\xc7\x89\x01\xf3\x89\x09\x49\x7a\xc3\x92\x6a\xe5\x0c\xfd\x23\xd2\x7a\xe6\x08\xbf
\x16\x60\xc2\xdd\x2e\xc1\x0f\xdd\x7d\xcd\x91\x80\x05\x8c\x2e\x97\xf1\x91\x1b\x23\xf3\x6d\xb1\x77\x3a\x00\xe7\x08\x83\x73\x40\xa9\x1b\x62\x47\x2b\xee\x08\xbe\x73\xca\x08\xca\x31\x26\x64\xe2\x37\xb7\xe8\x18\x57\x2b\x69\x86\x61\xa3\xa0\xba\xcb\x64\xb9\xaf\x7a\x10\x36\xb7\xae\x04\x24\xc8\x3d\xed\xa3\x72\x55\xcb\x10\x24\x9f\xa3\x6c\x50\xba\x4d\xe0\x0c\xc5\x9e\x25\xa9\xe0\x36\xd6\x95\xda\x8d\x24\x05\x0a\xdc\x62\xab\x62\xb2\xda\x8d\x48\xe0\xab\x2f\xa7\xc9\x56\x72\x53\xf1\x03\xfe\x5f\x4d\x17\x61\x4c\x4d\x97\x8e\x8b\xe9\x3e\xb4\xa9\xb2\x13\x54\xa6\xaa\xa4\xdc\x37\x38\xdd\xc0\x21\xb9\x9c\x60\x28\xb8\xa9\x6f\x9a\x38\x2a\xae\xbd\xbd\x60\x24\xaf\x4c\xa8\x3a\x2e\xad\x8f\x1b\xd4\xc7\x7d\x64\x6f\xbe\x22\xb1\xa1\x7c\x47\xe2\x43\xb1\x7a\xf8\x76\x15\x05\x6a\xab\xfe\xb8\x0e\x4a\xfb\x72\x4c\xcc\xc4\x6c\xd1\xfd\xa2\xd9\x6f\xcd\x66\x0a\x23\xab\xad\x9e\xd4\x5b\xed\xd6\x3a\xdb\x2d\x9a\x3d\x6d\x36\x2b\x3a\x2b\x5b\xed\x34\x81\x1c\xbd\xde\x52\x47\x75\xd8\x37\xa5\x47\x7e\x2a\xb8\xa3\x8e\x4d\xd3\x87\x2b\x3a\xb1\x32\x83\x89\x73\x49\x78\xaa\x5e\x6b\xb5\xc9\x45\xe8\xb0\x84\x73\x8d\x25\x39\x5b\x9b\x6b\xd9\xe5\xb3\x8d\x39\x4c\xd6\x65\xb6\xb7\xdb\x77\x5c\x21\x94\xb7\xe8\xf9\x7e\x91\xd2\x98\x3d\xbb\xd5\x18\xec\x50\x4e\x1e\x4b\xe0\x1e\xde\x66\x88\x07\xb7\x9b\xbc\xb6\x7c\x6e\xeb\xb0\xea\x31\x92\x42\x78\xfb\x59\xd3\xcf\x76\xb8\xad\x67\x94\xea\xf2\xb5\x09\x59\xea\x90\xeb\xc0\x39\x72\xe1\x3a\xaa\xb1\xd9\x88\x9c\x44\xe4\x3a\x20\x07\x94\xfb\x56\x14\xaf\x88\x74\x6e\x34\x81\xa6\xdb\x9c\x49\xab\x3e\x77\xd2\xfb\xf2\x5a\xa8\x59\xf4\x54\x45\xbd\x16\x14\x44\xfb\xbc\x9c\xda\x2f\x55\x03\x60\xdc\xbe\x0a\x88\xb1\x5f\x98\x20\xd2\x8c\x2d\x02\xae\x12\x6a\x27\x0b\xe8\x9d\x4b\x2b\x75\x2f\x48\xf1\xf9\x95\xcf\x7b\x98\x34\xcd\x32\xb4\xdf\x65\x23\xa4\x56\x3b\x05\x71\x3b\x8b\x2b\x91\x94\xda\x33\x88\x31\x98\xa3\x4c\xfa\x71\x22\xed\x8a\x10\xa5\x42\x8d\xdd\x57\x7e\x34\x3e\x83\x71\x20\x6f\x54\x1c\xb9\xa4\x77\x90\xeb\xa0\xbb\x4e\x2c\x73\xc8\x08\xd1\x57\x7b\x66\xf1\xbb\xc6\xdf\xee\xe9\x93\xa9\x7b\xc6\xdd\x58\xe8\xc5\x7b\x71\x73\x4e\x62
\x4a\x6e\x41\x2d\xa4\x16\x13\x55\xcd\xa1\x89\x9c\x06\xce\xf5\x92\xc4\x08\xe8\x10\x08\x7e\x14\x20\xff\x16\x93\xcf\xd2\x16\x47\xb1\x1f\xf4\x02\xad\x52\x4b\xdb\x8f\x81\x9e\xda\xbb\xe4\x92\x15\x1e\x38\x91\xe5\x42\x64\x79\x26\xb8\x73\x1b\x27\xcc\xb8\xed\xc7\xeb\xaa\xa3\x39\xe2\x78\x75\x1e\x97\x84\x0b\x56\x80\x5f\x2b\xe7\x93\xe2\xda\x35\x7c\xaa\x1e\x60\xc4\x21\xb5\xfa\xa3\x72\x8f\x09\x73\x90\xa2\x63\x22\x71\x26\x44\x96\x50\xfc\xd9\x87\xdc\x61\x42\x64\xf1\xc4\xd5\x18\xfa\xe2\x4f\x5f\x5b\x09\x99\x96\x50\x96\x8c\x84\x4d\x43\xe1\x9c\xec\x32\xc8\xc1\x33\x6b\xd6\x42\xcf\x44\x29\xe5\x94\x93\xd4\x2c\x24\x95\x39\x83\x7e\xc5\x20\x98\x2b\x83\x20\x2e\xed\x09\x17\x3b\x3c\xa7\x64\x22\x64\x55\xd3\x16\xdf\xba\xe1\x73\x38\xa9\x01\xf1\xa4\x09\xc4\x4b\x09\xb8\x34\x42\xb0\xf4\x82\x74\x19\xd2\xab\x1e\x9d\x4c\x64\x7a\xa0\x5d\xf4\x4b\xdd\x0a\xbe\x50\x41\x04\x05\xca\xd4\x99\x34\x5d\xc4\x96\x12\x8c\x2b\xae\x77\x1a\x80\xd1\x45\xd1\x65\x02\x56\xfb\xa5\x0b\xe0\x83\x4d\x4f\xb7\x65\xd3\x03\x70\xa3\xc5\x45\xa5\x45\xc3\x7f\x53\x3c\x4e\x2c\x7a\x51\x02\xd6\x9c\xf8\x92\x0d\x49\x24\x6e\x42\x14\xd2\x1a\x55\x2b\x10\x3c\xe8\x0b\x90\x4f\xd5\x19\x62\x90\x56\x8e\x98\xe5\xa1\x9f\x3a\x5f\x8e\x3b\x85\x44\x79\xba\x7c\x30\x3e\x19\xbf\x1b\xd7\x53\xa8\x4d\xd2\x5f\x3f\xee\xb2\xe8\x87\x8e\x13\xaf\x38\x05\x1f\x82\x54\x9e\x78\xb1\x5f\xed\xba\x25\x67\x4b\xe3\x38\xed\xd4\x25\x78\xa2\x26\x06\x48\x9b\xdf\xdb\x28\xc1\x54\xe6\xaf\x0a\xb4\xf5\x78\xc3\x1b\xd7\x8f\x37\xf8\x7b\x16\x6f\xee\xe1\x55\x20\xcd\xcd\x6b\x13\xc2\x74\x7b\x72\x8f\xbc\xe3\xb9\xce\x38\xe7\x75\xee\xd1\x21\xc1\x3c\x5a\xec\x52\xca\xdd\xfd\xb6\x8e\x7a\x2f\x02\xe2\xa5\xd2\x36\xbd\x68\xf6\xa4\x73\x03\xf4\x53\xf4\x47\xd0\x1a\x0c\xfb\x6c\x63\xd8\x16\xd0\x99\xcd\x81\xed\xc9\x43\xe9\x59\xea\x8c\xc9\x1f\x3b\x70\x1f\x86\x5f\x4d\x58\xa6\x1d\xd6\xa5\x06\x93\xba\xbf\x09\xfc\x33\x7b\x4e\xd2\xb4\x3d\x7e\xe5\x39\x87\x85\x98\x2f\xcc\x52\x73\x0d\x2a\x57\x57\x7b\xe4\x8d\xf4\x96\x55\xde\xc9\x89\x45\x8f\x4d\x60\xdf\x91\x1e\xcf\x24\x02\x5f\x12\x96\x42\x06\x1c\xf2\xd4\xac\xb9\xa0\xd3\x3a
\xfd\x97\x8c\xe2\xbb\xa0\x67\xc4\x95\x8c\x86\x7e\x30\x9b\x51\x0e\xf8\xec\x59\xc7\xcb\x95\x71\x25\xff\xbb\x24\x13\x31\xba\x0b\x87\x3e\x71\x53\x41\x31\x2b\xcc\xcb\xaf\x32\x2f\x43\xf0\xad\x92\xf1\x3d\x54\x3c\x0a\xdd\x85\xeb\x0d\x95\x53\xae\xfc\x58\x19\x8c\xd7\x58\x90\xa0\xf4\xcb\x0d\x53\x88\xdb\x02\x11\x1a\x7e\xca\xe8\xbc\xda\x01\x88\xfa\x80\x76\xd5\xf1\x5c\x67\xb9\xb9\x6a\xc3\xde\x6e\xcd\xaf\x60\x77\xf2\x93\x56\xe9\xed\xa9\x69\x03\xa0\x2e\x36\x5b\x3c\x2a\x4a\xd7\x37\x64\x94\xef\x32\x02\xa6\xb2\xb5\x9d\x2c\x55\xf0\x01\x60\xa9\x3d\x0f\x88\x2f\x19\xab\xa9\x57\x76\x9a\x4a\x6e\xc0\x05\x37\xf0\x15\x37\xb0\xcb\x17\x7d\x7c\x11\xdf\x6a\x7a\x82\x67\x1d\xf3\xda\x41\x98\xc5\x25\xc9\xe4\x82\xe8\x9d\xdb\xf6\x46\x09\x54\x75\x79\x28\x93\x3e\xc9\xf3\x54\xf9\x80\xc4\x49\x30\x0d\x22\x1a\x6e\x2b\x22\xf0\x9b\x76\xa8\x68\x97\x3f\x75\xb3\x2b\x3b\x02\xba\x27\x48\xc3\xd8\xf6\x81\x1d\x49\xfa\xb0\xdb\x06\x00\xfa\x95\x87\xe2\x95\x27\xe2\x95\x2b\xd9\x7a\x9c\xde\x58\x41\x22\xf1\xa9\x77\xd5\xe6\x6a\x92\xfa\x9c\x87\x1b\x1e\x25\xda\xd9\xc4\x0b\x52\xca\xc2\x3f\x55\x3b\x62\xbf\x8b\xe4\xfe\xa0\x91\x9d\x80\x2a\xb5\x00\x87\x7a\x65\xa9\xe7\x25\x7e\x9a\x76\xf8\xe2\xec\x0b\xee\x06\xa7\xa9\x23\xd3\xdf\x2e\x7d\xf4\x25\xf1\x03\x72\xea\x92\x43\x41\x17\x8a\xdc\xb7\xda\x23\x67\x9c\xaa\x54\xb9\xb2\x9b\x32\x45\xee\x95\x4b\x76\x53\x14\xc5\x2e\xd2\x1b\x13\x08\x71\x9b\x03\xcd\xec\x0c\x68\x6e\xfb\x40\xdf\xda\x14\x58\x68\x27\xc0\x9e\xcb\xe9\x9f\xa7\x85\x73\x10\x0f\xc4\x38\x5f\x4a\xdf\x99\x68\x12\x4c\x0d\x98\xa7\xda\xd5\xc8\x4f\xd2\x20\x8e\x8e\xa3\x49\xac\xfd\x85\xa6\xf1\x07\x79\xb3\xf2\xd5\x2c\x0b\x42\xef\x00\xfd\x88\xea\xf7\xde\xa7\x7e\x52\xbd\x97\xd0\xc8\x9d\xd5\x92\x08\xe7\xc1\x46\x5f\x79\xa5\xf7\x2b\x97\x5c\xa4\xda\xdd\x46\x4d\x31\xcc\x52\x4c\x3b\x76\x9a\x8a\xc7\x97\xf2\x31\x1c\x77\xb3\xb8\xba\xfc\xa0\xc5\x20\x59\xf4\x11\x73\xbc\x9d\xa7\x75\x21\x22\x11\x42\x84\x3c\x75\x38\xe9\x20\x7d\x3a\x81\xd7\xb7\x4d\xc8\xaf\xe6\x89\x1c\xa9\x80\x92\x14\x0a\xa1\xfe\x24\x85\x31\xad\x10\x0c\x76\x65\xff\x60\xd7\x48\xc4\x14\x3d\x36\xe1
\xba\x95\x9d\xa3\x34\x3d\x8b\x6b\x8a\xa4\xac\xaf\x1a\x71\x2b\x8c\xa5\xa7\x9b\x13\x95\xc5\x54\x83\xa4\xb8\x6d\x25\x7e\x18\x53\x8f\xdc\xd9\x11\x84\xc1\x5c\xc3\x59\xd7\x6a\x25\x16\x7b\x2d\xa3\xff\x5f\x35\xe3\xbc\x1e\x29\x1a\xb6\x22\xcb\x09\xec\x63\x69\xcd\x13\x69\x01\xc5\x10\x1d\x8c\x1a\x6f\xf0\xc7\x13\x3b\xb3\x28\xb0\xa7\xb6\x0b\xee\xb5\x9d\x72\x82\x95\x6b\x68\xdd\xdc\xe1\x42\x20\x7a\x4d\xc1\x4d\x05\x41\x3b\x50\xbd\x0a\xd2\x83\xcc\x4e\xf0\xde\x26\xcb\xd3\x9d\x7a\xa2\x53\x70\x0f\xec\x42\x76\xbf\xa4\x50\x48\x6f\x3b\x9b\xe1\x69\xee\xb5\xbd\xe0\x75\xfd\x64\x4c\x89\xe2\x67\xfe\xba\x1a\x18\x24\xd5\x13\x31\x15\xe6\x5c\x92\xab\x14\x94\xf8\x36\x59\x8b\x69\x8e\xc5\xf4\x50\xe1\x7a\x46\xab\x09\x25\xca\x71\xfa\x9c\x4c\x4c\x70\xc7\x76\xaa\x67\xb6\x5b\x99\xd9\xd3\x32\x2a\xae\xf9\x6a\xce\x49\x58\x68\x51\x08\x3b\x2b\xcc\x38\xf2\x4d\x6a\xa5\xe3\x80\x84\x15\xe5\x10\x0d\x0e\xed\x1d\x85\x9c\xe4\x4d\x75\x6c\x3a\x11\x7a\x5a\x5b\xec\x94\x7e\xef\x79\x56\x63\x96\xf7\xdb\xa3\xa4\xa2\x52\x4c\xde\x79\x50\x6d\x72\x9d\x92\xea\xb3\x47\xed\xaf\xf3\x4a\x60\x61\xd1\xf6\xb1\x0a\x45\x13\xeb\xde\x77\xd0\x43\xc3\x13\xaa\xa7\x76\x80\xda\x0c\x47\x63\x4f\xed\xb7\x62\x69\x9e\x62\xc6\xa7\xfe\xba\x2e\xc0\x3d\xac\x8e\xdc\xd4\xcb\x2e\xc9\x37\xa5\x60\x81\x5f\x13\x15\x17\x12\xce\xa5\xd4\xc5\x4e\x00\x07\xc0\x84\x2e\xb4\x25\x7a\x8b\x9d\xd8\x0b\xe4\xf8\xb4\x66\x13\x9e\x49\xb0\x96\x9d\xb8\x07\xb2\x13\x01\xd6\xcf\xa8\x33\x6b\x42\xcc\x81\x1d\x3b\xb3\x52\x8a\xab\x81\xb1\x0a\xfa\x42\x30\x54\xfd\x8d\x65\x47\xcd\xa8\x2f\x01\x71\xd5\xa8\xaf\x2a\xd4\xdd\x2f\x82\xbd\x68\xd1\xd1\xbe\x9e\x53\x33\xde\xcb\xdd\x57\xf1\x5e\x05\xca\xfe\xb6\x29\x4e\xbe\xdd\x14\xc2\x76\x36\x23\xc0\x5c\xd6\x68\xf2\x74\xb3\x97\x6f\xdb\x14\x63\xd9\xe4\x41\x5d\x6e\xdd\x4b\x9d\x37\xf0\xae\x8d\x5a\x92\x39\xd9\x4b\x91\xea\x63\x5a\xb1\x37\x3e\xec\x40\x64\xda\x91\x79\xd7\xb8\x47\x97\xc1\xbd\xfc\xbe\xb1\x86\xb7\xad\xb4\x9d\x26\xf6\x9d\x1d\xa1\x67\x47\x40\xbf\xa1\x23\x52\xdf\xee\xaf\xd7\x70\xd0\x2d\x1a\xd4\xa3\x21\xfd\x58\x69\xef\xec\x81\x10\x19\x8e\xed\xb7\xa8\xd5
\xd0\x73\x5b\xc8\x92\xa0\x06\x90\x31\xdc\xec\x89\xdd\x07\x7a\xa2\x46\xf4\xc1\x5d\x60\x57\x43\x71\x77\x07\xfd\x34\xe7\x52\xd4\xc3\xf9\xb0\x29\x1e\x87\xe9\x00\x70\x77\x2e\xf8\xbc\x68\xcb\x66\xe2\x1f\x19\xb1\x5c\x44\x7b\x8b\xd7\x30\x94\x50\xa7\x42\x59\x9b\x70\x94\x3a\x82\xdd\xf8\xf1\x1a\xbe\x77\x6a\x97\x89\xd5\x1f\x45\x78\xb8\x60\xc2\xeb\x4e\xc5\x5c\xbb\x44\x3b\x33\x4a\x12\x99\xa9\xf6\x7b\x2a\x4f\xc9\x67\xaa\xe2\x98\xb1\x4c\x62\x2f\xc3\x57\x0d\x78\x39\xc1\x90\x57\x98\x38\xf1\x28\xb1\xe8\xc4\x16\xbb\xc3\xda\xde\x9a\x6e\x24\xa8\xd4\x6f\xa6\x2d\x81\xc3\x41\x45\xbd\xa8\xe3\x75\xd0\xc8\x0f\xdd\xa3\x6c\xbd\x26\x26\x84\x72\x50\x4c\x61\x5c\x0c\xea\x6d\x9c\xa4\x09\xb9\x0b\x3d\x1f\x9d\x77\x29\xd1\x33\x56\x19\xed\x04\x5b\x0d\x67\x71\xca\xed\xa7\xc3\xa7\x0f\xee\x19\x15\xe9\xe1\x2c\x85\x2b\x5c\x0f\x42\x9d\x1f\x72\xef\x5d\x87\xe3\xfe\xb9\xa5\x8e\x2a\x80\x63\xcf\xde\x59\x43\xe6\xd4\x0c\x89\xb8\xa3\x28\xdd\xa3\x0e\x11\xaf\xc1\x77\x36\x2d\x8d\x53\xa6\x9a\x49\x0b\x90\x20\x3c\x73\x72\x90\x02\x07\x66\x02\x7b\x65\xe7\xc0\x5e\x8b\xcd\x9b\x00\x7b\x8b\x5a\xcc\x37\x3b\x14\xa0\xc8\x04\x0f\x4e\xc1\xc5\x38\x5d\xec\xc1\x0d\xed\x89\x60\x9f\x01\xb8\xfb\x02\xf6\x0e\x6c\x2a\x58\x55\x26\x04\xf5\xa3\x74\x8d\xf2\xcf\x87\xb4\xf0\xca\x13\x68\x00\x2f\x52\x87\x25\xc4\xf0\x82\xdc\x30\xe1\x95\xbc\x48\x97\x34\x32\x4c\x78\x9e\x3a\x93\x04\xde\x0b\xe1\xfe\x45\x0a\x78\xa2\x37\x27\xaf\xd4\xaf\xe7\x29\x31\x4e\x62\xea\x05\xd1\xd4\xb2\x2c\xc3\xfc\x2a\x5d\xf5\x3f\xb7\x8a\x3a\xef\x48\x64\xf1\xf8\xfd\x72\xe9\x27\xfb\x34\xf5\xd1\x7b\xf2\x4d\xda\x72\x3a\x4b\xb1\xd2\x47\xd3\x89\x25\xaa\x15\xd9\xe3\xd5\x54\x4f\xb9\x0f\x9f\x53\x74\xd9\xc2\xdc\xf6\x1f\x3b\xb1\x60\x4e\x96\x09\x44\x30\xd7\x11\x67\x9f\x52\xe7\x63\x4a\x0c\x37\xa4\x69\x7a\x2a\xa4\x73\x13\xbe\x74\x08\x6a\x2f\x52\xf1\xc9\x9f\x52\x22\xb3\xab\xf5\xf0\xdf\xc1\x8a\x26\x91\xd0\x5c\xcc\xaf\xa6\x5a\x91\x37\x68\x9e\xc1\x20\xca\x97\xbf\x62\x9f\x92\xc6\x29\xcd\x16\xea\xca\xf5\xfb\x74\x13\xea\xbf\xa4\x85\x45\x2a\x0a\x71\xc3\x58\xc6\x79\x2c\xb6\x2c\x91\xd7\x61\x60\x98\xc0\x43\x67
\x96\x80\x1f\x3a\x8b\x04\xb2\xb0\x7b\x55\xfc\x50\x96\x8f\xc3\x22\xe5\xc8\x6a\x69\xd8\xbe\x0c\x59\x28\xa4\xf5\xc0\x9d\x1b\x42\x50\x47\x43\x94\x1b\x6e\xf7\xfd\x49\x42\xbd\x72\x11\xcd\x07\x01\xf7\x17\x7a\xb9\x54\xaa\x1c\x84\xa8\x4a\x93\x30\x88\xe6\x3d\x15\x7f\x20\x5a\xfa\xa6\x3d\x27\x51\x28\x81\x8f\x87\x60\x30\xea\xce\x05\x41\x89\x3c\x03\x0c\x9e\xd0\x28\x5d\xd2\xc4\x8f\xb8\x21\x38\xa3\x68\x30\x89\x23\x6e\x60\x31\x40\x3f\x09\xca\xdb\x6e\x96\xa4\x82\x0e\x18\xcb\x38\x88\x30\x29\xb7\x7a\x10\x67\x42\x01\xf5\x0d\x30\xa2\x38\x12\x50\x50\x9d\x8a\x21\x56\x43\xc6\x08\xe1\x64\xa4\x9b\x44\xd0\xb6\x40\xba\xd0\x3b\x7a\xc0\x77\x34\xf8\x4d\xa7\xf9\xed\x78\xfe\x58\xa7\xf9\xed\xda\xaf\xcd\x40\xbc\xb4\x75\xab\x5e\x06\x24\x88\xc9\x9c\xb0\x10\x6b\x52\xcb\x38\x02\xf8\x54\xc4\x0f\xac\xd1\x23\x2f\x94\xc0\x42\x0d\x13\x72\xf9\x53\x80\x8d\x17\x22\x5a\xf0\x80\x87\x62\x31\xfa\x61\xbb\x13\x05\xd4\xc2\x6c\xab\x28\xc2\x78\x34\x40\x8a\xdf\x5b\x24\x83\xfb\xbd\x05\x1b\xdc\xd7\x7b\x5e\xdd\x6c\xc6\xa3\x9e\x68\x9a\x2e\x7a\x2c\x4e\x3c\x3f\x19\x24\xc1\x74\xc6\x07\xc3\x1e\xf7\x2f\xf9\x60\x91\x71\xdf\x2b\xf6\x28\x4b\xfd\x64\x90\xfa\xa1\xef\xca\xad\x0d\x78\x40\xc3\xe2\xe9\x60\x11\x5f\x0f\x6e\x68\xb2\xf2\xd9\x3c\xe0\x37\xb4\x52\x13\x71\xe3\x10\x21\xe5\x6f\xae\xeb\x56\xb0\x3b\xb9\xfb\x0f\xc7\xf8\xc7\x5d\x8e\x09\xf0\x65\x5c\x52\x14\xb6\x7d\xce\x74\x30\xa1\x9e\xef\xe1\xb5\x82\xae\x41\xea\xbb\x71\xe4\xd1\xe4\x4a\x42\x54\x1a\x12\x99\xd6\xcf\x14\x2b\x4e\x8c\x43\xcc\x42\xda\x63\x57\x3d\x3e\x0b\xd2\x1e\x96\xba\xa9\x0c\x6d\xdc\xd5\x14\x76\x0d\x8b\xb6\x2d\xaf\xda\x8b\x22\x7f\x35\x92\x59\x4d\x1d\xe3\xee\xeb\x80\xe4\xb1\xc4\xd5\x59\x07\x5a\x7f\x4c\xc1\x98\x25\xfe\xc4\x80\x7b\xff\x75\x41\x73\x9a\xba\x49\xb0\xe4\xf6\xbd\x40\xd6\x85\x24\x89\x13\x99\x56\xe2\x2f\x43\xea\xfa\xe4\xde\x3f\xd3\x7b\x53\x30\x0c\xd3\x1c\x19\x86\x9d\x98\xba\x74\xe3\xb2\x0d\x4e\xce\x39\x89\xd0\xae\x9d\x6d\x56\x4e\x9d\x93\xb0\xb1\x74\x7a\xa9\x82\x68\x12\x6b\x98\x18\x1a\x26\xcc\x42\x42\xd0\x63\xe7\x0b\x2c\xc2\xa6\x67\x55\x21\xf6\xbb\x3e\x4a\xbf\x43\x9d\x37
\x9d\x13\x4c\x91\x5f\xc0\x5e\x5e\x8c\x38\xa1\xbd\x09\x1d\x30\x3f\x0c\x07\x69\x48\xd3\xd9\x20\x46\x58\xc5\x05\xef\x0b\xc6\x47\x0c\x75\xf6\x84\xcb\x5e\xa4\x9e\x4b\xaa\xee\x5b\xdd\x1f\xe0\xd1\x68\x2a\x36\xb3\xf6\x09\x75\x8b\x9e\x7f\x8b\x59\x75\xcd\xc9\x33\x24\x20\x4c\x43\xe9\xa3\xd5\x49\xd9\x57\xb1\x0e\x4d\xec\x6c\x72\xad\x9b\x90\x31\xf9\xa3\x4c\xf0\x5b\xa6\xf1\xc5\x1a\x5f\xab\x26\x39\xaa\xe5\xcf\xbc\x13\x55\x82\xfa\x9f\x6d\xc4\xad\x88\x9d\x9f\x86\x62\x2f\x8a\x14\x9b\x05\x83\xa7\x82\x17\x4a\x1b\x44\x82\xa7\x9d\x32\xba\x45\xd6\x52\xc7\x6c\x43\xc7\x19\x41\x17\xb8\x39\x87\xf3\x8c\x64\x18\xd9\x22\x55\x9f\x32\x09\xe7\x2d\x5f\xc6\x57\x93\xfa\xab\x8d\xc6\x89\x4c\x78\xd7\x6b\x9f\x6a\x63\x84\xf3\x8c\x10\x35\xbc\xac\x87\xc2\xa5\x5e\x75\x8b\xc9\xc9\x19\xdd\xad\x8c\x59\x7f\x28\x1f\xac\x4d\xb8\x6a\xc5\xdb\x31\xd1\x11\x46\x56\xba\x0c\x03\x2e\x30\xf3\xee\xbd\x29\x06\xa6\x28\x12\x4e\x93\xa9\x2f\x18\xe0\x65\x07\xe2\x6f\x20\x52\xdd\x65\xe9\xb9\x54\xf7\xdb\x12\xde\x4a\xc8\x9f\x85\x42\x2c\x9e\x87\xc4\xf8\xc6\x42\x2a\x58\x65\x49\x29\x25\x80\x62\x42\xa8\x55\x08\x57\x21\x96\xf2\x40\xa6\xb3\x2b\x39\x0d\x17\xb4\x7d\xac\x7e\xcf\x0c\x13\xf6\xd5\x6f\xc1\x97\x0f\x6f\xc9\x73\xf6\x43\x2d\x95\x8e\x0b\x24\x42\xe6\x11\xc5\xab\x84\x2e\x6b\xa4\xdb\xb0\x0d\x45\xb3\x77\x8b\xb6\xab\xc1\xce\x70\x88\xad\x2e\x11\x3e\x71\xd2\xa7\x9d\xa2\x92\x90\x58\x4e\xb9\x90\xbd\x5e\x98\x42\x70\x91\xc4\x9f\x86\x58\xb3\xed\x46\x42\x56\x15\x6a\x5a\x11\x7f\x11\x44\x59\xda\xc0\x79\x34\xd4\x8a\x99\xd9\x6d\xdc\xa6\x8b\x64\xd2\x90\x14\x33\xed\x1a\x6f\x19\x6e\x1b\x6e\x6d\xc2\x45\x07\xd8\xfc\x12\xe5\x8e\x3a\xc7\xc7\x37\xdc\x19\x4d\x78\x93\xd0\xc5\x59\xa2\x68\xef\x1a\xce\x5b\xa7\x11\xfd\xbe\x33\x1c\x19\x43\x55\x9f\xd5\xc6\x7f\xd7\x70\xdc\xda\xf6\x5c\x70\x0d\x19\x92\x2e\x78\x99\x8c\x3e\xc7\x9b\x32\x20\x3d\xa9\xde\xba\x2e\x6f\x81\x2c\xff\xba\x19\xf1\x7f\x1e\x92\xff\xbf\xa2\xfd\x71\x5d\x2a\x91\xfe\x3d\xf2\xfe\xdd\xbe\x69\x68\xd6\x7f\xd2\x01\x27\xaf\x2a\x5a\x53\x30\x8d\x84\xd8\x35\x19\xb8\xbe\x10\xbe\xcb\x1d\x97\xf8\x79\x1c\x0a\x41\x61\xa5
\x50\xf0\x5b\x13\x05\x2b\xcc\x66\x70\xbf\x4b\x37\xf4\x9e\x45\xce\x9c\x7c\x0b\x65\x10\x85\xe5\x9b\x3a\xe7\xfa\x75\xc7\x0c\xbf\x85\x42\x5a\x30\xd7\x70\x56\x1b\x31\x83\x82\xea\xdc\x32\xa3\x6e\x46\x22\x73\x74\x46\x54\x81\x26\x2c\xc8\x6e\x9f\x11\x0e\xea\x86\x5f\xcd\xba\x8d\xe1\x33\x09\x9c\x91\xbe\x00\x77\x99\xd4\x3d\x74\xc2\x04\xde\x29\x12\x48\x19\x4a\xde\x6f\xc3\x6d\x96\x1a\xe7\x88\x10\xdf\x99\x93\xb3\x50\xda\x92\xf7\x28\x44\x3e\xa9\x64\xff\x36\xab\xf9\x25\x3b\xf9\xff\x59\xc1\xff\x23\x13\x93\x4c\x0a\xa9\xc9\xa2\x5f\x84\xdc\x24\x3e\xd2\x62\x15\x5b\x48\x52\x51\xc3\x96\x71\x1a\x48\x3b\x10\x9e\xc0\x04\xae\x52\x9c\xe4\x56\x0b\x45\x2f\x1d\x60\x04\x76\x2f\x0c\x52\x2e\x15\x02\xbc\x5d\x12\xc4\xe5\x60\x28\xb4\x83\x87\x25\x49\x2c\x95\x08\x24\xcd\xa8\x3b\xf4\xbc\xc1\x24\xf4\x2f\x7b\x8d\x8e\xf5\x6b\x27\x82\x76\xc3\xf0\xf7\xeb\x50\xcc\xfc\xbb\xa0\xcc\x7b\x61\x9b\x84\xf8\x32\x20\x65\x95\xf4\x35\xcc\xc9\x69\x88\x8b\x6a\xda\x82\xe6\x18\x26\x10\xcc\x7d\xf8\x1a\x32\xab\x3f\x92\xb7\xec\x8b\x10\xa3\xb8\x4c\x58\xe2\x28\xdd\x22\x2e\xfd\xb8\x29\xe2\xca\x1e\x6e\x23\x2f\xd6\xf9\x04\x72\xb0\x40\xd1\xe1\x76\xd2\xe9\x5f\xf9\x1d\x22\xe2\x71\x51\x03\x08\x51\x89\x70\x64\x7a\x87\x24\x11\x9c\x8b\x6b\xce\xf5\xae\x64\x97\x02\xd8\x7a\xc5\x6a\xef\x60\x5f\x28\x19\x1c\x86\x0a\x16\xbe\x9b\xe5\x0a\x29\x4b\x10\x36\xe8\x87\x42\x0e\x93\xac\xe2\xa0\x89\xb1\x4a\x0a\x1c\x19\x9a\xb5\x61\x21\x36\xe4\x3a\x46\xb9\x28\x05\x0f\x4d\x6e\xc3\x42\x17\xc9\x60\x47\x4d\x53\x69\x6c\x0b\x9a\x4c\x83\x68\x10\xfa\x13\xa1\xcf\x0d\x1e\x24\xa5\x81\x61\x63\xd9\x84\xa4\x85\xeb\x24\x67\x7c\x24\x91\x2d\x13\xca\xdf\xf7\xf0\x86\x48\x24\x6e\xb1\x51\x77\x44\xc7\x2d\x55\xe1\x9d\x72\x81\x5b\x54\xe1\xff\x1c\xa5\xf7\x11\x73\x87\x9e\xff\xab\x7a\x6f\x75\xc3\xfe\xb4\x8a\xcb\x4d\xbb\xcb\x5a\xa0\xd5\xe8\xe6\x8a\x8a\x3e\x4e\x63\xde\xc3\x05\x57\x08\xf0\xd5\x84\xc0\xa1\x3f\x7f\x62\x06\xba\x0c\x94\x59\x7b\x1b\x79\x78\x5a\x92\x87\x83\x10\x02\xc8\xd0\xc2\xfd\x85\x12\xdf\x04\xe6\x60\xd4\xd6\x28\x73\xc9\x44\xb0\x41\x24\xb6\x86\x5d\xbb\x4c\x0d\x40\xdf\xaa\xea\xe4
\x17\xa1\x9c\xea\xb0\x58\xf3\xd5\x2c\xe0\xfe\x20\x5d\x52\x57\xda\x9d\x36\xc4\x55\x26\xe7\x5e\x03\xa9\xfe\x26\x89\x5c\xb0\xc1\x03\x85\xae\x73\x0e\x31\x1c\x11\x17\xd0\x3f\x21\x18\xcd\xc9\x51\xb1\x41\x25\xf9\x55\x53\xd0\x08\x3e\x27\x6f\x43\xe0\x10\x09\x66\xa5\x11\x5c\xe2\xc6\xeb\x4e\x11\x78\x45\x12\x0a\x18\x40\x09\x6e\x06\x6e\x86\x16\xe5\x9a\x09\x50\x62\x8e\xd6\x26\x98\xfe\x5b\xea\x87\xd5\x4f\x58\x86\x83\x47\x6a\x42\xdd\x91\x29\x21\xf9\x1e\x22\x42\x8e\x51\xa2\xfd\x62\x02\x26\x36\x8d\x00\x47\x8f\xcc\x8a\x8a\x22\xee\x53\x67\x8c\x99\x1d\x45\xcb\x67\xf5\x2e\xe6\xe4\x75\x88\xa5\xa9\x81\x42\x02\x43\xd1\x41\x69\x52\x35\x4e\x63\xb9\x83\x12\x80\xd2\xde\x04\x6d\x8c\x18\xe0\xa8\xac\x63\xd1\x32\x13\xea\xd4\x2b\x65\x64\x95\x00\x0c\xcf\xb7\xd8\x56\xd1\xe2\xcc\x62\x65\x71\x7e\x1f\x3a\xcf\x43\x62\xb8\x33\xdf\x9d\x23\x9e\x7f\x56\x8a\xda\xd5\x52\x30\xfc\x37\x61\xe9\xdc\xa9\x74\x37\x28\x1a\x7f\x35\xf1\xd8\xe4\xe3\x36\xd3\xec\x8c\x46\x53\x4c\xfc\x92\x04\x10\xc1\x9b\x50\xa0\xd2\xa7\x36\x29\xe2\x16\xe6\xd9\x39\x79\x55\x3c\x5b\x70\x01\xbf\x02\x88\x65\xb9\xb7\x81\x50\x6f\x93\x38\x2c\x2e\xc5\x1c\x59\x7c\x59\x61\xe4\xf8\xee\x67\xfd\xb1\xf8\x0c\xe9\x7b\xbd\x83\x81\x5e\xd2\xf7\x61\x51\xac\x2e\x31\xc5\x57\xca\xda\x99\x1c\x5e\x06\xa6\xe2\x6e\x55\x84\x6a\xf4\xe2\x05\x2e\x9e\xf2\xdc\xdc\xd6\xf3\xa5\x79\x4b\x88\x2e\x25\xc2\x45\x8a\xf4\x48\xf8\xff\xd2\x6a\x9d\xa5\x9f\xed\xfb\xe8\x57\xf6\x09\xdd\x86\xd0\x69\xb7\xc8\xc6\xf2\x32\x74\x1a\xf5\xdd\x0f\xb3\x6a\xec\xde\x22\x2b\xa3\xeb\xa2\xbc\x88\xae\x7b\x19\xca\x6a\xc5\x89\xb8\x95\xbb\x60\xcc\xfd\xab\xfd\xd8\xf3\x0d\xc0\x6c\x28\x26\xf0\xbc\x7b\xc7\xe7\xfe\x95\x17\xaf\xa2\x62\xcb\x93\x1c\x4b\xfc\x6e\x7f\x23\x5b\x6e\xb4\xcf\xc4\xd0\xaf\x79\x51\xb8\xf2\xd0\x37\x41\xa8\xb4\x34\xbf\x65\xfe\x29\xb7\x6d\xc0\x8a\xdf\x6b\xd0\xf1\xbc\xc8\x17\x93\x3b\xff\xf0\xa3\xdc\xa9\x1e\x2f\xfe\x03\x26\xb9\xf3\x04\x58\xee\xec\x3c\x80\xb4\xe3\x8b\x4a\x4a\xe2\x0a\x68\x8c\xc3\x01\xcd\x78\xdc\x26\x4f\x6e\x35\x4a\xdf\x86\x9b\xcd\x09\xcd\xe1\x8e\xd0\x16\x4a\xa8\x79\x1e\xa8\xe3\x9e\xad\x1c
\x51\x5a\x03\xab\xbd\xec\xd4\x7a\x31\xfe\xcf\xff\x36\x2a\xe0\x07\x61\xeb\xc7\xde\xc2\xf2\xf9\x10\x8a\x34\x41\x32\xf6\x46\x66\x0c\xcd\x73\x79\xa8\xb7\xa0\xa1\x20\x57\x5e\x8e\x44\xa8\xf0\xad\x33\xa1\x9f\x23\x15\x0a\xc4\xef\x85\xfc\x2d\x13\x52\x99\x30\x6b\x9d\xcb\x19\x89\xe0\xce\xd0\x5c\xc3\xb2\x0b\x42\x2a\xc7\x4a\x3b\xc5\xb1\xd2\x34\x6f\xa1\x6f\x72\x24\xe5\xc4\xbe\xea\xd8\xe7\x65\x0e\x8a\xfa\x4a\xc8\x9d\xe5\x1a\x82\xa7\x39\x5a\x95\xae\xf2\x76\x8b\x51\x5f\x86\x7a\x7e\x93\x59\x12\x12\xc8\x9c\x28\x47\x67\x68\x87\x2a\x8a\xd3\x8f\x65\xc6\x31\x16\x62\xce\x23\xac\x7b\xe4\x3a\x59\x2e\x04\xd2\xc0\x39\x24\x19\xa2\x42\xac\xaa\x8d\x84\x42\x84\x98\x48\x0f\x16\x3e\x32\x0c\x5b\xa8\x0a\xc6\x8c\xa6\x7a\x97\x6d\xbc\x48\x33\xd7\xf5\xd3\xaa\x9c\x5b\x42\x62\x12\xaf\x7a\x51\x3c\x98\x66\x9c\xfb\x49\xda\xa1\xd6\x1c\xc9\xcd\x4e\x73\x31\xde\x06\x28\x0b\x50\x37\xee\x4e\x0a\x09\x38\x88\x06\xab\xc0\xe3\x33\x03\x8c\xfb\xc3\xe1\xf2\xb2\x0d\xfc\x71\xf1\x24\x02\x6c\x12\xe9\x7e\xde\x5e\x03\x0d\xc9\xf5\x24\x4e\x0a\xd2\x89\xd0\x21\x16\x8f\xe7\x64\xc3\x6c\x78\x48\x22\x60\x79\x25\xbc\xbe\x75\x61\x75\xf5\x8d\x49\x6e\x96\x45\x3a\xb8\xde\xad\x33\xe2\x83\x5b\x09\x8a\xa4\x3f\x7f\x26\x16\xb3\xfa\xa3\x7e\x6c\x6b\xe4\x13\x37\x68\xc1\xb4\xdd\x5c\xf4\x5c\x54\xe0\x89\xd7\x26\xf8\x79\x5b\xec\xb3\x1a\x74\x24\x5e\xd8\x31\x6d\x6c\xb9\xca\x49\x90\xb7\xb0\x8b\xca\x4a\x0d\x18\x8f\xb6\x11\x89\x65\x12\x2c\xe4\x81\x8e\x97\x93\xc0\x04\xda\x22\xcb\xc2\x2f\x9a\xcb\xc2\x9c\xc4\x7f\xc1\x89\x85\x1c\x39\xcf\x8b\xd7\xc5\x3e\x0a\x15\xa3\xa6\x67\x94\xf3\xdd\x47\x26\xd9\x53\x9b\x0f\x3d\xdf\x9a\x5a\x46\x3b\x59\xc3\x03\x5a\x75\xd2\x25\x81\x97\xd1\xd4\xc7\x13\x54\x24\x71\x41\x4e\xe2\xbc\x42\xde\xe2\xbc\x24\x6e\x98\x5c\x19\xe6\xf9\x36\x91\xa4\x14\x3b\x26\x8f\x7b\x5e\xc0\x7a\x0b\x76\xbf\xb7\x48\x5a\xed\x00\xd2\x98\x74\x83\xd8\xf1\x1e\x95\xcb\x8f\x21\xe1\x2d\xdb\xbd\x0c\x6b\x76\x28\x03\x7d\x87\x0b\x41\xe0\xb2\x83\x71\xe9\x63\xdc\xdd\x8e\xe7\x3a\x49\xf4\x78\x0b\x07\xad\x1d\xe3\xee\x37\x1b\x4a\xfd\x59\xdb\x9c\x4d\x73\x54\x64\xe5\x33\x1a\xfa\xb3
\xf8\x12\x2f\x89\x97\x42\x18\x90\x46\x16\x79\x10\x21\x68\xdc\x58\x62\x8e\xb9\x69\x9d\x37\xe1\x70\x8b\x9c\xb0\x88\xb3\xd4\x97\xab\x5b\x9c\xf5\x9f\xde\xd4\x3e\xf4\x69\xee\x97\xed\x2f\x3a\x56\x47\x1f\x72\x9f\x0b\x06\x7f\x9c\x3b\x0f\x87\x70\x82\x6c\xfe\x5b\xee\x3c\x78\x02\xd7\xcd\xb5\x68\x2f\xba\x58\x56\xc4\x4c\x74\x25\x23\xac\x2b\x55\xd0\xa1\x73\x4e\xb8\x59\xab\xbd\x78\x5b\x89\x66\xaf\x63\xea\x3a\xd2\xe7\xdd\x0d\x12\xcd\xdb\x1b\x24\xa2\x83\x2d\x4b\xc9\xc2\xac\xb2\xe8\x47\x79\xa3\x26\xb2\xac\x53\x82\x1c\xed\x48\x26\x2f\x18\x62\xf2\x02\xba\x83\x5e\xb6\xec\x09\x56\x98\xa6\x09\x72\xad\x33\x0e\x59\x2e\x2b\x52\xbd\xdb\x62\x07\xbc\xce\xb5\x1d\xf0\x03\xc7\xfa\x1c\x14\xf5\x5e\x3e\x3a\x10\xd4\x57\xc2\x10\x6a\xc0\xd8\xe3\x39\xff\x95\x1e\x33\x79\x60\xd7\xc2\x0b\x6f\x60\x4b\x9b\xc5\x8b\x3a\x58\x92\xdf\xc5\x92\x8e\x4b\x96\xf4\x36\x27\xac\x60\x40\xdf\x6a\xf7\xe3\xe2\xfe\x49\x79\x9f\x8f\xf0\x43\x0f\x14\x13\x1b\x23\xab\x11\x9f\x51\x59\x0d\xfd\xda\x79\x95\x9f\xf9\x25\x3f\xcb\x72\xe2\x9a\x10\x54\x79\x9a\xd5\x47\xae\x26\x57\xf5\x4c\xf1\x34\x5a\xe1\x69\xef\x6a\x3c\xed\x60\x3b\x4f\x3b\xcf\xcd\xd1\x3b\xc9\xd3\x0e\x14\x4f\xbb\xc8\x4d\x20\x89\xb3\x27\xe7\x2b\x01\x4a\x16\x5a\x15\x10\x85\x45\xd1\x0e\x72\xb2\x87\x2f\xfd\x85\xdc\x4f\x12\x9b\x49\x9b\x35\x47\xd2\x9a\xef\xff\x09\x12\xfc\x59\x43\x82\x8f\x7e\x51\x7a\x3f\xbb\x51\x7a\x7f\xdd\x41\xc9\xcb\x94\xb3\x12\x6f\x63\x89\xa9\x13\xe7\x0e\xda\xa6\x02\x88\x2c\x77\x81\x85\x9e\xcf\x62\x08\x20\x16\xb8\x76\xe4\x93\x00\xdd\x2c\x26\x5b\x64\x4c\x50\xfe\x79\x9b\x56\x21\x0c\x5b\x9b\xe7\x60\x8c\x23\xb4\xf1\x4a\x3d\x58\x1a\x37\xb0\x54\x7b\x02\x97\x79\x35\xdc\x32\x19\x95\x76\xa6\x14\xfe\x55\x91\xf5\x7b\x2e\xe6\xde\x2a\xb2\xb2\x5b\x88\xac\x47\x39\x4c\x20\x32\x81\xb8\x8e\x40\xbd\xb3\x18\x2a\x47\x1a\xe0\x9a\xa3\x5f\x91\x6a\x4e\x28\xf3\xc3\xde\xdc\xbf\xea\x4d\xe2\xa4\xf8\x72\x6d\xa7\x53\x87\xaa\x7f\x51\x77\x7f\x4a\x66\xba\xc8\xeb\x27\x37\xe5\x98\x95\xbb\x4a\x7e\x92\x69\xad\xb1\x64\xc2\x0e\xd6\x4c\x40\xea\x7f\x87\x5b\xf4\xe4\xef\x7f\xbf\xc3\x2d\x36\xff\xf9\xf3\x28\x26
\x19\x02\x8e\x91\xce\xe2\x95\x51\xdb\x4c\x81\x5f\x6e\xbc\x58\x86\x3e\xf7\x07\x0b\x3f\xca\x7a\xc6\x5d\x2a\x84\x01\xb2\x8b\x44\xc3\x84\x53\xf9\x73\xc7\x6c\xc1\xc2\x42\xca\x10\x6f\x96\xb6\xc4\x7d\xa4\xc2\x99\x9c\xe1\xd7\xe2\x0f\x46\x49\x7c\xc8\x1d\xc1\xf4\xd7\xf0\xe2\x06\xa6\xf9\xea\x06\x69\xea\xf9\x0d\x4c\xf9\x7d\xc7\x73\x1d\xd5\xf8\xf9\x16\x42\x56\xab\x30\x55\x9e\xdc\xdb\x6d\xcf\xb7\xb9\x18\xd2\x90\xbc\xc8\x49\x62\xb9\xab\x86\x41\x97\x6b\x71\xcc\xa2\x0f\x15\x95\x7c\x93\xab\x1c\xb8\x1f\x3b\xa9\x88\x64\xc0\x91\x60\xc1\x65\xcd\xc9\xb6\xf3\x71\x2c\xb6\x75\xca\x6f\xa8\x32\x17\x59\xee\x6a\x5d\x58\x7e\x22\x5f\x25\x43\x37\x4d\xb3\x71\xee\x57\xb5\x12\xf6\x16\x9a\x50\x0b\x70\xdc\x81\x39\x99\x26\x60\x70\xca\x8e\x23\xcf\xbf\xc4\x54\xfa\x5c\x96\x23\xdc\x38\x2b\x4c\xfc\x90\xca\xc5\xec\xf0\xbe\xac\x83\x1c\x0d\xc9\x9b\x5c\xf2\x7d\x69\x84\x4c\x06\x0f\xb7\x2c\x77\x89\x37\x6f\x55\x15\x34\xbb\x67\xdc\x95\x39\x10\xd0\xf3\xdc\xaf\xb0\xa4\x4f\x9d\xc6\x8b\xc4\x62\x09\xf2\xe9\xf9\x88\x64\x0e\xe1\x98\x04\xfd\x08\xa8\xc3\x2d\xb6\x04\xb7\x90\xaa\x10\x09\x87\xbf\x24\x5a\xf9\x4a\xb4\x0a\x0a\x41\xca\xbf\xfd\xfb\xbe\x7c\x77\xdb\x86\xe0\x0a\xf5\x9a\x38\x8e\x74\x00\xb1\xfc\x55\x89\xe5\xaf\x24\x96\xff\x99\x5d\x2a\x64\xb5\xcd\xb2\x73\x42\x30\xcb\x50\xd8\xf8\x90\xa3\x64\xf2\x3e\x6f\x88\x69\x15\x51\xe6\x38\x37\x47\xcf\x85\x16\x6d\x2b\x01\x4d\x5c\xb9\xf2\xea\x24\x37\x47\x2f\x72\x69\xae\xc9\xa0\x2b\xe3\xaf\x84\x64\x0c\xc5\x63\x93\xb5\x82\x99\x2a\xb8\xa8\x34\x13\x86\x90\xb0\x63\x77\x5e\x9e\xc5\x28\x4e\xb4\x33\x1c\xfe\x8f\xd2\x80\xdc\x41\xf2\x7a\xb5\x2b\xe9\xb2\x5a\x92\xc1\xcf\x39\x41\x37\x74\xa4\x7f\xf6\x9c\x7c\xcc\x21\x82\xc4\x62\xb3\xc2\x85\x50\x0b\x01\x6b\x13\xbe\x6c\x91\x14\x84\x8c\x10\x4b\xa1\xfe\x5c\x86\xce\x1d\xa3\xe0\xc0\xa6\x52\x70\xd8\x03\x0c\x62\x0b\x81\x89\x3f\x73\xcc\x6e\xc3\x1e\x6c\x3f\x2f\x72\x69\xe2\xf5\xaa\xe2\xc1\xe6\xc3\xc1\xcc\xa7\x5e\x55\x9d\x3e\xaa\x82\x59\x4f\x80\x1a\xa7\x2c\xed\x55\xda\xe2\x0d\xfd\xc2\x2e\x71\x43\x18\x82\x0a\x6e\x0b\x42\xac\x21\xa5\xf0\x51\x1e\xfe\x49\x17\x2c\x6c\xb7
\xd3\x6c\x47\xd0\xc1\xd2\x85\xe7\x29\xf1\x47\xc6\x73\x94\xf5\x88\x14\x5b\x4c\xc3\x96\x37\x0c\x6d\xdb\xd8\x0b\x6b\xa5\xaa\xe6\xe4\x53\x2e\x56\x7a\x02\x01\x06\x96\x7e\x0a\xa1\x74\x97\x84\xc4\x7a\x0d\x93\x50\xdf\x2f\xcf\xc8\x21\xb1\xce\x20\x0e\x4b\x8b\xc9\xc6\x82\x28\x40\x91\x9f\x17\x8f\xca\x51\x63\x3d\xea\xeb\x1c\x30\x86\xdf\xc5\xfd\xd6\x8f\x55\x7d\xaf\x2b\x01\xc2\xb5\xce\xfb\x37\x89\xab\xe5\xf9\x66\xe3\xd0\xfb\x81\x64\x27\x2f\x03\x42\xb8\x73\x07\x23\x17\x77\x04\x6b\xe3\x6b\xed\xe3\x9a\x8e\xb6\xb9\x95\x3d\xa8\x1a\x8b\xf6\xe3\x30\xa4\xcb\xd4\xef\xd1\x30\x54\x07\x5e\x86\xf9\xd5\xde\xe2\x26\x56\x7b\x5d\x06\xf3\xd4\x5f\xd6\x9f\xf9\x32\x85\x4b\xf2\x21\x84\x09\x30\x48\x85\x50\x20\xc9\xed\x4b\x69\x86\x9e\xed\x18\x26\x44\x5e\x2d\xa6\xe4\x65\x5e\xc6\x94\x44\x31\xd7\x27\x6f\xaa\xc7\xc4\x93\xf6\x6b\x9e\xc4\xd1\xd4\x30\x81\x7b\x37\x9c\xd6\x53\xeb\x9b\x0e\x75\x75\x9b\x25\x0d\x6b\xa8\x2d\x4d\x4a\xe2\x9f\x81\x1b\x87\xd9\x42\xe6\x34\x54\x7a\x46\x45\xca\xd6\xde\x03\xfa\x60\x4c\x4d\x3c\xf1\x8a\x89\x27\xe5\x02\x44\xe2\xd6\x22\x27\x14\x53\x9e\xad\x50\x34\x3a\x40\x6a\xb8\xa9\xbf\xf6\xaa\x17\x85\x44\x5f\xb8\x29\x6c\x24\x2b\xb9\xfd\xcc\xff\x3d\xb3\xad\x4c\xac\x9a\x68\x2b\x70\xdc\x5a\xd5\xf2\x5f\x58\x5d\xe5\x28\xfd\xdf\xb1\xb8\x5a\x8f\x6b\xd2\xf7\x5a\xb3\x89\xef\x7b\x8c\xba\x55\x67\xd6\x40\x8b\x0c\x42\x17\xf7\xba\xd3\xaf\xa2\x0c\x74\xef\xbf\x48\x1c\xfd\x14\x5d\xee\x62\x8b\xbe\xa9\xdd\xe8\x7d\x27\x32\x47\x86\x47\x39\x1d\x18\x77\x7d\xdb\x87\x7b\xff\xf5\xcf\xf4\x7f\x92\x8a\xc3\xfd\x4f\xf1\xd0\x16\xba\xc7\x3f\xef\xcd\xf8\x22\x2c\x5e\x15\x02\x88\x90\xe6\xb9\x59\x89\xfc\x93\x5d\x4d\x13\xba\x58\x7c\xf3\xbd\x00\x63\xdc\x8c\x09\x0d\x53\xc1\xb5\x33\xaf\xdb\xc4\x7a\x8c\x56\xa4\xdf\x87\xa3\xc8\x1e\xfe\x7e\x8c\x65\x98\x46\x09\x56\x61\xa2\x5e\x97\xa3\xb1\xf6\x30\x4e\xfe\x19\xfd\xfc\x67\xf2\xf3\x9f\x91\x74\x34\x76\x25\x6a\x8a\x29\xd3\xc4\xa7\x86\x09\x41\x6b\x17\x52\x3c\x4c\xe2\x55\x8a\xa2\x21\x1a\xb6\x62\x6f\x6b\x4a\xe3\x4b\x92\x79\xf0\x00\x76\x1e\x61\x05\x3f\x4f\xe6\x95\x96\x67\x33\xff\x16\xf4\x4e\xb6\x43
\x60\x54\x42\xa0\xeb\x29\x10\xcc\x34\x08\x72\x04\x41\xff\xd6\xf8\x0d\x81\x87\x72\x82\xf7\x17\x20\xfa\xbf\x65\xde\x6d\x53\xfc\xab\x51\xfe\xbf\x61\xc1\x0b\xdb\x4d\xed\x63\xfe\x65\xe4\x9f\xb4\xc1\x76\xa5\xf0\x08\xeb\x78\xae\xed\xdd\x69\xc7\x73\x1d\x34\x16\x7a\x42\xfb\x7b\xba\x86\xdc\x53\xc5\xb4\x3d\xaf\x2d\x8f\xf9\x0e\x7a\x21\x54\x42\x69\xda\x83\xaa\xaa\x51\x48\x35\xb7\x29\x6d\x3c\x6b\x75\x33\x13\xb8\xbc\xcd\xc7\xac\xfe\xbc\xd5\xc1\x4c\x36\xa9\x1f\x37\x94\x41\x21\x6d\x67\x17\xbf\x36\x6d\xaa\x8e\x72\x36\xbc\xac\x5a\x8d\x36\x49\xa9\xef\xf5\x9b\x8b\x59\xad\x92\xd5\x7e\xb6\x2e\x93\x8d\x38\x32\xd9\x88\xcc\x06\x88\x66\xbe\x23\x21\x84\x6d\xf7\xbe\xd5\xee\x4a\xe8\x48\xcb\x2d\xd6\x34\x81\x6f\xf7\xa4\xfd\xd7\xfc\x64\x2b\x5e\xb1\x89\xf2\x8a\x4d\x94\x57\xec\x69\x88\xe9\xe4\x0b\x6f\x57\x14\xb7\x5f\x83\x5f\xf3\x76\xf5\x2d\xe9\xd4\x09\x9b\xd1\x0e\xb7\xf7\x19\x4d\xba\x7d\x46\x3d\x0f\x83\x25\x32\xbd\x27\x8b\xe6\x9e\xb4\xb9\x89\xd1\x61\xaf\x8c\xd5\x90\xce\xa7\x1e\x89\x4c\x55\x0b\x72\x26\xb9\xd0\xd2\x30\x61\xd9\xec\xaf\x25\xea\xb6\x70\xe2\xd6\x1b\x72\x14\x13\x6e\x8e\x1a\xeb\x5a\x2e\xf8\xcc\x6b\x21\x51\xe8\x31\xa6\x73\x16\x56\x8c\x8a\xf2\x7f\xfb\x97\xbb\xdb\xad\xf7\x65\xcb\xe8\x03\xcc\xde\x56\xd1\x22\x16\x1e\x7a\x98\x37\xb9\x86\xf2\x37\xae\xb3\xc3\x4a\xf4\x70\xa3\xbe\xc8\xed\xe3\x9a\x7d\x4d\x02\xa7\x1d\x24\x4c\x9b\xe0\x56\x6d\x72\x47\x77\xdc\x73\x75\xca\x15\xee\xb7\x17\xc2\xd4\x13\x88\x25\x3f\x75\x33\x55\x98\x8a\x7f\xde\x8c\x5d\x78\x9f\x62\xb0\x94\xe7\x18\x0b\x3e\x78\x64\xc0\x7c\x8b\x0c\x54\x21\x3e\xd1\x06\x41\xd9\xd4\x7f\x1e\x68\x5e\xa5\x3d\x87\x2f\x3d\x55\x2b\x69\xb7\x0b\x78\x8b\x48\x60\x7d\x9c\x38\xde\xd6\xf2\x61\xed\x20\x78\x5b\xcb\xfb\xd5\x96\x87\x6d\x9f\x57\x49\xe4\xe0\x70\x8b\x3e\x84\xc0\xe1\x16\x3b\x84\x58\x5c\x7d\x7e\x56\x5d\xe1\xd4\xdb\xb4\xf1\xb7\xda\x05\x62\xed\xa2\x39\x26\x7f\x84\x84\x7b\xf0\x22\x54\x55\x7e\x76\x3d\x95\x16\x21\x29\x54\x51\x8a\xe9\x59\x1b\xaa\x74\x77\x1f\xfb\xb2\x8f\x4c\xf5\xf1\x00\xa8\x9d\xad\x4d\x08\x5a\xfb\xb8\x85\x0b\x80\x3c\xf1\x50\xe5\xf5\x62\x18\x7b\x58
\x02\x34\x1a\x5d\x92\xb9\x07\x6d\x4e\xc4\xca\x8c\x29\x24\xe7\x01\x4f\xd0\xb1\xc2\x40\xca\x08\x95\xf4\x41\xa5\x73\xec\x57\x99\x7c\xf2\xd4\x53\xd9\xee\x2f\x84\x3e\xb0\x45\x9b\x2f\x24\x3e\x1a\x92\x09\xab\xd8\x27\x5f\x27\x7e\x1e\xf8\x2b\x95\xea\x54\xda\x45\xce\xb7\x02\x6c\xed\x88\xe7\x61\xcf\xb8\x7b\xe5\xa9\xfe\x2e\x3c\xc0\x44\x24\x99\x23\x48\xb9\x81\x19\x2f\x7c\xc3\x36\xde\x63\xa5\x40\xa3\x76\x22\xb1\x10\xcb\xd8\x3c\xc2\xa3\x21\x39\xf5\xca\xe9\x65\xf2\xa4\xa1\xf3\xc5\xea\xa1\x18\xaf\x59\x5d\x53\x9f\xeb\x85\x7a\xa6\x80\x71\x6d\xc2\xf1\x0d\x64\xe3\xa4\xe3\xb9\x3e\x19\xf8\xd6\xf1\x5c\x1f\xa7\x5f\x7b\xaa\xb6\xcc\xd9\x2d\x97\x30\x89\x57\xd5\x15\x2c\x80\xf2\x1c\x8f\xb3\x40\x81\xac\xe4\xed\xdf\x3c\x48\x3d\x13\x52\x8f\x5c\x7b\x26\x44\x26\x14\xad\x0f\x54\x21\x04\xf5\xc2\x7d\xf5\xc2\x71\xed\x05\x5e\x79\x61\x8c\x49\x0b\xaa\x9d\x9f\xd4\xda\x6a\xcf\x8c\x3d\xaf\x35\xab\x02\x06\xca\x02\x96\x7b\x39\x95\xf5\x5e\x38\x04\xe2\x6a\x09\x58\x71\xe4\x02\xad\x82\xfd\xd1\x19\x31\x4e\xfd\x95\x4e\x9f\x2b\x46\x5a\x4d\x80\xc3\x87\x14\xd3\x94\x19\x63\x2f\xe0\xe5\xc3\xe9\x44\xa6\x75\x02\xe6\x4c\x36\xa2\x18\x9a\xe6\x99\x89\x45\x11\x54\x2f\xc9\x99\x07\xd4\x62\x2b\xa0\x58\x3a\xc1\xba\x42\x2c\xa6\x16\xed\x57\xd1\xb6\x58\xe0\x5a\x06\x91\x36\x71\xdf\x78\xa5\x4a\x33\xf6\x74\x98\x66\x29\xd8\x55\xed\xfa\x12\x5f\x4a\xb6\xab\xa3\x09\x54\x68\xb5\x65\xb4\xd9\xf2\x6a\xb4\xad\xf4\x2f\xaa\xd1\x25\xe4\xe7\xfa\x9c\x0d\xb6\x36\xfb\x20\x9d\x23\xbf\x6e\x9a\xf6\xd0\x89\xfd\xd0\x23\x3b\xbf\x7f\xa1\xe8\x43\x91\xe1\x4a\xb5\x52\x1e\x49\x71\x30\x18\x46\x6c\xfe\xa5\xa7\x0e\xf7\x0a\x50\x29\xf2\xd6\x5c\x79\x2a\xf3\x76\x01\x29\xb9\x67\x82\x4c\xb9\xbd\x22\xb1\x07\xc6\xbe\xae\x8f\xa9\x9b\x4e\x8a\xa6\xa1\x6c\xba\x6b\xb6\xee\xca\x25\x59\x79\x10\x63\x19\x54\xb8\x24\xe7\x82\xdb\x52\x60\xa5\xc4\xfc\x4e\x0a\x56\xb3\x47\x86\x09\x6f\xeb\x28\xd8\xa6\x86\x54\x85\x5a\x37\x0c\x96\x78\x90\xe0\x72\x32\x04\xfc\xcf\x34\x5a\x4e\x1a\x26\xc1\x65\x29\xaf\xd7\x88\x5c\xec\xd1\xb0\x27\x54\x81\x36\x9b\x0d\x3e\x1d\x08\x5d\xcd\x4b\xe2\x65\xb5\xd9\xd7\x7a\x3c
\xc0\x63\xe9\x00\x6a\xb1\x37\xa0\xf2\x7e\xbe\x92\xd6\x75\xcb\x3d\xb9\x21\xbc\xa2\x98\x80\x3e\x3c\xe9\x38\x46\xe8\x98\xfb\xc0\x0b\x68\x18\x4f\x7b\xd5\x0b\xc5\xad\xb6\x7c\x31\xea\xa2\x98\xcd\xa4\xab\xc1\xa6\x91\xfe\x9d\x57\x7f\xae\x12\x77\xd4\x69\x79\x8d\x94\xbb\x61\x9c\xaa\x83\xe7\xa4\xd5\x91\xa1\x6d\xa9\x63\xef\x4a\x77\xca\xdb\x9b\x4c\xe2\xb8\xc2\x89\xfd\xaa\xd3\xdf\x4d\x7b\xa7\x56\xb8\x10\xb2\xe0\x60\x9b\x18\x54\x95\x82\x8e\x3a\x1b\xb2\x37\xc8\xb6\x77\x13\xbf\x77\x15\x67\xbd\x34\x53\x3f\x56\x34\xe2\x3d\x1e\xf7\x64\xf9\xdb\x1a\xf5\x18\x89\x45\x79\xd5\x1a\x68\x5d\x63\x95\x97\x94\xcc\xc9\x81\x27\x2b\x87\x55\xbd\x28\xe3\x68\x12\x24\x0b\x49\x42\xe8\x63\xfb\x92\x12\x37\x35\xc1\x3d\xb1\x8d\xb1\x1c\x4d\x53\x5d\x31\xf5\xef\x5b\x74\x9f\x36\xad\xb6\x77\x63\x14\x6d\x84\x19\x08\x8e\xc3\xb2\x9e\xcf\xeb\x2d\xeb\x58\x93\x50\x3f\x74\x2a\x4e\x32\x81\xc7\xe5\x66\x74\x73\xab\xb6\x71\xb3\x23\xcd\x46\x90\x3a\xae\xe4\xeb\xe6\x4a\xca\xe5\x32\x6a\x86\xa7\xbf\x7a\x8c\x03\x3f\xf4\xb9\x0a\x32\x5f\x9b\xf0\xa2\x73\x3b\xc2\x9c\x44\xe6\x5d\xe3\xef\xaa\x0e\xb1\x4c\xaa\x82\x09\xd4\x5f\x79\x2d\x6e\xe7\xd5\xfc\xa4\xb5\x84\xcf\x58\x7a\xc8\xf0\xbd\x80\x63\xb6\x52\xee\xfc\xa2\x6f\x70\x15\x59\x05\x0f\x37\xaa\xde\x3b\x6a\x9b\x46\xdc\xfe\xc5\x5e\xe7\xe4\x85\x07\xe8\x2a\x8f\x15\x8e\xea\x67\xe8\x52\x94\x94\x38\xf9\xbc\xc3\xb4\xfb\x3e\x93\x71\x07\x86\xf3\xbf\x0c\xbb\x34\x95\x3c\xc4\x20\x04\x7c\xf3\x7d\xdb\x32\x5d\x52\x12\xa6\x9b\x39\x8a\x5a\x6b\x1d\xd7\x7c\x11\x3c\x4f\xba\x6a\xc0\x73\x4f\xda\x91\x3f\x6f\x41\xa3\xff\xcc\x80\xe5\x62\xb2\x15\xd5\xb8\x8a\x63\x15\x74\xfa\xee\xa1\xd0\x98\x62\xe2\xfd\xf1\xa6\x22\x2c\x9f\xa3\xcc\x8a\x2d\xd8\xaa\x05\x5b\xb0\x0b\x55\xea\x5b\xf6\x82\x39\xf3\x08\x1a\x40\x3e\xfc\xaa\x7f\x7a\x3d\x8b\x4d\x55\xf6\xff\x10\xf8\x48\xc2\x4d\x81\x16\x98\x19\x84\x7c\xf0\x20\x91\xc5\x60\xeb\x1c\xa0\x34\xe3\xbc\xf7\xc4\x94\xfa\xa6\x90\x2f\x48\x34\x3a\xe5\x64\x4e\x8e\xd4\x5b\xa6\x7d\xc1\x4b\xd3\xe1\xda\x84\x37\xad\x00\x98\x26\x64\x95\x20\x28\xa8\x68\xde\x8f\x35\x1d\x20\x81\x6a\xd9\x2a\xbe\x61\x05\x42
\x0f\x8e\x22\x88\x5f\xfa\x64\xf4\xb7\xb8\x18\xd0\xd9\x5a\x7a\xde\x6c\x75\xa9\x61\x7b\x6b\xe5\x4a\x83\x25\x78\x4d\x90\xfe\x58\x2a\xc8\x50\xaf\x9f\x3e\xe6\xb4\xe7\xe4\x4d\xc1\xc3\x4b\xd8\x2b\x17\xa9\x3d\x02\x47\x6e\xde\x67\x4f\x07\x77\x47\x58\x2f\x5f\x86\xfc\x64\x6d\x36\x13\xde\x6e\x33\x41\xa5\x62\x6d\xc2\x27\xaf\x5a\x12\xe0\x4b\x7d\xad\x37\x72\x5c\x68\x08\xd5\x2e\xe9\xf5\xc1\x8a\x22\xf1\x1b\xa3\x15\x15\xe7\xd7\x6b\x78\xd9\xa1\xd1\x69\x5f\xaf\xa8\xdf\xad\xc8\xa1\xff\x00\x87\x48\xee\xc0\x4b\x0f\x2e\xa9\x59\xad\xf1\x1c\x4c\x48\xa2\x6b\x72\x25\xcf\x8a\x53\x2c\x95\xa0\xef\x8b\xa7\x5d\x93\x2a\x66\x73\xea\x4d\xfd\x1e\xfe\x3b\x58\x06\x61\x18\xaf\xd4\x85\xfa\x00\x85\xcb\xc8\x8b\x79\xbc\xec\x2d\xea\x51\x03\xf2\x48\x0b\x45\x9d\x75\xc7\x70\x5f\x4d\xac\x19\x6d\x42\xd2\xaf\x92\xab\x44\x45\xca\x3e\x50\xb2\xf4\x8f\x16\x33\x79\x87\x17\x46\x41\x88\xba\xc3\xc2\xa2\x3e\x96\x4c\x64\x9f\xc5\x3f\x7b\x18\x57\x5d\x4c\xf0\x96\xfd\xce\x65\x27\x43\x01\x1f\x38\x7f\xde\xc7\x00\xb1\x19\x5f\x84\x87\x71\x62\x98\xe0\xf7\x1d\x2f\x81\xac\xef\xf4\x13\xa0\xfd\xff\x60\x69\x6c\x97\xaa\x34\x62\x1f\x40\x1d\xca\xf0\xb5\xd9\x2e\x9d\x15\x24\xc7\xed\xfc\x20\x05\xaa\x5a\x7e\x0a\x9a\x0d\xff\x3b\xe4\xa7\x5d\xf1\xed\x6e\xff\xdf\x2a\x3f\x75\x8d\xb1\x21\x3f\xc5\x7d\x99\x8a\xd2\x30\x61\xd2\xb5\x6a\xbd\xc6\xd1\x9b\x3c\x18\xdc\xa2\xa0\xdf\x47\xa7\x70\x6c\x5b\xcb\x18\x30\x27\x71\xbf\xed\xb0\xb0\x6e\x32\xdc\x19\xde\x6c\x33\x4c\x0a\x7d\x97\xf5\xbb\xcd\x2e\x9d\x69\x03\x76\x31\xd3\xf8\xfd\x32\x6d\xc0\x52\x81\xe2\x0d\xa6\x94\x32\x5e\x0b\x36\x72\x05\x14\x0e\x5a\xaf\x3c\x99\xbe\x2c\xe8\x03\x57\x7c\x54\x7e\xe4\xa4\x0f\xc6\xf1\x81\x01\xcf\x91\xaa\x7f\x28\xee\x49\x51\xa0\x47\x39\x3e\x3a\x0e\xb1\xf0\x9e\x59\x3c\x16\x92\x44\xed\x21\x1d\x97\x0f\xa5\xb5\xd0\xab\xbf\x7c\x50\x3e\x97\x66\x45\xaf\xc7\xae\xf4\xb8\xfb\xe5\x33\x6d\x81\x90\x0f\x76\xab\x13\xe2\x3e\xde\x16\x74\x50\x61\x42\xf1\x54\xdb\x7c\x8c\xda\x19\x11\x52\xf5\xe7\x9e\x3a\x2b\x32\x55\x39\x3d\x13\x32\x21\x23\xf8\x52\x46\xa0\xb8\x22\xc3\x42\x46\x58\x9b\x90\xf6\xdb\xc5\xd3\x24\x6c
\xea\xf6\x35\x20\x6a\xb1\x32\x9d\xd2\x85\x6f\x2b\x4b\x94\xcc\x83\xf6\xb0\x5d\x88\xd9\xd2\xc7\xae\xac\x24\x51\xef\x26\x2a\xa3\x7c\x25\xba\xcc\xee\x1b\x26\xe4\xfd\xa6\x7c\x2c\xed\x16\xaa\x84\xfb\x95\xb4\x59\x3c\x79\x56\x55\x10\xd5\x57\x85\x25\x1a\x18\xfb\xb2\x94\x42\xef\x5c\xd6\x44\xc0\x81\xeb\x8c\x91\x37\xf2\xd3\x48\x3e\xc9\xdb\x8f\x85\xba\x70\xb4\xfc\x64\x81\x65\xe9\xa2\x1e\x44\x87\xcb\x67\xb4\x22\x64\xba\x18\xec\x0c\x5b\x02\xef\x85\x48\xfa\xe7\x07\x96\xdf\x7d\x9b\xa1\x35\xca\x91\x44\x48\x3d\x67\x4a\x04\x30\x04\x4c\x41\x53\x3e\xf7\x9b\x15\x6f\xe4\x0b\x65\x14\xc6\x9d\xe1\x26\x31\x2f\xda\xe8\xf3\x34\x6c\xd3\x48\x84\xae\x1a\x29\x93\x3a\xc6\x55\xaf\x89\x98\xe5\x27\xa1\x1d\x09\x55\xb2\x67\x48\xc4\xd8\xa3\xd2\x97\xfb\x55\x20\x1d\xb4\xcb\x0f\xdf\x22\xaa\xe9\x29\xab\x0a\x2c\xf5\x39\x96\xd5\x57\x36\x45\xb5\x22\x3c\x7c\xbd\x26\x7e\xd5\x92\x53\x07\xa7\xac\x03\x9c\xb2\xbf\x12\x9c\x5e\xfb\x7e\x52\x6e\xea\x51\xd8\xbe\xa9\x32\x60\xba\x0f\x91\x86\x27\x44\x33\x4f\xa2\x99\x1b\x7b\x98\xcc\x56\x5e\x2d\x05\x6b\x84\x45\x07\xc1\xe8\xa0\xd6\x15\x94\xfa\xf3\x9f\xf4\x1e\xeb\xb4\xdc\x12\x47\x16\x4c\x08\x11\xe3\x62\x0b\xf2\xbe\x20\x25\x57\x32\x12\xc4\x62\x47\xd0\x45\x0e\x54\x59\x97\xde\x71\x24\x2b\x35\xab\x9c\x17\x7f\xc5\x07\xec\x61\xdd\x97\x5b\x7e\x00\xb7\x28\x6f\xe3\xd0\xff\xe2\xd0\xba\x34\xcd\xed\x47\xcf\xfe\xe2\xd1\xdf\xa7\x7e\x72\xfb\xd1\xf3\xbf\x6e\xf4\xe7\xba\x54\xcf\xed\x47\x7f\xfb\xd7\x8d\xfe\x56\xd5\xf6\xb9\xf5\xe0\xac\xd5\xb5\xfa\x5f\x1b\xfc\x57\x3f\x9c\x3d\x2f\xa9\x16\xc6\x44\x46\x16\xdd\xb3\xe8\x6f\x35\x47\xec\x1a\xf3\x94\xa5\x92\x54\xf7\xfd\x62\x32\xcb\x4a\x0c\x41\x2d\xf9\xf8\xdf\x26\xbf\x4d\x7e\x9b\x3c\xad\x25\x1e\x1f\x4c\xe8\x22\x08\xaf\x0c\x30\x16\x71\x14\xcb\x04\x4f\xc5\x07\x7a\xfd\x86\x47\xab\x0a\x94\x7a\xa6\x78\xfe\x1a\x66\xad\xb2\x80\x0e\x1c\x70\x22\xcb\xbd\x6e\x7a\x3f\x3e\x2e\x85\xfb\x97\x29\x2c\xfa\x20\x95\x8e\xeb\xcd\x32\xcd\x6d\x39\x32\x58\x2a\x45\x8d\xa5\x3c\x20\xb9\x40\x51\x83\x7d\x7f\xd6\xe1\x37\x43\xab\x1e\x24\xbb\x84\xf5\xc1\x07\x0e\x14\xee\xdc\xc9\x0a\x47\x8e\x4e
\x17\x94\xba\x03\xc7\x7a\x4d\x22\xcb\x1d\x37\xfd\x3f\xbe\xe4\xe2\x03\x4e\xc0\x2d\x92\xe2\x17\x49\x31\xb9\x83\x3b\xe9\x1e\x98\x16\x3d\x96\x39\x18\xf7\x00\x53\xec\xce\x64\x86\xdd\xef\xd0\x99\x2d\xeb\x61\x9b\x8e\xa1\x7c\xca\x78\xbf\x3d\x0b\xc6\xd7\xd6\x08\x86\xd2\xd3\x3f\x4c\xd1\xfa\x70\x95\xab\xfc\xb1\x70\x49\xfc\x3e\x24\x7d\x74\x65\x85\x5d\x92\xf5\xe1\xa3\x07\x14\x2f\xbf\x36\xca\xe2\xc9\x7e\x28\xec\x92\x3d\x0f\x2e\xb8\xf8\x60\xf1\xe5\xdf\x20\xb2\xdc\xfd\x86\x0d\xa7\xd6\xfa\x94\x13\xb1\x3c\xf0\x01\xeb\x5e\x54\xdb\xff\xd6\xbd\xf0\x91\xb7\x5e\xc3\xb2\xef\xfc\xd8\xb3\xcb\x2a\x3d\x40\x1f\xda\xea\x5c\xd5\x58\xc3\x54\x3f\x2d\xed\xd6\xe2\xf9\xb9\xbe\x5a\xc3\xaa\x68\xa1\x6a\x9c\xe1\x73\xf9\x7b\x0d\x57\x7d\x21\x50\x2d\xfb\x30\xed\xc3\xaa\x0f\xa2\xa5\xce\x25\xbd\x4c\xe2\x85\xcf\x67\x7e\x96\x5a\x41\x7c\xcf\x8b\xdd\x54\x4e\x21\x88\xa6\xf2\xc7\x82\x46\x74\xea\x27\xf7\x64\x97\x47\x7e\xb8\x34\xd6\x82\xb3\xcb\x01\xe5\x5d\x63\x0d\x97\x9d\xea\xe5\x61\xab\x24\xd4\x2c\x58\xb8\xec\x6f\x2a\xde\x7d\xbd\xd6\x6a\x83\x14\xd4\xd5\xd1\x67\xda\xdf\x48\xc1\xba\xea\x37\xb4\xeb\xbe\x80\x6a\x13\x12\x73\x64\xf4\xca\x84\x0d\x6b\x13\x76\xb7\x28\xc5\x2d\x69\xb8\xee\xce\xc9\xa5\xd2\xb9\x35\xd0\x86\xb5\x56\xaa\x3a\xc1\x2c\x24\x89\xb5\x87\x19\xf6\x54\x20\x21\xd4\xc3\x0a\xa5\x76\x34\x56\xea\x86\x3a\x3c\x84\x7d\x79\x1d\xd1\xdc\x30\xe1\xb0\x43\x12\x1a\xf7\x2b\x03\x32\x21\xd8\xe1\x9f\x01\x8f\xa7\xd3\xd0\x17\xf2\xe1\x60\xe1\xe9\x9b\x61\x30\x9d\xf1\xd2\x7d\x73\xc1\x06\x8f\x7a\x4b\x3e\x78\xd0\x5b\xca\xd0\x95\x5a\xb2\x41\x16\x73\x1e\x2f\x0c\x30\x76\x96\x97\xbd\x34\x0e\x03\xaf\x97\x4c\x19\x25\x43\xe8\xc9\xff\xad\x9d\xfb\x8f\xcc\x12\x5f\xf7\x2b\xac\x21\xe2\x34\x88\xaa\x27\xa0\xb5\x75\x11\x53\x61\x09\x8d\x3c\x6d\xaf\x6e\x78\x0f\x28\x28\x6b\xca\x93\xea\x6d\x5c\x93\xaf\x92\x65\x48\xa9\x72\xb7\x4f\x12\x13\xae\x50\x0b\x6d\x2a\x07\xd5\xea\xa8\xad\xee\x6d\x0d\x03\xcc\x06\x2b\x0c\xa2\x30\x88\xfc\x32\xa4\xb2\xf9\x5d\x1d\x41\xed\x1b\xf6\xf8\xc8\x5f\xd5\xb4\xb0\x8a\x0b\x48\xe1\xc5\x4c\xaa\xce\x39\xc9\x1a\x4e\xb7\x9b\x6a\x6a\x64
\x54\x9d\xa3\x50\x0f\x8d\xbb\x60\x3c\xc4\xc8\x6d\x1d\x8c\xa9\xd2\x47\x26\x4d\xf7\x5b\x38\x14\x02\xab\x7b\xbd\xc9\xb5\xd5\x46\x0a\x00\x29\x48\xf3\xac\x4f\x2a\xf9\x57\x2e\xda\x00\xf3\x47\xdf\xde\xd1\x09\x58\xce\xbb\x1a\x68\x9f\xec\xe3\x56\xd0\x66\xc5\x99\x01\x9c\xf4\x9d\x69\x42\x8c\x04\x53\xf9\x7d\xeb\x5c\x8d\x4b\x72\xdc\x07\x03\x71\x0e\x64\xee\xf3\xc8\x84\x93\x3e\x31\x52\x7e\x15\xfa\xe9\xcc\xf7\xa5\x57\x74\x16\x82\x11\xc6\xd4\x93\xe9\x16\xc8\x1c\x33\x84\x99\xfa\x89\x9f\x24\x71\xa2\x1e\x5d\x05\xc4\x38\xa4\x41\xe8\x7b\x3d\x1e\xf7\xc4\x3b\xbd\xfd\xf3\xf3\xde\x24\x89\x17\xb2\x82\xa5\xa9\x12\x34\xac\x4d\xb8\xae\x7c\x45\xdb\x0e\x91\xc8\x49\x2c\x37\xac\xb1\xbe\x6f\x7d\x88\xee\x1a\x61\xc0\xee\xb1\x38\xe6\x29\x4f\xe8\x72\xf0\xd0\x1a\x5a\xc3\x01\x0d\x97\x33\x6a\x3d\x1e\x78\x41\xca\xef\xb9\x69\x5a\x36\xb0\x16\x41\x64\xb9\x42\x9d\xbd\x40\x87\x8b\xb2\x0f\x94\x6d\xe8\xca\x4f\xe3\x85\x3f\x78\x68\xfd\x66\x0d\xf1\xcd\xea\xed\xf2\xe5\xf3\xbe\x14\xba\x2a\xb8\x22\xd5\xc2\x33\x92\x58\xec\x2d\x24\x96\xcb\xcc\x67\x65\x05\x81\x44\xff\x2a\x30\x8a\x56\xa4\x8e\xd2\x8e\xda\x52\xd8\xb9\xf8\xe0\xc3\x3e\x49\x6e\x0f\x63\x5a\xfc\xd2\x54\x9f\x25\x3e\x9d\xf7\xa2\x02\x55\xe5\x75\xb2\xde\x64\xc0\xa7\xd2\xa2\x4e\xab\x47\x29\xc1\x84\xc8\xbc\x80\x62\x7e\xba\x23\xdd\x41\x8d\x12\xac\x37\xfb\xc1\x6c\x56\xa4\xc0\xc7\x68\x0d\x67\xf5\x9d\x56\x87\x18\x96\x9b\x82\x2b\xfe\x70\xe0\xd5\x9a\x59\xdc\xa2\x24\x22\x9f\x12\x62\x9a\xe6\x5a\x0b\x65\x9f\x13\xf2\xe3\x95\x5d\xb4\x2a\xca\x88\xf6\xb8\x45\x1d\x0a\x11\xb7\xa8\xe7\x8d\x73\x3f\xe2\x27\x41\xca\xfd\xc8\x17\x1a\x71\xbc\x4c\xa5\xa9\x8e\x9b\xa2\x45\x44\xf3\x60\x4a\x79\x9c\x58\x59\xea\x27\xbb\x53\x3f\xe2\x56\x10\x79\xfe\xe5\xd9\x84\x18\xef\x92\xc0\x43\x17\x95\xdf\x87\x3f\x7f\xb6\x76\x37\xa3\xe9\x4c\x67\xa3\xe4\xcd\xea\x13\xc1\x84\xdc\x11\xbb\xc5\x93\xf0\xa5\x7f\xf5\xf3\x67\x62\x2d\x7c\x4e\xd5\xcf\x74\x16\x4c\x38\xfe\xde\xf9\x5d\x08\x74\x58\xda\xe8\xe7\xcf\xc8\x92\x29\xe3\xc4\x2f\x2f\x5e\x45\x02\x61\x4c\xf3\x47\x62\x2d\x13\x5f\x0c\x7e\x20\x77\x83\x94\x91\x06\xb3\xc4\x9f\x80
\xef\x88\xd5\x81\xcc\xb9\x88\x09\x37\x2d\xfa\x8c\x12\x97\x64\x7f\xff\xbb\x6f\x31\xdf\x71\x9c\xcc\x62\xbe\xb8\xa0\xef\xf1\x82\xbe\xc7\x27\xd4\xa2\xf2\x19\xb5\xe8\x48\x79\x4a\x64\x6b\x5b\xf9\x9e\xf0\x35\x1a\xbc\xd7\xe0\x4e\xed\xd6\x63\x97\xc4\x72\xa7\x10\x01\x0e\xcc\x45\xbb\x37\x76\x62\xb9\x6f\xc0\x3d\x13\x7f\xcf\xc0\x3d\x12\x7f\x8f\x44\x17\x7b\x7d\xe7\x4b\x44\xfa\x26\xbc\xeb\xa4\x3c\xca\xcd\x6a\x94\xd8\x58\xc4\x00\xf3\xdd\xcb\xea\xc5\x6f\xb7\x9c\x91\xa9\xfa\x59\x23\x6e\x1f\x11\x0e\x47\xe2\xa9\x45\x65\xaa\xe6\xbe\x73\xd6\x27\x3f\xdc\xa9\xfd\x3a\x05\x37\xb5\xaf\x28\xb8\xdc\x6e\x68\x06\xa5\x5f\x41\x7f\xa4\x5d\x73\x23\x8b\xae\x6d\xa9\x49\x81\xf6\x47\xbe\x24\x6f\xfb\x60\xfc\x0d\x8f\x96\xcf\x40\x5e\x8d\xc4\x95\x9b\x63\x4a\x90\x77\x7d\x48\x2c\x46\xe1\x48\xa0\xbe\x3f\x2a\x0b\x8b\xd8\x65\xb5\x91\xc4\xa2\xef\x4d\x4c\xeb\x3a\x41\xff\xda\xb5\x58\xa2\x43\x9f\xec\xf5\x4d\xb1\x68\x67\xa9\x58\xc2\x16\x3a\xcf\xde\xd8\x63\xf2\xc7\xb5\x64\x19\x78\x90\x53\x63\xf0\xeb\xf5\xda\x7c\x76\x16\x38\x3f\x5e\xd1\x20\xb2\x7f\x04\x51\xc0\xed\x83\x3e\x99\xbb\x26\x19\x9a\xeb\x35\x44\xd6\x38\x5c\x8c\x74\xbf\xbd\x08\x93\x15\x63\xae\x29\x3c\x1a\xee\x05\x51\x8f\x9b\xf8\x27\x19\x61\xd6\x64\xc3\x71\xfc\xd1\x3e\x79\x6c\xda\x11\x49\xfe\xf0\xbf\x02\xff\xc3\xff\x6a\xda\xe2\xa7\x23\x7e\x0a\x7d\x66\x1c\x2e\xe0\x2c\x30\x6d\xfc\xe5\x9c\x05\x6b\xc2\x67\x41\x6a\x3e\xfb\xbf\x01\x00\x00\xff\xff\x6b\x64\x26\x23\xa1\x82\x01\x00"), }, "/templates": &vfsgen۰DirInfo{ name: "templates", @@ -147,9 +147,9 @@ var Assets = func() http.FileSystem { "/templates/default.tmpl": &vfsgen۰CompressedFileInfo{ name: "default.tmpl", modTime: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), - uncompressedSize: 17284, + uncompressedSize: 17128, - compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x1b\x7f\x6f\xda\xc8\xf2\x7f\x7f\x8a\x39\x9f\x9e\xae\x91\x00\x93\xf6\xae\xba\x12\xc8\x13\x25\x4e\x63\x3d\x02\x11\x38\xed\x55\xa7\x53\xb4\xd8\x03\x6c\x6b\x7b\x7d\xbb\xeb\x10\x2e\xe5\xbb\x3f\xed\xda\xfc\x30\x98\x84\x44\xbd\x84\xf7\x2e\x89\xda\xd8\xeb\xf9\x3d\xb3\x33\xb3\xde\xf5\xed\x2d\xf8\x38\xa4\x11\x82\x79\x75\x45\x02\xe4\x32\x24\x11\x19\x21\x37\x61\x36\x6b\xaa\xfb\xf3\xf4\xfe\xf6\x16\x30\xf2\x61\x36\x33\xb6\xa2\x5c\xf6\xda\x0a\xeb\xf6\x16\x2a\xf6\x8d\x44\x1e\x91\xe0\xb2\xd7\x86\xd9\xcc\xfa\xd1\xd2\x70\xe2\xdf\x1c\x3d\xa4\xd7\xc8\x1b\x0a\xa8\x97\xdd\xc0\x37\x48\x78\xf0\x67\x82\x7c\x9a\xa2\x67\x8c\xf2\x9c\x44\x32\xf8\x82\x9e\x54\x1c\x7e\x57\xd8\x7d\x49\x64\x22\xe0\x1b\x48\x76\x19\xc7\xc8\x53\x54\x3a\x04\xfc\x73\xf1\xd0\x1c\x52\x4e\xa3\x91\xc2\xa9\x29\x1c\xad\x90\xa8\x9c\xea\x51\xf8\x06\x01\x46\xab\x1c\xff\x00\x05\xf4\x81\xb3\x24\x6e\x93\x01\x06\xa2\xd2\x67\x5c\xa2\x7f\x41\x28\x17\x95\x8f\x24\x48\x50\x31\xfc\xc2\x68\x04\x26\x28\xaa\x90\xb2\x1c\x49\x78\xa5\x68\x55\x5a\x2c\x0c\x59\x94\x22\x1f\x64\x63\x2b\xf4\x0e\x60\x36\x7b\x75\x7b\x0b\x13\x2a\xc7\x79\xe0\x4a\x0f\x43\x76\x8d\x79\xee\x1d\x12\xa2\xc8\x2c\x5a\xc4\x7d\x21\xf8\xc1\xe2\x6a\x8b\x9b\x7c\x14\x1e\xa7\xb1\xa4\x2c\x32\xef\xb0\xb1\xc4\x1b\x99\xba\xf4\x2a\xa0\x42\x66\xa0\x9c\x44\x23\x84\x0a\xcc\x66\xa9\x5c\x35\x63\x39\xb8\x69\x27\x65\x95\xb2\x36\xa4\x12\x5f\xdd\x35\x60\xa1\x40\x26\x58\xca\xbc\x19\x45\x4c\x12\x25\x53\x8e\xe4\xca\xf0\xe3\xe8\xf6\x59\xc2\x3d\xac\xa5\xce\xc4\x08\x39\x91\x8c\xa7\x91\x68\x14\x18\x2a\x67\x03\x11\x10\xef\x6b\xc5\xc7\x21\x49\x02\x59\x91\x54\x06\x98\x59\x41\x62\x18\x07\x44\xe6\x63\xb1\xb2\xcd\xe4\x79\x3a\x89\x50\xb3\x21\x2c\x22\x95\x9f\x73\x3b\xd2\x1b\x92\x20\x18\x10\xef\xeb\x06\xbd\x42\xf1\x15\x51\xf8\x06\xf7\x01\x06\x34\xfa\xba\xb3\x04\x5e\x26\x01\xf5\xcd\xdd\x10\x62\x8e\x2a\xba\x76\x84\x5e\x11\xe8\x4e\x8b\xe9\x94\xb3\xa3\xc8\xd4\x63\x11\x86\xec\x0b\xdd\x51\x06\x05\x9f\xf0\x60\x57\x89\x77\x57\x6e\xc8\x98\x4c\x13\xec\x96\x20\x1c\xd3\xd8\x1b
\x13\xb9\x44\xe0\x2c\x7c\x7c\xe8\xac\x53\x0b\x51\x08\x32\x7a\x40\x58\xe7\x64\x8b\x15\x37\x3f\x91\xd3\x05\xbd\xcd\xdc\xf2\xb0\xa9\xb2\x49\xd1\x0b\x28\x46\xf2\xf1\x1a\x6f\xa3\xb8\x2c\x50\x8f\x8b\xa7\x4d\xba\x34\x12\x92\x44\x1e\x8a\x02\xba\x1b\xc9\xf4\x0e\xab\xb2\x58\x8c\x30\xa2\xf8\x78\x27\xdd\x45\x6c\xd3\x43\x59\xed\xd9\x92\x6a\x0b\x8b\x8d\xb1\x56\xea\x72\xb5\xf4\x00\xaa\x50\x9e\xcd\x8c\x74\x10\xd2\x41\x9d\xd4\xef\xb6\x48\xbe\x20\x6b\x26\xe5\x15\x8d\x0a\xf8\xf5\x50\xb0\xe0\x1a\xfd\x35\x8e\xf3\xe1\xdd\x79\xce\x31\x36\xb8\x96\x77\x31\xa9\xd0\x35\xe6\xe1\xd1\x94\xf3\xfa\x04\x1f\x33\x31\x8d\x17\xff\xdd\xe1\xbf\xe6\xaa\xfd\x79\xb0\x41\xaf\xd0\x3f\x5b\xbc\xbe\xe6\x1f\xc9\xae\x54\x21\xdf\x9a\xe5\x37\xc1\x63\xc2\xe5\xf4\x01\xf0\x92\x8c\x76\x85\x26\x23\x8c\xe4\xd5\x7a\xf9\xcd\xc7\xd7\x35\xf5\x24\xe3\x2c\x16\xcb\xb0\x95\x44\xe2\x55\x3e\xd0\x5e\x62\xe9\x61\xb9\x60\xd3\xaa\x18\x49\x2a\xa7\x57\x3e\x15\x71\x40\xa6\x57\x5b\x3a\xbd\xfb\x13\xf7\x26\xe5\x90\x45\x54\x32\x65\x90\x2b\xc9\x58\xf0\xc0\x92\xb8\x4a\x1b\x43\x42\x83\x65\x1c\x2c\x17\x53\x0f\x96\x32\x4f\x69\x2c\x43\x2d\x96\x51\xff\xe1\xa4\xdb\x72\x3f\x5f\xd8\xa0\x86\xe0\xe2\xf2\x7d\xdb\x69\x81\x59\xb6\xac\x4f\x6f\x5a\x96\x75\xe2\x9e\xc0\x6f\x67\xee\x79\x1b\x0e\x2b\x55\x70\x39\x89\x04\x55\xc1\x46\x02\xcb\xb2\x3b\x26\x98\x63\x29\xe3\x9a\x65\x4d\x26\x93\xca\xe4\x4d\x85\xf1\x91\xe5\xf6\xac\x1b\x45\xeb\x50\x21\x67\x97\x65\xb9\x82\x59\xf1\xa5\x6f\x1e\x1b\xf5\x1f\xca\x65\xa3\x2f\xa7\x01\x02\x89\x7c\xd0\x4c\x7c\xe4\x54\x39\x54\xb5\x4e\xa0\x48\x8b\x9a\x65\x8d\xa8\x1c\x27\x83\x8a\xc7\x42\x4b\xe9\x30\x4a\x22\x4b\x93\x23\x5e\x4a\xaf\xac\x55\x2b\xcf\xcd\x21\x0c\xc3\x70\xc7\x08\xe7\x8e\x0b\x6d\xea\x61\x24\x10\x5e\x9d\x3b\xee\x81\x61\xb4\x58\x3c\xe5\x74\x34\x96\xf0\xca\x3b\x80\xd7\xd5\xc3\x9f\xe1\x3c\xa5\x68\x18\x17\xc8\x43\x2a\x04\x65\x11\x50\x01\x63\xe4\x38\x98\xc2\x88\x93\x48\xa2\x5f\x82\x21\x47\x04\x36\x04\x6f\x4c\xf8\x08\x4b\x20\x19\x90\x68\x0a\x31\x72\xc1\x22\x60\x03\x49\x68\xa4\xe2\x9f\x80\xc7\xe2\xa9\xc1\x86\x20\xc7\x54\x80\x60\x43\x39
\x21\x3c\xd5\x90\x08\xc1\x3c\x4a\x24\xfa\xe0\x33\x2f\x09\x31\x4a\x27\x2e\x0c\x69\x80\x02\x5e\xc9\x31\x82\xd9\xcf\x30\xcc\x03\xcd\xc4\x47\x12\x18\x34\x02\xf5\x6c\xfe\x48\xaf\x43\x59\x22\x81\xa3\x90\x9c\x6a\x2b\x94\x80\x46\x5e\x90\xf8\x4a\x86\xf9\xe3\x80\x86\x34\xe3\xa0\xd0\xb5\xe2\xc2\x90\x0c\x12\x81\x25\x2d\x67\x09\x42\xe6\xd3\xa1\xfa\x8b\x5a\xad\x38\x19\x04\x54\x8c\x4b\xe0\x53\x45\x7a\x90\x48\x2c\x81\x50\x83\xda\x8e\x25\xa5\x87\xc5\x38\x08\x0c\x02\xc3\x63\x31\x45\x01\x5a\xd7\xa5\x74\x1a\x46\x89\x1e\x2b\x83\xca\xcc\x44\x42\x8d\x4c\xc6\x2c\xcc\x6b\x42\x85\x31\x4c\x78\x44\xc5\x18\x35\x8e\xcf\x40\x30\xcd\x51\x45\xb3\x1a\x51\xe0\x43\x16\x04\x6c\xa2\x54\xf3\x58\xe4\xd3\x6c\xe9\xa9\x9d\x4c\x06\x6a\xf9\xed\x2d\xfc\x1a\x31\x49\xbd\xd4\xdc\xda\x01\xf1\xd2\xab\xd9\x23\x31\x26\x41\x00\x03\xcc\x0c\x86\x3e\xd0\x08\xc8\x8a\x3a\x5c\xb1\x57\xfd\xa1\xa4\x24\x80\x98\x71\xcd\x6f\x5d\xcd\x8a\x61\xb8\x67\x36\xf4\xbb\xa7\xee\xa7\x66\xcf\x06\xa7\x0f\x17\xbd\xee\x47\xe7\xc4\x3e\x01\xb3\xd9\x07\xa7\x6f\x96\xe0\x93\xe3\x9e\x75\x2f\x5d\xf8\xd4\xec\xf5\x9a\x1d\xf7\x33\x74\x4f\xa1\xd9\xf9\x0c\xff\x71\x3a\x27\x25\xb0\x7f\xbb\xe8\xd9\xfd\x3e\x74\x7b\x86\x73\x7e\xd1\x76\xec\x93\x12\x38\x9d\x56\xfb\xf2\xc4\xe9\x7c\x80\xf7\x97\x2e\x74\xba\x2e\xb4\x9d\x73\xc7\xb5\x4f\xc0\xed\x82\x62\x98\x91\x72\xec\xbe\x22\x76\x6e\xf7\x5a\x67\xcd\x8e\xdb\x7c\xef\xb4\x1d\xf7\x73\xc9\x38\x75\xdc\x8e\xa2\x79\xda\xed\x41\x13\x2e\x9a\x3d\xd7\x69\x5d\xb6\x9b\x3d\xb8\xb8\xec\x5d\x74\xfb\x36\x34\x3b\x27\xd0\xe9\x76\x9c\xce\x69\xcf\xe9\x7c\xb0\xcf\xed\x8e\x5b\x01\xa7\x03\x9d\x2e\xd8\x1f\xed\x8e\x0b\xfd\xb3\x66\xbb\xad\x58\x19\xcd\x4b\xf7\xac\xdb\x53\xf2\x41\xab\x7b\xf1\xb9\xe7\x7c\x38\x73\xe1\xac\xdb\x3e\xb1\x7b\x7d\x78\x6f\x43\xdb\x69\xbe\x6f\xdb\x29\xab\xce\x67\x68\xb5\x9b\xce\x79\x09\x4e\x9a\xe7\xcd\x0f\xb6\xc6\xea\xba\x67\x76\xcf\x50\x60\xa9\x74\xf0\xe9\xcc\x56\x43\x8a\x5f\xb3\x03\xcd\x96\xeb\x74\x3b\x4a\x8d\x56\xb7\xe3\xf6\x9a\x2d\xb7\x04\x6e\xb7\xe7\x2e\x50\x3f\x39\x7d\xbb\x04\xcd\x9e\xd3\x57\x06\x39\xed\x75\xcf\x4b\x86
\x32\x67\xf7\x54\x81\x38\x1d\x85\xd7\xb1\x53\x2a\xca\xd4\x90\xf3\x48\xb7\xa7\xef\x2f\xfb\xf6\x82\x20\x9c\xd8\xcd\xb6\xd3\xf9\xd0\x57\xc8\x4a\xc5\x39\x70\xc5\x28\x97\x8f\x8d\xba\x4e\x81\x37\x61\x10\x89\x46\x41\x62\x3b\x7c\xf7\xee\x5d\x9a\xcf\xcc\xdd\x80\x84\x4a\x6e\x0d\x73\xc8\x22\x59\x1e\x92\x90\x06\xd3\x1a\xfc\x74\x86\xc1\x35\x4a\xea\x11\xe8\x60\x82\x3f\x95\x60\x31\x50\x82\x26\xa7\x24\x28\x81\x20\x91\x28\x0b\xe4\x74\x78\x04\x03\x76\x53\x16\xf4\x2f\x55\x8b\x61\xc0\xb8\x8f\xbc\x3c\x60\x37\x47\xa0\x89\x0a\xfa\x17\xd6\xe0\xf0\xe7\xf8\xe6\x08\x42\xc2\x47\x34\xaa\x41\xf5\x48\xe5\xd6\x31\x12\xff\x39\xf9\x87\x28\x09\xa8\x8a\xda\x30\xaf\x29\x4e\xd4\x2c\x32\xd5\xec\x95\x18\xc9\x86\x39\xa1\xbe\x1c\x37\x7c\xbc\xa6\x1e\x96\xf5\xcd\xf3\x19\x0b\xac\xb9\xb8\xca\x99\x65\xfc\x33\xa1\xd7\x0d\xb3\x95\x8a\x5a\x76\xa7\x31\xae\x08\xae\x5a\x11\x4b\x39\xf7\x48\x57\x02\x81\xb2\x71\xe9\x9e\x96\x7f\x7d\x66\xf1\xf5\x7b\x97\xe7\x73\xf7\x5d\xbd\x48\xdd\xd2\xc2\x1d\x1b\x46\xdd\x52\x41\xa9\x2e\x06\xcc\x9f\x02\x95\x18\x0a\x8f\xc5\xd8\x30\x4d\x7d\x23\xa7\xea\x3a\x9b\x51\xc2\x1b\x63\x48\xf4\x8c\xb2\x55\x75\x3f\x9f\xf7\xbe\x4f\xaa\x64\x79\x82\x83\xaf\x54\x96\xd3\x07\x21\x63\x72\xac\x91\xd2\xda\x40\x89\x40\x7f\x09\xa4\x62\x43\x63\x97\x89\xff\x25\x11\xb2\x06\x11\x8b\xf0\x08\xc6\xa8\x2a\x53\x0d\x0e\xab\xd5\x7f\x1d\x41\x40\x23\x2c\x2f\x86\x2a\x6f\x31\x3c\x02\x3d\x03\x52\x00\xf8\x81\x86\x6a\xb2\x90\x48\x1e\xc1\x80\x78\x5f\x47\x9c\x25\x91\x5f\xf6\x58\xc0\x78\x0d\x7e\x1c\xbe\x55\xbf\xab\xe6\x87\x98\xf8\xbe\x96\x4a\x45\xc3\x60\xa4\x21\x1b\x66\x06\x69\x2a\x7b\x4b\x32\x78\xea\xf0\x58\x51\x69\x47\x3d\x0a\x65\x07\xa8\x4b\xfe\x8c\x79\x0c\x40\x49\xf0\xc4\x99\xf4\x1a\xb9\x22\x12\x94\x49\x40\x47\x51\x0d\x24\x8b\xf3\x86\xba\xd6\x0f\x1a\xa6\x64\xb1\x79\x5c\xb7\xa4\xbf\x14\x34\xcd\xac\xe6\xdb\x6a\xf5\x89\xa7\x4a\xa1\xd0\xd9\xd2\xaa\x06\x83\x80\x79\x5f\x73\xb1\x1d\x92\x9b\x72\x16\x24\x6f\xab\xd5\xf8\x26\xf7\xd0\x0b\x90\x70\xc5\x50\x8e\x73\xe3\xdb\x26\xca\xc2\x38\x40\x12\xc9\xd6\xa6\x44\xce\x5a\xda\x50\x00\x75\x9f\x5e\x3f
\x75\x58\xe5\xf5\x5d\x37\xce\xdd\x4a\xcc\xe5\x56\x4e\xd6\x93\x39\xf3\xb3\xb2\x84\x09\x1e\x06\x41\x06\xdd\x30\xab\xe9\xbd\x88\x89\x37\xbf\x7f\x52\x45\xb3\x87\x9c\xf8\x34\x11\x35\x78\xa3\xc7\x0a\x12\xc0\x70\x98\xcb\x62\x29\x5a\x0d\x0e\xe3\x1b\x10\x2c\xa0\x3e\xfc\x88\xef\xd4\x6f\x3e\x31\x0c\x87\x2b\xb6\xd8\x87\xec\xb0\x94\xe4\xe9\xb2\xc4\xdb\xad\x13\x2e\x67\x5d\x8d\x32\xc9\x4a\xcd\x2f\xd5\xea\x11\xe8\x12\x95\xc1\x7b\x18\x49\xe4\x45\xfe\xd2\xff\xaa\xda\x29\x9b\x7e\xb3\xdf\xfe\xf2\xfa\x75\xab\xb8\x00\xbd\x56\x71\x6d\x42\x36\xdf\x52\x06\xab\xde\x4b\x71\x8b\x67\xe4\xfc\x67\xb9\xdf\xbb\xd8\xe8\x05\xfd\xb2\xa4\xf0\x5d\xd2\x01\x1c\xc2\x6c\x26\x16\x2f\x3c\x60\xc8\x38\x2c\xf7\x24\xb7\xec\x09\xc3\x6c\xb6\xc6\x15\x56\x77\x28\x1b\xb9\xfd\xc9\x0d\xb0\xec\xd5\x4a\xce\xf9\x8b\x1c\xbc\xb8\xe7\x2f\x61\xba\x4b\x31\x5b\x06\xcf\x61\x1a\x3c\x77\xc5\xc6\xde\xe7\xbe\xad\x66\xdf\xaf\x20\xd8\xf7\x50\xa8\x42\x75\x9e\x4b\xee\x0a\x87\x4c\x0d\x02\x63\x8e\xc3\x86\xb9\xcb\x8e\xc1\x13\xc7\xc3\x3c\x69\x9e\x9e\x9e\x66\xc9\xd7\x47\x8f\x71\xfd\x4e\x6e\xbe\x3c\xc8\x2d\x08\x5e\xab\xe5\x40\x2e\x6f\x0f\x58\xe0\x17\x27\x6e\x2f\xe1\x42\x51\x8f\x19\x4d\x07\x16\x0d\x05\x8d\x34\xd1\xac\xaf\x58\x4b\xf0\xbf\x28\xc1\x34\x3d\xfd\x12\x75\xc8\x78\x58\x03\x8f\xc4\x54\x92\x80\xfe\x85\x85\x49\xff\xcd\xcf\xbf\xa2\x4f\x0a\xea\xf5\x06\x44\x36\xac\xad\x5c\x4b\x0b\xf9\x62\x70\xd1\xbd\xc5\x37\x99\x7b\x8f\x3f\x52\x9c\x00\x8d\xe0\xde\xb7\xe3\x75\x8b\x14\xc6\xf0\x5a\xe2\x2d\x4e\xbf\xe9\xcf\x7d\x9b\x1f\x05\x45\xe1\x65\xca\xfe\x3d\x53\x56\x48\xce\xa2\xd1\xf3\x99\xf6\xf7\xed\xa7\xca\xfe\xc8\x76\xbe\xea\x56\x2a\xe4\x77\x88\xba\x82\x86\x21\x7b\x32\x3f\x3a\xb5\xbe\x85\xf6\x12\x87\xff\x8c\x38\x4c\x5b\xd3\x45\xa8\xd5\x07\xcf\xe7\x66\xb0\x8a\x6d\x74\xcf\x99\xc1\xed\x07\xfb\x9e\x59\x99\xed\xf3\x0e\x0a\x6a\xc1\x72\x13\x3d\xad\x04\xcf\x1e\x19\x2b\x12\xed\x4b\x78\xdc\x6b\xd1\x7b\x0f\x82\xfe\x8f\x06\xcb\x6a\x87\xb9\x7e\x32\xf5\x99\x1a\xca\x79\xbb\xb5\xd1\x53\x26\x91\x8f\x5c\x75\x7f\xf9\x70\x4a\xcf\xd6\xaa\x26\x6a\xff\x72\xcc\xe3\xaa\xe9\x8e\xed
\xdd\xea\x59\x93\x42\xf7\xbe\x74\x85\x7b\x53\x8d\xf7\x2e\x32\x01\xea\xe3\x3d\x94\x69\xef\xec\xf4\x90\x19\x7c\x57\x47\xfc\x32\xb1\xfe\x3f\xdb\xdc\xd5\xe5\xd6\xe2\xcc\xde\x72\xc1\x35\x1f\x7a\x86\x25\xd7\xea\x09\xc2\x97\x68\xfc\x67\x44\xe3\xcb\xa2\xeb\x65\xd1\xf5\xb2\xe8\xda\xf7\x60\x79\x59\x74\xed\x4d\xcb\xb6\xcd\x51\x75\x4b\xef\xc7\x1d\x3f\x60\x2b\x74\x81\xb2\x1c\x79\xf2\x93\x18\xb9\xa3\x49\x2b\x27\x4d\x96\x8e\x7e\xf7\xee\xdd\x5d\x1b\xdc\xf9\x9d\xdd\xcd\x2d\xc9\xfd\x68\x1a\xf6\xa9\x7d\x79\xca\xd6\xe5\xf5\xd6\xd6\xa5\x70\x13\xed\x3e\x97\xaf\xf4\x36\x6b\xe7\x1a\xf2\xa7\xb0\x56\xd3\x55\xfe\x33\xfa\xa7\x0b\x88\xd7\xab\xd9\x4a\x6b\xb4\x73\xaa\xc2\x48\xc2\x60\xba\xdb\x3e\xdc\x66\xee\xd8\x38\xef\xb0\x9e\x19\xea\x96\x4f\xaf\x8f\xd3\xff\x8d\x7c\x9a\xd8\xb7\xb6\x76\xcb\xf1\xba\x54\xc5\x65\xfe\xaa\x5b\x03\xe6\x4f\xd5\xc8\x58\x86\xc1\xb1\x61\x14\x7f\xbf\x13\x27\x62\xcc\xae\x91\x7f\x87\x6f\xd3\x37\x48\xfd\xfd\xdf\x83\x7d\x9f\xcf\xc1\x76\xff\x1a\xec\xfb\x7d\x0c\xb6\xc2\x73\x07\x4b\x2e\xbf\x17\x7f\xc0\x37\xa1\xff\x0d\x00\x00\xff\xff\xe5\xdf\x5d\x02\x84\x43\x00\x00"), + compressedContent: 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x1b\x7f\x6f\xda\xc8\xf2\x7f\x7f\x8a\x39\x9f\x9e\xae\x91\x00\x93\xf6\xae\xba\x10\xc8\x13\x25\x4e\x63\x3d\x02\x11\x38\xed\x55\xa7\x53\xb4\xd8\x03\x6c\x6b\x7b\x7d\xbb\xeb\x10\x2e\xe5\xbb\x3f\xed\xda\xfc\x30\x98\x84\x54\xbd\x84\xf7\x2e\x89\xda\xd8\xeb\xf9\x3d\xb3\x33\xb3\xde\xf5\xdd\x1d\xf8\x38\xa4\x11\x82\x79\x7d\x4d\x02\xe4\x32\x24\x11\x19\x21\x37\x61\x36\x6b\xaa\xfb\x8b\xf4\xfe\xee\x0e\x30\xf2\x61\x36\x33\xb6\xa2\x5c\xf5\xda\x0a\xeb\xee\x0e\x2a\xf6\xad\x44\x1e\x91\xe0\xaa\xd7\x86\xd9\xcc\xfa\xd1\xd2\x70\xe2\xdf\x1c\x3d\xa4\x37\xc8\x1b\x0a\xa8\x97\xdd\xc0\x57\x48\x78\xf0\x67\x82\x7c\x9a\xa2\x67\x8c\xf2\x9c\x44\x32\xf8\x8c\x9e\x54\x1c\x7e\x57\xd8\x7d\x49\x64\x22\xe0\x2b\x48\x76\x15\xc7\xc8\x53\x54\x3a\x04\xfc\x73\xf1\xd0\x1c\x52\x4e\xa3\x91\xc2\xa9\x29\x1c\xad\x90\xa8\x9c\xe9\x51\xf8\x0a\x01\x46\xab\x1c\xff\x00\x05\xf4\x9e\xb3\x24\x6e\x93\x01\x06\xa2\xd2\x67\x5c\xa2\x7f\x49\x28\x17\x95\x0f\x24\x48\x50\x31\xfc\xcc\x68\x04\x26\x28\xaa\x90\xb2\x1c\x49\x78\xa5\x68\x55\x5a\x2c\x0c\x59\x94\x22\x1f\x64\x63\x2b\xf4\x0e\x60\x36\x7b\x75\x77\x07\x13\x2a\xc7\x79\xe0\x4a\x0f\x43\x76\x83\x79\xee\x1d\x12\xa2\xc8\x2c\x5a\xc4\x7d\x21\xf8\xc1\xe2\x6a\x8b\x9b\x7c\x14\x1e\xa7\xb1\xa4\x2c\x32\xef\xb1\xb1\xc4\x5b\x99\xba\xf4\x3a\xa0\x42\x66\xa0\x9c\x44\x23\x84\x0a\xcc\x66\xa9\x5c\x35\x63\x39\xb8\x69\x27\x65\x95\xb2\x36\xa4\x12\x5f\xdd\x35\x60\xa1\x40\x26\x58\xca\xbc\x19\x45\x4c\x12\x25\x53\x8e\xe4\xca\xf0\xb7\xd1\xed\xb3\x84\x7b\x58\x4b\x9d\x89\x11\x72\x22\x19\x4f\x23\xd1\x28\x30\x54\xce\x06\x22\x20\xde\x97\x8a\x8f\x43\x92\x04\xb2\x22\xa9\x0c\x30\xb3\x82\xc4\x30\x0e\x88\xcc\xc7\x62\x65\x9b\xc9\xf3\x74\x12\xa1\x66\x43\x58\x44\x2a\x3f\xe7\x76\xa4\x37\x24\x41\x30\x20\xde\x97\x0d\x7a\x85\xe2\x2b\xa2\xf0\x15\x1e\x02\x0c\x68\xf4\x65\x67\x09\xbc\x4c\x02\xea\x9b\xbb\x21\xc4\x1c\x55\x74\xed\x08\xbd\x22\xd0\xbd\x16\xd3\x29\x67\x47\x91\xa9\xc7\x22\x0c\xd9\x67\xba\xa3\x0c\x0a\x3e\xe1\xc1\xae\x12\xef\xae\xdc\x90\x31\x99\x26\xd8\x2d\x41\x18\x2b\xd5\xfc
\x44\x4e\x17\x28\x9b\xf3\xf7\x71\xe1\xb8\x49\xd1\x0b\x28\x46\xf2\xdb\x03\x72\x1b\xc5\x65\x11\xf8\x36\x9f\x6d\xd2\xa5\x91\x90\x24\xf2\x50\x14\xd0\xdd\x48\x58\x95\xed\x56\x65\xb1\x18\x61\x44\x71\x41\x38\x44\x21\xc8\xe8\xdb\xe6\xf7\x06\xb1\x4d\x0f\x65\xf9\x7d\x4b\x3a\x2b\x4c\xe8\xc6\x5a\x39\xc9\xd5\xab\x03\xa8\x42\x79\x36\x33\xd2\x41\x48\x07\x75\xe2\xbc\xdf\x22\xf9\xa2\xa7\x99\x94\x57\x34\x2a\xe0\xd7\x43\xc1\x82\x1b\xf4\xd7\x38\xce\x87\x77\xe7\x39\xc7\xd8\xe0\x5a\xde\xc5\xa4\x42\xe7\xf1\xc7\x47\x53\xce\xeb\x13\xf4\xc6\x44\x3e\xd6\xe7\xc6\x8b\xff\xee\xf1\x5f\x73\xd5\xfe\x3c\xd8\xa0\x57\xe8\x9f\x2d\x5e\x5f\xf3\x8f\x64\xd7\xaa\x58\x6e\xcd\xa4\x9b\xe0\x31\xe1\x72\xfa\x08\x78\x49\x46\xbb\x42\x93\x11\x46\xf2\x7a\xbd\xc4\xe5\xe3\xeb\x86\x7a\x92\x71\x16\x8b\x65\xd8\x4a\x22\xf1\x3a\x1f\x68\x2f\xb1\xf4\xb8\x5c\xb0\x69\x55\x8c\x24\x95\xd3\x6b\x9f\x8a\x38\x20\xd3\xeb\x2d\xdd\xd4\xc3\x89\x7b\x93\x72\xc8\x22\x2a\x99\x32\xc8\xb5\x64\x2c\x78\x64\x49\x5c\xa5\x8d\x21\xa1\xc1\x32\x0e\x96\x0b\x96\x47\x4b\x99\xa7\x34\x96\xa1\x16\xcb\xa8\xff\x70\xda\x6d\xb9\x9f\x2e\x6d\x50\x43\x70\x79\xf5\xae\xed\xb4\xc0\x2c\x5b\xd6\xc7\x37\x2d\xcb\x3a\x75\x4f\xe1\xb7\x73\xf7\xa2\x0d\x87\x95\x2a\xb8\x9c\x44\x82\xaa\x60\x23\x81\x65\xd9\x1d\x13\xcc\xb1\x94\x71\xcd\xb2\x26\x93\x49\x65\xf2\xa6\xc2\xf8\xc8\x72\x7b\xd6\xad\xa2\x75\xa8\x90\xb3\xcb\xb2\x5c\xc1\xac\xf8\xd2\x37\x4f\x8c\xfa\x0f\xe5\xb2\xd1\x97\xd3\x00\x81\x44\x3e\x68\x26\x3e\x72\xaa\x1c\x3a\xe4\x2c\x04\x45\x5a\xd4\x2c\x6b\x44\xe5\x38\x19\x54\x3c\x16\x5a\x4a\x87\x51\x12\x59\x9a\x1c\xf1\x52\x7a\x65\xad\x5a\x79\x6e\x0e\x61\x18\x86\x3b\x46\xb8\x70\x5c\x68\x53\x0f\x23\x81\xf0\xea\xc2\x71\x0f\x0c\xa3\xc5\xe2\x29\xa7\xa3\xb1\x84\x57\xde\x01\xbc\xae\x1e\xfe\x0c\x17\x29\x45\xc3\xb8\x44\x1e\x52\x21\x28\x8b\x80\x0a\x18\x23\xc7\xc1\x14\x46\x9c\x44\x12\xfd\x12\x0c\x39\x22\xb0\x21\x78\x63\xc2\x47\x58\x02\xc9\x80\x44\x53\x88\x91\x0b\x16\x01\x1b\x48\x42\x23\x15\xff\x04\x3c\x16\x4f\x0d\x36\x04\x39\xa6\x02\x04\x1b\xca\x09\xe1\xa9\x86\x44\x08\xe6\x51\x22\xd1\x07\x9f\x79
\x49\x88\x51\x3a\x71\x61\x48\x03\x14\xf0\x4a\x8e\x11\xcc\x7e\x86\x61\x1e\x68\x26\x3e\x92\xc0\xa0\x11\xa8\x67\xf3\x47\x7a\xad\xc7\x12\x09\x1c\x85\xe4\x54\x5b\xa1\x04\x34\xf2\x82\xc4\x57\x32\xcc\x1f\x07\x34\xa4\x19\x07\x85\xae\x15\x17\x86\x64\x90\x08\x2c\x69\x39\x4b\x10\x32\x9f\x0e\xd5\x5f\xd4\x6a\xc5\xc9\x20\xa0\x62\x5c\x02\x9f\x2a\xd2\x83\x44\x62\x09\x84\x1a\xd4\x76\x2c\x29\x3d\x2c\xc6\x41\x60\x10\x18\x1e\x8b\x29\x0a\xd0\xba\x2e\xa5\xd3\x30\x4a\xf4\x58\x19\x54\x66\x26\x12\x6a\x64\x32\x66\x61\x5e\x13\x2a\x8c\x61\xc2\x23\x2a\xc6\xa8\x71\x7c\x06\x82\x69\x8e\x2a\x9a\xd5\x88\x02\x1f\xb2\x20\x60\x13\xa5\x9a\xc7\x22\x9f\x66\xcb\x3b\xed\x64\x32\x50\x4b\x5c\x6f\xe1\xd7\x88\x49\xea\xa5\xe6\xd6\x0e\x88\x97\x5e\xcd\x1e\x89\x31\x09\x02\x18\x60\x66\x30\xf4\x81\x46\x40\x56\xd4\xe1\x8a\xbd\xea\x0f\x25\x25\x01\xc4\x8c\x6b\x7e\xeb\x6a\x56\x0c\xc3\x3d\xb7\xa1\xdf\x3d\x73\x3f\x36\x7b\x36\x38\x7d\xb8\xec\x75\x3f\x38\xa7\xf6\x29\x98\xcd\x3e\x38\x7d\xb3\x04\x1f\x1d\xf7\xbc\x7b\xe5\xc2\xc7\x66\xaf\xd7\xec\xb8\x9f\xa0\x7b\x06\xcd\xce\x27\xf8\x8f\xd3\x39\x2d\x81\xfd\xdb\x65\xcf\xee\xf7\xa1\xdb\x33\x9c\x8b\xcb\xb6\x63\x9f\x96\xc0\xe9\xb4\xda\x57\xa7\x4e\xe7\x3d\xbc\xbb\x72\xa1\xd3\x75\xa1\xed\x5c\x38\xae\x7d\x0a\x6e\x17\x14\xc3\x8c\x94\x63\xf7\x15\xb1\x0b\xbb\xd7\x3a\x6f\x76\xdc\xe6\x3b\xa7\xed\xb8\x9f\x4a\xc6\x99\xe3\x76\x14\xcd\xb3\x6e\x0f\x9a\x70\xd9\xec\xb9\x4e\xeb\xaa\xdd\xec\xc1\xe5\x55\xef\xb2\xdb\xb7\xa1\xd9\x39\x85\x4e\xb7\xe3\x74\xce\x7a\x4e\xe7\xbd\x7d\x61\x77\xdc\x0a\x38\x1d\xe8\x74\xc1\xfe\x60\x77\x5c\xe8\x9f\x37\xdb\x6d\xc5\xca\x68\x5e\xb9\xe7\xdd\x9e\x92\x0f\x5a\xdd\xcb\x4f\x3d\xe7\xfd\xb9\x0b\xe7\xdd\xf6\xa9\xdd\xeb\xc3\x3b\x1b\xda\x4e\xf3\x5d\xdb\x4e\x59\x75\x3e\x41\xab\xdd\x74\x2e\x4a\x70\xda\xbc\x68\xbe\xb7\x35\x56\xd7\x3d\xb7\x7b\x86\x02\x4b\xa5\x83\x8f\xe7\xb6\x1a\x52\xfc\x9a\x1d\x68\xb6\x5c\xa7\xdb\x51\x6a\xb4\xba\x1d\xb7\xd7\x6c\xb9\x25\x70\xbb\x3d\x77\x81\xfa\xd1\xe9\xdb\x25\x68\xf6\x9c\xbe\x32\xc8\x59\xaf\x7b\x51\x32\x94\x39\xbb\x67\x0a\xc4\xe9\x28\xbc\x8e\x9d\x52\x51
\xa6\x86\x9c\x47\xba\x3d\x7d\x7f\xd5\xb7\x17\x04\xe1\xd4\x6e\xb6\x9d\xce\xfb\xbe\x42\x56\x2a\xce\x81\x2b\x46\xb9\x7c\x62\xd4\x75\x0a\xbc\x0d\x83\x48\x34\x0a\x12\xdb\xe1\xd1\xd1\x51\x9a\xcf\xcc\xdd\x80\x84\x4a\x6e\x0d\x73\xc8\x22\x59\x1e\x92\x90\x06\xd3\x1a\xfc\x74\x8e\xc1\x0d\x4a\xea\x11\xe8\x60\x82\x3f\x95\x60\x31\x50\x82\x26\xa7\x24\x28\x81\x20\x91\x28\x0b\xe4\x74\x78\x0c\x03\x76\x5b\x16\xf4\x2f\x55\x8b\x61\xc0\xb8\x8f\xbc\x3c\x60\xb7\xc7\xa0\x89\x0a\xfa\x17\xd6\xe0\xf0\xe7\xf8\xf6\x18\x42\xc2\x47\x34\xaa\x41\xf5\x58\xe5\xd6\x31\x12\xff\x39\xf9\x87\x28\x09\xa8\x8a\xda\x30\x6f\x28\x4e\xd4\x2c\x32\xd5\xec\x95\x18\xc9\x86\x39\xa1\xbe\x1c\x37\x7c\xbc\xa1\x1e\x96\xf5\xcd\xf3\x19\x0b\xac\xb9\xb8\xca\x99\x65\xfc\x33\xa1\x37\x0d\xb3\x95\x8a\x5a\x76\xa7\x31\xae\x08\xae\x5a\x11\x4b\x39\xf7\x58\x57\x02\x81\xb2\x71\xe5\x9e\x95\x7f\x7d\x66\xf1\xf5\xbb\x8d\xe7\x73\xf7\x7d\xbd\x48\xdd\xd2\xc2\x9d\x18\x46\xdd\x52\x41\xa9\x2e\x06\xcc\x9f\x02\x95\x18\x0a\x8f\xc5\xd8\x30\x4d\x7d\x23\xa7\xea\x3a\x9b\x51\xc2\x1b\x63\x48\xf4\x8c\xb2\x55\x75\xbf\x98\xf7\xbe\x4f\xaa\x64\x79\x82\x83\x2f\x54\x96\xd3\x07\x21\x63\x72\xac\x91\xd2\xda\x40\x89\x40\x7f\x09\xa4\x62\x43\x63\x97\x89\xff\x39\x11\xb2\x06\x11\x8b\xf0\x18\xc6\xa8\x2a\x53\x0d\x0e\xab\xd5\x7f\x1d\x43\x40\x23\x2c\x2f\x86\x2a\x6f\x31\x3c\x06\x3d\x03\x52\x00\xf8\x81\x86\x6a\xb2\x90\x48\x1e\xc3\x80\x78\x5f\x46\x9c\x25\x91\x5f\xf6\x58\xc0\x78\x0d\x7e\x1c\xbe\x55\xbf\xab\xe6\x87\x98\xf8\xbe\x96\x4a\x45\xc3\x60\xa4\x21\x1b\x66\x06\x69\x2a\x7b\x4b\x32\x78\xea\xf0\x58\x51\x69\x47\x3d\x0a\x65\x07\xa8\x4b\xfe\x8c\x79\x0c\x40\x49\xf0\xc4\x99\xf4\x06\xb9\x22\x12\x94\x49\x40\x47\x51\x0d\x24\x8b\xf3\x86\xba\xd1\x0f\x1a\xa6\x64\xb1\x79\x52\xb7\xa4\xbf\x14\x34\xcd\xac\xe6\xdb\x6a\xf5\x89\xa7\x4a\xa1\xd0\xd9\xd2\xaa\x06\x83\x80\x79\x5f\x72\xb1\x1d\x92\xdb\x72\x16\x24\x6f\xab\xd5\xf8\x36\xf7\xd0\x0b\x90\x70\xc5\x50\x8e\x73\xe3\xdb\x26\xca\xc2\x38\x40\x12\xc9\xd6\xa6\x44\xce\x5a\xda\x50\x00\x75\x9f\xde\x3c\x75\x58\xe5\xf5\x5d\x37\xce\xfd\x4a\xcc\xe5\x56\x4e
\xd6\x93\x39\xf3\xb3\xb2\x84\x09\x1e\x06\x41\x06\xdd\x30\xab\xe9\xbd\x88\x89\x37\xbf\x7f\x52\x45\xb3\x87\x9c\xf8\x34\x11\x35\x78\xa3\xc7\x0a\x12\xc0\x70\x98\xcb\x62\x29\x5a\x0d\x0e\xe3\x5b\x10\x2c\xa0\x3e\xfc\x88\x47\xea\x37\x9f\x18\x86\xc3\x15\x5b\xec\x43\x76\x58\x4a\xf2\x74\x59\xe2\xed\xd6\x09\x97\xb3\xae\x46\x99\x64\xa5\xe6\x97\x6a\xf5\x18\x74\x89\xca\xe0\x3d\x8c\x24\xf2\x22\x7f\xe9\x7f\x55\xed\x94\x4d\xbf\xd9\x6f\x7f\x79\xfd\xba\x55\x5c\x80\x5e\xab\xb8\x36\x21\x9b\x6f\x29\x83\x55\xef\xa5\xb8\xc5\x33\x72\xfe\xb3\xdc\x53\x5d\x6c\xa6\x82\x7e\x59\x52\xf8\x2e\xe9\x00\x0e\x61\x36\x13\x8b\x17\x1e\x30\x64\x1c\x96\xfb\x7e\x5b\xf6\x5d\x61\x36\x5b\xe3\x0a\xab\xbb\x80\x8d\xdc\x1e\xe0\x06\x58\xf6\x6a\x25\xe7\xfc\x45\x0e\x5e\xdc\xf3\x97\x30\xdd\xa5\x98\x2d\x83\xe7\x30\x0d\x9e\xfb\x62\x63\xef\x73\xdf\x56\xb3\xef\x57\x10\xec\x7b\x28\x54\xa1\x3a\xcf\x25\xf7\x85\x43\xa6\x06\x81\x31\xc7\x61\xc3\xdc\x65\xc7\xe0\x89\xe3\x61\x9e\x34\xcf\xce\xce\xb2\xe4\xeb\xa3\xc7\xb8\x7e\x27\x37\x5f\x1e\xe4\x16\x04\xaf\xd5\x72\x20\x97\xb7\x07\x2c\xf0\x8b\x13\xb7\x97\x70\xa1\xa8\xc7\x8c\xa6\x03\x8b\x86\x82\x46\x9a\x68\xd6\x57\xac\x25\xf8\x5f\x94\x60\x9a\x9e\x7e\x89\x3a\x64\x3c\xac\x81\x47\x62\x2a\x49\x40\xff\xc2\xc2\xa4\xff\xe6\xe7\x5f\xd1\x27\x05\xf5\x7a\x03\x22\x1b\xd6\x56\xae\xa5\x85\x7c\x31\xb8\xe8\xde\xe2\xdb\xcc\xbd\x27\x1f\x28\x4e\x80\x46\xf0\xe0\xdb\xf1\xba\x45\x0a\x63\x78\x2d\xf1\x16\xa7\xdf\xf4\xe7\xa1\xcd\x8f\x82\xa2\xf0\x32\x65\xff\x9e\x29\x2b\x24\x67\xd1\xe8\xf9\x4c\xfb\xfb\xf6\x93\x5b\x7f\x64\x3b\x5f\x75\x2b\x15\xf2\x3b\x44\x5d\x41\xc3\x90\x3d\x99\x1f\x4f\x5a\xdf\x42\x7b\x89\xc3\x7f\x46\x1c\xa6\xad\xe9\x22\xd4\xea\x83\xe7\x73\x33\x58\xc5\x36\x7a\xe0\x5c\xde\xf6\xc3\x73\xcf\xac\xcc\xf6\x79\x07\x05\xb5\x60\xb9\x89\x9e\x56\x82\x67\x8f\x8c\x15\x89\xf6\x25\x3c\x1e\xb4\xe8\x83\x87\x2d\xff\x47\x83\x65\xb5\xc3\x5c\x3f\xfd\xf9\x4c\x0d\xe5\xbc\xdd\xda\xe8\x29\x93\xc8\x47\xae\xba\xbf\x7c\x38\xa5\xe7\x57\x55\x13\xb5\x7f\x39\xe6\xdb\xaa\xe9\x8e\xed\xdd\xea\x59\x93\x42\xf7\xbe\x74\x85\x7b\x53\x8d\xf7
\x2e\x32\x01\xea\xe3\x3d\x94\x69\xef\xec\xf4\x98\x19\x7c\x5f\x47\xfc\x32\xb1\xfe\x3f\xdb\xdc\xd5\xe5\xd6\xe2\xcc\xde\x72\xc1\x35\x1f\x7a\x86\x25\xd7\xea\x09\xc2\x97\x68\xfc\x67\x44\xe3\xcb\xa2\xeb\x65\xd1\xf5\xb2\xe8\xda\xf7\x60\x79\x59\x74\xed\x4d\xcb\xb6\xcd\x51\x75\x4b\xef\xc7\x9d\x3c\x62\x2b\x74\x81\xb2\x1c\x79\xf2\x93\x18\xb9\xa3\x49\x2b\x27\x4d\x96\x8e\x3e\x3a\x3a\xba\x6f\x83\x3b\xbf\xb3\xbb\xb9\x25\xb9\x1f\x4d\xc3\x3e\xb5\x2f\x4f\xd9\xba\xbc\xde\xda\xba\x14\x6e\xa2\x3d\xe4\xf2\x95\xde\x66\xed\x5c\x43\xfe\x14\xd6\x6a\xba\xca\x7f\xaa\xfe\x74\x01\xf1\x7a\x35\x5b\x69\x8d\x76\x4e\x55\x18\x49\x18\x4c\x77\xdb\x87\xdb\xcc\x1d\x1b\xe7\x1d\xd6\x33\x43\xdd\xf2\xe9\xcd\x49\xfa\xbf\x91\x4f\x13\xfb\xd6\xd6\x6e\x39\x5e\x97\xaa\xb8\xcc\x5f\x75\x6b\xc0\xfc\xa9\x1a\x19\xcb\x30\x38\x31\x8c\xe2\xef\x77\xe2\x44\x8c\xd9\x0d\xf2\xef\xf0\xfd\xf7\x06\xa9\xbf\xff\x7b\xb0\xef\xf3\x39\xd8\xee\x5f\x83\x7d\xbf\x8f\xc1\x56\x78\xee\x60\xc9\xe5\x37\xd9\x8f\xf8\x26\xf4\xbf\x01\x00\x00\xff\xff\xfb\x35\xd4\x75\xe8\x42\x00\x00"), }, } fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ diff --git a/vendor/github.com/prometheus/alertmanager/cluster/cluster.go b/vendor/github.com/prometheus/alertmanager/cluster/cluster.go index 322481ea8333b..d7a513d267a35 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/cluster.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/cluster.go @@ -405,7 +405,7 @@ func (p *Peer) reconnect() { // peerJoin(). 
if _, err := p.mlist.Join([]string{pr.Address()}); err != nil { p.failedReconnectionsCounter.Inc() - level.Debug(logger).Log("result", "failure", "peer", pr.Node, "addr", pr.Address()) + level.Debug(logger).Log("result", "failure", "peer", pr.Node, "addr", pr.Address(), "err", err) } else { p.reconnectionsCounter.Inc() level.Debug(logger).Log("result", "success", "peer", pr.Node, "addr", pr.Address()) @@ -435,7 +435,7 @@ func (p *Peer) refresh() { if !isPeerFound { if _, err := p.mlist.Join([]string{peer}); err != nil { p.failedRefreshCounter.Inc() - level.Warn(logger).Log("result", "failure", "addr", peer) + level.Warn(logger).Log("result", "failure", "addr", peer, "err", err) } else { p.refreshCounter.Inc() level.Debug(logger).Log("result", "success", "addr", peer) diff --git a/vendor/github.com/prometheus/alertmanager/cluster/clusterpb/cluster.pb.go b/vendor/github.com/prometheus/alertmanager/cluster/clusterpb/cluster.pb.go index c70986c8f6c78..a9425972ed83b 100644 --- a/vendor/github.com/prometheus/alertmanager/cluster/clusterpb/cluster.pb.go +++ b/vendor/github.com/prometheus/alertmanager/cluster/clusterpb/cluster.pb.go @@ -22,7 +22,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Part struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -475,6 +475,7 @@ func (m *FullState) Unmarshal(dAtA []byte) error { func skipCluster(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -506,10 +507,8 @@ func skipCluster(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -530,55 +529,30 @@ func skipCluster(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthCluster } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthCluster - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCluster - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipCluster(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthCluster - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCluster + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthCluster + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthCluster = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCluster = fmt.Errorf("proto: integer 
overflow") + ErrInvalidLengthCluster = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCluster = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCluster = fmt.Errorf("proto: unexpected end of group") ) diff --git a/vendor/github.com/prometheus/alertmanager/config/config.go b/vendor/github.com/prometheus/alertmanager/config/config.go index 5294e4da77f4c..485e9ac64494b 100644 --- a/vendor/github.com/prometheus/alertmanager/config/config.go +++ b/vendor/github.com/prometheus/alertmanager/config/config.go @@ -308,26 +308,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { sc.APIURL = c.Global.SlackAPIURL } } - for _, hc := range rcv.HipchatConfigs { - if hc.HTTPConfig == nil { - hc.HTTPConfig = c.Global.HTTPConfig - } - if hc.APIURL == nil { - if c.Global.HipchatAPIURL == nil { - return fmt.Errorf("no global Hipchat API URL set") - } - hc.APIURL = c.Global.HipchatAPIURL - } - if !strings.HasSuffix(hc.APIURL.Path, "/") { - hc.APIURL.Path += "/" - } - if hc.AuthToken == "" { - if c.Global.HipchatAuthToken == "" { - return fmt.Errorf("no global Hipchat Auth Token set") - } - hc.AuthToken = c.Global.HipchatAuthToken - } - } for _, poc := range rcv.PushoverConfigs { if poc.HTTPConfig == nil { poc.HTTPConfig = c.Global.HTTPConfig @@ -459,7 +439,6 @@ func DefaultGlobalConfig() GlobalConfig { SMTPHello: "localhost", SMTPRequireTLS: true, PagerdutyURL: mustParseURL("https://events.pagerduty.com/v2/enqueue"), - HipchatAPIURL: mustParseURL("https://api.hipchat.com/"), OpsGenieAPIURL: mustParseURL("https://api.opsgenie.com/"), WeChatAPIURL: mustParseURL("https://qyapi.weixin.qq.com/cgi-bin/"), VictorOpsAPIURL: mustParseURL("https://alert.victorops.com/integrations/generic/20131114/alert/"), @@ -574,8 +553,6 @@ type GlobalConfig struct { SMTPRequireTLS bool `yaml:"smtp_require_tls,omitempty" json:"smtp_require_tls,omitempty"` SlackAPIURL *SecretURL `yaml:"slack_api_url,omitempty" 
json:"slack_api_url,omitempty"` PagerdutyURL *URL `yaml:"pagerduty_url,omitempty" json:"pagerduty_url,omitempty"` - HipchatAPIURL *URL `yaml:"hipchat_api_url,omitempty" json:"hipchat_api_url,omitempty"` - HipchatAuthToken Secret `yaml:"hipchat_auth_token,omitempty" json:"hipchat_auth_token,omitempty"` OpsGenieAPIURL *URL `yaml:"opsgenie_api_url,omitempty" json:"opsgenie_api_url,omitempty"` OpsGenieAPIKey Secret `yaml:"opsgenie_api_key,omitempty" json:"opsgenie_api_key,omitempty"` WeChatAPIURL *URL `yaml:"wechat_api_url,omitempty" json:"wechat_api_url,omitempty"` @@ -708,7 +685,6 @@ type Receiver struct { EmailConfigs []*EmailConfig `yaml:"email_configs,omitempty" json:"email_configs,omitempty"` PagerdutyConfigs []*PagerdutyConfig `yaml:"pagerduty_configs,omitempty" json:"pagerduty_configs,omitempty"` - HipchatConfigs []*HipchatConfig `yaml:"hipchat_configs,omitempty" json:"hipchat_configs,omitempty"` SlackConfigs []*SlackConfig `yaml:"slack_configs,omitempty" json:"slack_configs,omitempty"` WebhookConfigs []*WebhookConfig `yaml:"webhook_configs,omitempty" json:"webhook_configs,omitempty"` OpsGenieConfigs []*OpsGenieConfig `yaml:"opsgenie_configs,omitempty" json:"opsgenie_configs,omitempty"` diff --git a/vendor/github.com/prometheus/alertmanager/config/notifiers.go b/vendor/github.com/prometheus/alertmanager/config/notifiers.go index 82829ded36de8..d112e268a0604 100644 --- a/vendor/github.com/prometheus/alertmanager/config/notifiers.go +++ b/vendor/github.com/prometheus/alertmanager/config/notifiers.go @@ -80,18 +80,6 @@ var ( Footer: `{{ template "slack.default.footer" . }}`, } - // DefaultHipchatConfig defines default values for Hipchat configurations. - DefaultHipchatConfig = HipchatConfig{ - NotifierConfig: NotifierConfig{ - VSendResolved: false, - }, - Color: `{{ if eq .Status "firing" }}red{{ else }}green{{ end }}`, - From: `{{ template "hipchat.default.from" . }}`, - Notify: false, - Message: `{{ template "hipchat.default.message" . 
}}`, - MessageFormat: `text`, - } - // DefaultOpsGenieConfig defines default values for OpsGenie configurations. DefaultOpsGenieConfig = OpsGenieConfig{ NotifierConfig: NotifierConfig{ @@ -372,35 +360,6 @@ func (c *SlackConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return unmarshal((*plain)(c)) } -// HipchatConfig configures notifications via Hipchat. -type HipchatConfig struct { - NotifierConfig `yaml:",inline" json:",inline"` - - HTTPConfig *commoncfg.HTTPClientConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"` - - APIURL *URL `yaml:"api_url,omitempty" json:"api_url,omitempty"` - AuthToken Secret `yaml:"auth_token,omitempty" json:"auth_token,omitempty"` - RoomID string `yaml:"room_id,omitempty" json:"room_id,omitempty"` - From string `yaml:"from,omitempty" json:"from,omitempty"` - Notify bool `yaml:"notify,omitempty" json:"notify,omitempty"` - Message string `yaml:"message,omitempty" json:"message,omitempty"` - MessageFormat string `yaml:"message_format,omitempty" json:"message_format,omitempty"` - Color string `yaml:"color,omitempty" json:"color,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *HipchatConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultHipchatConfig - type plain HipchatConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - if c.RoomID == "" { - return fmt.Errorf("missing room id in Hipchat config") - } - return nil -} - // WebhookConfig configures notifications via a generic webhook. type WebhookConfig struct { NotifierConfig `yaml:",inline" json:",inline"` @@ -409,6 +368,10 @@ type WebhookConfig struct { // URL to send POST request to. URL *URL `yaml:"url" json:"url"` + // MaxAlerts is the maximum number of alerts to be sent per webhook message. + // Alerts exceeding this threshold will be truncated. Setting this to 0 + // allows an unlimited number of alerts. 
+ MaxAlerts uint64 `yaml:"max_alerts" json:"max_alerts"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go b/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go index a024866d4f54c..d5233b5065f73 100644 --- a/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go +++ b/vendor/github.com/prometheus/alertmanager/dispatch/dispatch.go @@ -53,7 +53,11 @@ func NewDispatcherMetrics(r prometheus.Registerer) *DispatcherMetrics { }, ), } - prometheus.MustRegister(m.aggrGroups, m.processingDuration) + + if r != nil { + r.MustRegister(m.aggrGroups, m.processingDuration) + } + return &m } @@ -107,9 +111,8 @@ func (d *Dispatcher) Run() { d.mtx.Lock() d.aggrGroups = map[*Route]map[model.Fingerprint]*aggrGroup{} d.metrics.aggrGroups.Set(0) - d.mtx.Unlock() - d.ctx, d.cancel = context.WithCancel(context.Background()) + d.mtx.Unlock() d.run(d.alerts.Subscribe()) close(d.done) @@ -251,11 +254,16 @@ func (d *Dispatcher) Groups(routeFilter func(*Route) bool, alertFilter func(*typ // Stop the dispatcher. 
func (d *Dispatcher) Stop() { - if d == nil || d.cancel == nil { + if d == nil { + return + } + d.mtx.Lock() + if d.cancel == nil { return } d.cancel() d.cancel = nil + d.mtx.Unlock() <-d.done } diff --git a/vendor/github.com/prometheus/alertmanager/dispatch/route.go b/vendor/github.com/prometheus/alertmanager/dispatch/route.go index 43c48c2586a2d..ae301aaadcc3d 100644 --- a/vendor/github.com/prometheus/alertmanager/dispatch/route.go +++ b/vendor/github.com/prometheus/alertmanager/dispatch/route.go @@ -70,10 +70,13 @@ func NewRoute(cr *config.Route, parent *Route) *Route { for _, ln := range cr.GroupBy { opts.GroupBy[ln] = struct{}{} } + opts.GroupByAll = false + } else { + if cr.GroupByAll { + opts.GroupByAll = cr.GroupByAll + } } - opts.GroupByAll = cr.GroupByAll - if cr.GroupWait != nil { opts.GroupWait = time.Duration(*cr.GroupWait) } @@ -155,6 +158,17 @@ func (r *Route) Key() string { return b.String() } +// Walk traverses the route tree in depth-first order. +func (r *Route) Walk(visit func(*Route)) { + visit(r) + if r.Routes == nil { + return + } + for i := range r.Routes { + r.Routes[i].Walk(visit) + } +} + // RouteOpts holds various routing options necessary for processing alerts // that match a given route. type RouteOpts struct { diff --git a/vendor/github.com/prometheus/alertmanager/nflog/nflogpb/nflog.pb.go b/vendor/github.com/prometheus/alertmanager/nflog/nflogpb/nflog.pb.go index 8aef2b16419e3..deb7f8b21ebc2 100644 --- a/vendor/github.com/prometheus/alertmanager/nflog/nflogpb/nflog.pb.go +++ b/vendor/github.com/prometheus/alertmanager/nflog/nflogpb/nflog.pb.go @@ -26,7 +26,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Receiver struct { // Configured name of the receiver group. @@ -1141,6 +1141,7 @@ func (m *MeshEntry) Unmarshal(dAtA []byte) error { func skipNflog(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1172,10 +1173,8 @@ func skipNflog(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1196,55 +1195,30 @@ func skipNflog(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthNflog } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthNflog - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNflog - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipNflog(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthNflog - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupNflog + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthNflog + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthNflog = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNflog = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthNflog = 
fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNflog = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupNflog = fmt.Errorf("proto: unexpected end of group") ) diff --git a/vendor/github.com/prometheus/alertmanager/notify/hipchat/hipchat.go b/vendor/github.com/prometheus/alertmanager/notify/hipchat/hipchat.go deleted file mode 100644 index f9014fca15cc2..0000000000000 --- a/vendor/github.com/prometheus/alertmanager/notify/hipchat/hipchat.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2019 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package hipchat - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/go-kit/kit/log" - commoncfg "github.com/prometheus/common/config" - - "github.com/prometheus/alertmanager/config" - "github.com/prometheus/alertmanager/notify" - "github.com/prometheus/alertmanager/template" - "github.com/prometheus/alertmanager/types" -) - -// Notifier implements a Notifier for Hipchat notifications. -type Notifier struct { - conf *config.HipchatConfig - tmpl *template.Template - logger log.Logger - client *http.Client - retrier *notify.Retrier -} - -// New returns a new Hipchat notification handler. 
-func New(c *config.HipchatConfig, t *template.Template, l log.Logger) (*Notifier, error) { - client, err := commoncfg.NewClientFromConfig(*c.HTTPConfig, "hipchat", false) - if err != nil { - return nil, err - } - return &Notifier{ - conf: c, - tmpl: t, - logger: l, - client: client, - // Response codes 429 (rate limiting) and 5xx can potentially recover. - // 2xx response codes indicate successful requests. - // https://developer.atlassian.com/hipchat/guide/hipchat-rest-api/api-response-codes - retrier: ¬ify.Retrier{RetryCodes: []int{http.StatusTooManyRequests}}, - }, nil -} - -type hipchatReq struct { - From string `json:"from"` - Notify bool `json:"notify"` - Message string `json:"message"` - MessageFormat string `json:"message_format"` - Color string `json:"color"` -} - -// Notify implements the Notifier interface. -func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) { - var err error - var msg string - var ( - data = notify.GetTemplateData(ctx, n.tmpl, as, n.logger) - tmplText = notify.TmplText(n.tmpl, data, &err) - tmplHTML = notify.TmplHTML(n.tmpl, data, &err) - roomid = tmplText(n.conf.RoomID) - apiURL = n.conf.APIURL.Copy() - ) - apiURL.Path += fmt.Sprintf("v2/room/%s/notification", roomid) - q := apiURL.Query() - q.Set("auth_token", string(n.conf.AuthToken)) - apiURL.RawQuery = q.Encode() - - if n.conf.MessageFormat == "html" { - msg = tmplHTML(n.conf.Message) - } else { - msg = tmplText(n.conf.Message) - } - - req := &hipchatReq{ - From: tmplText(n.conf.From), - Notify: n.conf.Notify, - Message: msg, - MessageFormat: n.conf.MessageFormat, - Color: tmplText(n.conf.Color), - } - if err != nil { - return false, err - } - - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(req); err != nil { - return false, err - } - - resp, err := notify.PostJSON(ctx, n.client, apiURL.String(), &buf) - if err != nil { - return true, notify.RedactURL(err) - } - defer notify.Drain(resp) - - return n.retrier.Check(resp.StatusCode, nil) 
-} diff --git a/vendor/github.com/prometheus/alertmanager/notify/notify.go b/vendor/github.com/prometheus/alertmanager/notify/notify.go index ef112d08e24e5..f211685e1b7bc 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/notify.go +++ b/vendor/github.com/prometheus/alertmanager/notify/notify.go @@ -20,10 +20,11 @@ import ( "sync" "time" - "github.com/cenkalti/backoff" + "github.com/cenkalti/backoff/v4" "github.com/cespare/xxhash" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -90,6 +91,11 @@ func (i *Integration) Index() int { return i.idx } +// String implements the Stringer interface. +func (i *Integration) String() string { + return fmt.Sprintf("%s[%d]", i.name, i.idx) +} + // notifyKey defines a custom type with which a context is populated to // avoid accidental collisions. type notifyKey int @@ -233,7 +239,6 @@ func newMetrics(r prometheus.Registerer) *metrics { } for _, integration := range []string{ "email", - "hipchat", "pagerduty", "wechat", "pushover", @@ -316,12 +321,12 @@ type RoutingStage map[string]Stage func (rs RoutingStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { receiver, ok := ReceiverName(ctx) if !ok { - return ctx, nil, fmt.Errorf("receiver missing") + return ctx, nil, errors.New("receiver missing") } s, ok := rs[receiver] if !ok { - return ctx, nil, fmt.Errorf("stage for receiver missing") + return ctx, nil, errors.New("stage for receiver missing") } return s.Exec(ctx, l, alerts...) 
@@ -362,14 +367,6 @@ func (fs FanoutStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.A go func(s Stage) { if _, _, err := s.Exec(ctx, l, alerts...); err != nil { me.Add(err) - lvl := level.Error(l) - if ctx.Err() == context.Canceled { - // It is expected for the context to be canceled on - // configuration reload or shutdown. In this case, the - // message should only be logged at the debug level. - lvl = level.Debug(l) - } - lvl.Log("msg", "Error on notify", "err", err, "context_err", ctx.Err()) } wg.Done() }(s) @@ -547,12 +544,12 @@ func (n *DedupStage) needsUpdate(entry *nflogpb.Entry, firing, resolved map[uint func (n *DedupStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { gkey, ok := GroupKey(ctx) if !ok { - return ctx, nil, fmt.Errorf("group key missing") + return ctx, nil, errors.New("group key missing") } repeatInterval, ok := RepeatInterval(ctx) if !ok { - return ctx, nil, fmt.Errorf("repeat interval missing") + return ctx, nil, errors.New("repeat interval missing") } firingSet := map[uint64]struct{}{} @@ -586,7 +583,7 @@ func (n *DedupStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Al case 1: entry = entries[0] default: - return ctx, nil, fmt.Errorf("unexpected entry result size %d", len(entries)) + return ctx, nil, errors.Errorf("unexpected entry result size %d", len(entries)) } if n.needsUpdate(entry, firingSet, resolvedSet, repeatInterval) { @@ -622,7 +619,7 @@ func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Ale if !r.integration.SendResolved() { firing, ok := FiringAlerts(ctx) if !ok { - return ctx, nil, fmt.Errorf("firing alerts missing") + return ctx, nil, errors.New("firing alerts missing") } if len(firing) == 0 { return ctx, alerts, nil @@ -636,24 +633,28 @@ func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Ale sent = alerts } + b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = 0 // Always 
retry. + + tick := backoff.NewTicker(b) + defer tick.Stop() + var ( i = 0 - b = backoff.NewExponentialBackOff() - tick = backoff.NewTicker(b) iErr error ) - defer tick.Stop() + l = log.With(l, "receiver", r.groupName, "integration", r.integration.String()) for { i++ // Always check the context first to not notify again. select { case <-ctx.Done(): - if iErr != nil { - return ctx, nil, iErr + if iErr == nil { + iErr = ctx.Err() } - return ctx, nil, ctx.Err() + return ctx, nil, errors.Wrapf(iErr, "%s/%s: notify retry canceled after %d attempts", r.groupName, r.integration.String(), i) default: } @@ -665,23 +666,31 @@ func (r RetryStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Ale r.metrics.numNotifications.WithLabelValues(r.integration.Name()).Inc() if err != nil { r.metrics.numFailedNotifications.WithLabelValues(r.integration.Name()).Inc() - level.Debug(l).Log("msg", "Notify attempt failed", "attempt", i, "integration", r.integration.Name(), "receiver", r.groupName, "err", err) if !retry { - return ctx, alerts, fmt.Errorf("cancelling notify retry for %q due to unrecoverable error: %s", r.integration.Name(), err) + return ctx, alerts, errors.Wrapf(err, "%s/%s: notify retry canceled due to unrecoverable error after %d attempts", r.groupName, r.integration.String(), i) + } + if ctx.Err() == nil && (iErr == nil || err.Error() != iErr.Error()) { + // Log the error if the context isn't done and the error isn't the same as before. + level.Warn(l).Log("msg", "Notify attempt failed, will retry later", "attempts", i, "err", err) } // Save this error to be able to return the last seen error by an // integration upon context timeout. 
iErr = err } else { + lvl := level.Debug(l) + if i > 1 { + lvl = level.Info(l) + } + lvl.Log("msg", "Notify success", "attempts", i) return ctx, alerts, nil } case <-ctx.Done(): - if iErr != nil { - return ctx, nil, iErr + if iErr == nil { + iErr = ctx.Err() } - return ctx, nil, ctx.Err() + return ctx, nil, errors.Wrapf(iErr, "%s/%s: notify retry canceled after %d attempts", r.groupName, r.integration.String(), i) } } } @@ -705,17 +714,17 @@ func NewSetNotifiesStage(l NotificationLog, recv *nflogpb.Receiver) *SetNotifies func (n SetNotifiesStage) Exec(ctx context.Context, l log.Logger, alerts ...*types.Alert) (context.Context, []*types.Alert, error) { gkey, ok := GroupKey(ctx) if !ok { - return ctx, nil, fmt.Errorf("group key missing") + return ctx, nil, errors.New("group key missing") } firing, ok := FiringAlerts(ctx) if !ok { - return ctx, nil, fmt.Errorf("firing alerts missing") + return ctx, nil, errors.New("firing alerts missing") } resolved, ok := ResolvedAlerts(ctx) if !ok { - return ctx, nil, fmt.Errorf("resolved alerts missing") + return ctx, nil, errors.New("resolved alerts missing") } return ctx, alerts, n.nflog.Log(n.recv, gkey, firing, resolved) diff --git a/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go b/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go index c581f7b1403c0..190a160edfe27 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go +++ b/vendor/github.com/prometheus/alertmanager/notify/pagerduty/pagerduty.go @@ -22,6 +22,7 @@ import ( "net/http" "strings" + "github.com/alecthomas/units" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" @@ -34,6 +35,8 @@ import ( "github.com/prometheus/alertmanager/types" ) +const maxEventSize int = 512000 + // Notifier implements a Notifier for PagerDuty notifications. 
type Notifier struct { conf *config.PagerdutyConfig @@ -107,6 +110,33 @@ type pagerDutyPayload struct { CustomDetails map[string]string `json:"custom_details,omitempty"` } +func (n *Notifier) encodeMessage(msg *pagerDutyMessage) (bytes.Buffer, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(msg); err != nil { + return buf, errors.Wrap(err, "failed to encode PagerDuty message") + } + + if buf.Len() > maxEventSize { + truncatedMsg := fmt.Sprintf("Custom details have been removed because the original event exceeds the maximum size of %s", units.MetricBytes(maxEventSize).String()) + + if n.apiV1 != "" { + msg.Details = map[string]string{"error": truncatedMsg} + } else { + msg.Payload.CustomDetails = map[string]string{"error": truncatedMsg} + } + + warningMsg := fmt.Sprintf("Truncated Details because message of size %s exceeds limit %s", units.MetricBytes(buf.Len()).String(), units.MetricBytes(maxEventSize).String()) + level.Warn(n.logger).Log("msg", warningMsg) + + buf.Reset() + if err := json.NewEncoder(&buf).Encode(msg); err != nil { + return buf, errors.Wrap(err, "failed to encode PagerDuty message") + } + } + + return buf, nil +} + func (n *Notifier) notifyV1( ctx context.Context, eventType string, @@ -145,12 +175,12 @@ func (n *Notifier) notifyV1( return false, errors.New("service key cannot be empty") } - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(msg); err != nil { - return false, errors.Wrap(err, "failed to encode PagerDuty v1 message") + encodedMsg, err := n.encodeMessage(msg) + if err != nil { + return false, err } - resp, err := notify.PostJSON(ctx, n.client, n.apiV1, &buf) + resp, err := notify.PostJSON(ctx, n.client, n.apiV1, &encodedMsg) if err != nil { return true, errors.Wrap(err, "failed to post message to PagerDuty v1") } @@ -218,12 +248,12 @@ func (n *Notifier) notifyV2( return false, errors.New("routing key cannot be empty") } - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(msg); err != nil 
{ - return false, errors.Wrap(err, "failed to encode PagerDuty v2 message") + encodedMsg, err := n.encodeMessage(msg) + if err != nil { + return false, err } - resp, err := notify.PostJSON(ctx, n.client, n.conf.URL.String(), &buf) + resp, err := notify.PostJSON(ctx, n.client, n.conf.URL.String(), &encodedMsg) if err != nil { return true, errors.Wrap(err, "failed to post message to PagerDuty") } diff --git a/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go b/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go index 45a087195ea68..1fd5267726a8f 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go +++ b/vendor/github.com/prometheus/alertmanager/notify/slack/slack.go @@ -17,6 +17,8 @@ import ( "bytes" "context" "encoding/json" + "fmt" + "github.com/pkg/errors" "net/http" "github.com/go-kit/kit/log" @@ -183,5 +185,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error) // Only 5xx response codes are recoverable and 2xx codes are successful. // https://api.slack.com/incoming-webhooks#handling_errors // https://api.slack.com/changelog/2016-05-17-changes-to-errors-for-incoming-webhooks - return n.retrier.Check(resp.StatusCode, resp.Body) + retry, err := n.retrier.Check(resp.StatusCode, resp.Body) + err = errors.Wrap(err, fmt.Sprintf("channel %q", req.Channel)) + return retry, err } diff --git a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go index 3ef3e55dbc0ba..42ca8ec6abbba 100644 --- a/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go +++ b/vendor/github.com/prometheus/alertmanager/notify/webhook/webhook.go @@ -69,12 +69,22 @@ type Message struct { *template.Data // The protocol version. 
- Version string `json:"version"` - GroupKey string `json:"groupKey"` + Version string `json:"version"` + GroupKey string `json:"groupKey"` + TruncatedAlerts uint64 `json:"truncatedAlerts"` +} + +func truncateAlerts(maxAlerts uint64, alerts []*types.Alert) ([]*types.Alert, uint64) { + if maxAlerts != 0 && uint64(len(alerts)) > maxAlerts { + return alerts[:maxAlerts], uint64(len(alerts)) - maxAlerts + } + + return alerts, 0 } // Notify implements the Notifier interface. func (n *Notifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, error) { + alerts, numTruncated := truncateAlerts(n.conf.MaxAlerts, alerts) data := notify.GetTemplateData(ctx, n.tmpl, alerts, n.logger) groupKey, err := notify.ExtractGroupKey(ctx) @@ -83,9 +93,10 @@ func (n *Notifier) Notify(ctx context.Context, alerts ...*types.Alert) (bool, er } msg := &Message{ - Version: "4", - Data: data, - GroupKey: groupKey.String(), + Version: "4", + Data: data, + GroupKey: groupKey.String(), + TruncatedAlerts: numTruncated, } var buf bytes.Buffer diff --git a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go index a3fbfb10268fc..7f664ecad0821 100644 --- a/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go +++ b/vendor/github.com/prometheus/alertmanager/pkg/labels/parse.go @@ -14,9 +14,10 @@ package labels import ( - "fmt" "regexp" "strings" + + "github.com/pkg/errors" ) var ( @@ -70,17 +71,17 @@ func ParseMatcher(s string) (*Matcher, error) { ms := re.FindStringSubmatch(s) if len(ms) < 4 { - return nil, fmt.Errorf("bad matcher format: %s", s) + return nil, errors.Errorf("bad matcher format: %s", s) } name = ms[1] if name == "" { - return nil, fmt.Errorf("failed to parse label name") + return nil, errors.New("failed to parse label name") } matchType, found := typeMap[ms[2]] if !found { - return nil, fmt.Errorf("failed to find match operator") + return nil, errors.New("failed to find match 
operator") } if ms[3] != "" { diff --git a/vendor/github.com/prometheus/alertmanager/silence/silence.go b/vendor/github.com/prometheus/alertmanager/silence/silence.go index 98e533cec4783..c21c5c58af5dc 100644 --- a/vendor/github.com/prometheus/alertmanager/silence/silence.go +++ b/vendor/github.com/prometheus/alertmanager/silence/silence.go @@ -122,7 +122,7 @@ func (s *Silencer) Mutes(lset model.LabelSet) bool { ) if markerVersion == s.silences.Version() { // No new silences added, just need to check which of the old - // silences are still revelant. + // silences are still relevant. if len(ids) == 0 { // Super fast path: No silences ever applied to this // alert, none have been added. We are done. diff --git a/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.pb.go b/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.pb.go index 6014ca64ffa7e..10502c72931f3 100644 --- a/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.pb.go +++ b/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.pb.go @@ -26,7 +26,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Type specifies how the given name and pattern are matched // against a label set. @@ -155,7 +155,7 @@ type Silence struct { // The time range during which the silence is active. StartsAt time.Time `protobuf:"bytes,3,opt,name=starts_at,json=startsAt,proto3,stdtime" json:"starts_at"` EndsAt time.Time `protobuf:"bytes,4,opt,name=ends_at,json=endsAt,proto3,stdtime" json:"ends_at"` - // The last motification made to the silence. + // The last notification made to the silence. 
UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` // DEPRECATED: A set of comments made on the silence. Comments []*Comment `protobuf:"bytes,7,rep,name=comments,proto3" json:"comments,omitempty"` @@ -1380,6 +1380,7 @@ func (m *MeshSilence) Unmarshal(dAtA []byte) error { func skipSilence(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1411,10 +1412,8 @@ func skipSilence(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1435,55 +1434,30 @@ func skipSilence(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthSilence } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthSilence - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSilence - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSilence(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthSilence - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSilence + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthSilence + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthSilence = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSilence = fmt.Errorf("proto: 
integer overflow") + ErrInvalidLengthSilence = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSilence = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSilence = fmt.Errorf("proto: unexpected end of group") ) diff --git a/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.proto b/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.proto index b99343f327418..9a62d9c56ab12 100644 --- a/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.proto +++ b/vendor/github.com/prometheus/alertmanager/silence/silencepb/silence.proto @@ -48,7 +48,7 @@ message Silence { google.protobuf.Timestamp starts_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; google.protobuf.Timestamp ends_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - // The last motification made to the silence. + // The last notification made to the silence. google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; // DEPRECATED: A set of comments made on the silence. diff --git a/vendor/github.com/prometheus/alertmanager/template/default.tmpl b/vendor/github.com/prometheus/alertmanager/template/default.tmpl index 97719e43a0a88..b26eebbf2e302 100644 --- a/vendor/github.com/prometheus/alertmanager/template/default.tmpl +++ b/vendor/github.com/prometheus/alertmanager/template/default.tmpl @@ -24,10 +24,6 @@ {{ define "slack.default.footer" }}{{ end }} -{{ define "hipchat.default.from" }}{{ template "__alertmanager" . }}{{ end }} -{{ define "hipchat.default.message" }}{{ template "__subject" . }}{{ end }} - - {{ define "pagerduty.default.description" }}{{ template "__subject" . }}{{ end }} {{ define "pagerduty.default.client" }}{{ template "__alertmanager" . }}{{ end }} {{ define "pagerduty.default.clientURL" }}{{ template "__alertmanagerURL" . 
}}{{ end }} diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 11667a8268a79..f8866ff5d4a6a 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -179,6 +179,12 @@ func resolveFilepaths(baseDir string, cfg *Config) { for _, consulcfg := range cfg.ConsulSDConfigs { tlsPaths(&consulcfg.TLSConfig) } + for _, digitaloceancfg := range cfg.DigitalOceanSDConfigs { + clientPaths(&digitaloceancfg.HTTPClientConfig) + } + for _, dockerswarmcfg := range cfg.DockerSwarmSDConfigs { + clientPaths(&dockerswarmcfg.HTTPClientConfig) + } for _, cfg := range cfg.OpenstackSDConfigs { tlsPaths(&cfg.TLSConfig) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md index 060c7d52c8b26..4c012c8d82be4 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/README.md +++ b/vendor/github.com/prometheus/prometheus/discovery/README.md @@ -216,4 +216,16 @@ If all the targets in a group go away, we need to send the target groups with em ``` down the channel. +### New Service Discovery Check List + +Here are some non-obvious parts of adding service discoveries that need to be verified: + +- Check for `nil` SDConfigs in `discovery/config/config.go`. +- Validate that discovery configs can be DeepEqualled by adding them to + `config/testdata/conf.good.yml` and to the associated tests. +- If there is a TLSConfig or HTTPClientConfig, add them to + `resolveFilepaths` in `config/config.go`. +- List the service discovery in both `` and + `` in `docs/configuration/configuration.md`. 
+ diff --git a/vendor/github.com/prometheus/prometheus/discovery/config/config.go b/vendor/github.com/prometheus/prometheus/discovery/config/config.go index cf7b7e9e367fb..1ae0f952fbc4e 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/config/config.go +++ b/vendor/github.com/prometheus/prometheus/discovery/config/config.go @@ -83,6 +83,11 @@ func (c *ServiceDiscoveryConfig) Validate() error { return errors.New("empty or null section in digitalocean_sd_configs") } } + for _, cfg := range c.DockerSwarmSDConfigs { + if cfg == nil { + return errors.New("empty or null section in dockerswarm_sd_configs") + } + } for _, cfg := range c.DNSSDConfigs { if cfg == nil { return errors.New("empty or null section in dns_sd_configs") @@ -133,5 +138,10 @@ func (c *ServiceDiscoveryConfig) Validate() error { return errors.New("empty or null section in static_configs") } } + for _, cfg := range c.TritonSDConfigs { + if cfg == nil { + return errors.New("empty or null section in triton_sd_configs") + } + } return nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go index 3d5241c13e233..ef98890ff4dd7 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dockerswarm/dockerswarm.go @@ -44,7 +44,6 @@ type SDConfig struct { HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` Host string `yaml:"host"` - url *url.URL Role string `yaml:"role"` Port int `yaml:"port"` @@ -62,11 +61,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if c.Host == "" { return fmt.Errorf("host missing") } - url, err := url.Parse(c.Host) - if err != nil { + if _, err = url.Parse(c.Host); err != nil { return err } - c.url = url switch c.Role { case "services", "nodes", "tasks": case "": @@ -82,56 +79,69 @@ func (c *SDConfig) 
UnmarshalYAML(unmarshal func(interface{}) error) error { type Discovery struct { *refresh.Discovery client *client.Client + role string port int } // NewDiscovery returns a new Discovery which periodically refreshes its targets. func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { + var err error + d := &Discovery{ port: conf.Port, + role: conf.Role, } - rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false) + hostURL, err := url.Parse(conf.Host) if err != nil { return nil, err } - // This is used in tests. In normal situations, it is set when Unmarshaling. - if conf.url == nil { - conf.url, err = url.Parse(conf.Host) + opts := []client.Opt{ + client.WithHost(conf.Host), + client.WithAPIVersionNegotiation(), + } + + // There are other protocols than HTTP supported by the Docker daemon, like + // unix, which are not supported by the HTTP client. Passing HTTP client + // options to the Docker client makes those non-HTTP requests fail. + if hostURL.Scheme == "http" || hostURL.Scheme == "https" { + rt, err := config_util.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", false) if err != nil { return nil, err } + opts = append(opts, + client.WithHTTPClient(&http.Client{ + Transport: rt, + Timeout: time.Duration(conf.RefreshInterval), + }), + client.WithScheme(hostURL.Scheme), + ) } - d.client, err = client.NewClientWithOpts( - client.WithHost(conf.Host), - client.WithHTTPClient(&http.Client{ - Transport: rt, - Timeout: time.Duration(conf.RefreshInterval), - }), - client.WithScheme(conf.url.Scheme), - client.WithAPIVersionNegotiation(), - ) + d.client, err = client.NewClientWithOpts(opts...) 
if err != nil { return nil, fmt.Errorf("error setting up docker swarm client: %w", err) } - var r func(context.Context) ([]*targetgroup.Group, error) - switch conf.Role { - case "services": - r = d.refreshServices - case "nodes": - r = d.refreshNodes - case "tasks": - r = d.refreshTasks - } - d.Discovery = refresh.NewDiscovery( logger, "dockerswarm", time.Duration(conf.RefreshInterval), - r, + d.refresh, ) return d, nil } + +func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + switch d.role { + case "services": + return d.refreshServices(ctx) + case "nodes": + return d.refreshNodes(ctx) + case "tasks": + return d.refreshTasks(ctx) + default: + panic(fmt.Errorf("unexpected role %s", d.role)) + } +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go index 899ab2738cd97..0003332170378 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go @@ -68,7 +68,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { for j, g := range g.Groups { if g.Name == "" { - errs = append(errs, errors.Errorf("%d:%d: Groupname should not be empty", node.Groups[j].Line, node.Groups[j].Column)) + errs = append(errs, errors.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column)) } if _, ok := set[g.Name]; ok { diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 3c4629f7b0298..007422c64ffdf 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -24,7 +24,6 @@ import ( "sort" "strconv" "sync" - "sync/atomic" "time" "github.com/go-kit/kit/log" @@ -56,31 +55,15 @@ const ( minInt64 = -9223372036854775808 ) -var ( - // DefaultEvaluationInterval is the default 
evaluation interval of - // a subquery in milliseconds. - DefaultEvaluationInterval int64 -) - -// SetDefaultEvaluationInterval sets DefaultEvaluationInterval. -func SetDefaultEvaluationInterval(ev time.Duration) { - atomic.StoreInt64(&DefaultEvaluationInterval, durationToInt64Millis(ev)) -} - -// GetDefaultEvaluationInterval returns the DefaultEvaluationInterval as time.Duration. -func GetDefaultEvaluationInterval() int64 { - return atomic.LoadInt64(&DefaultEvaluationInterval) -} - type engineMetrics struct { currentQueries prometheus.Gauge maxConcurrentQueries prometheus.Gauge queryLogEnabled prometheus.Gauge queryLogFailures prometheus.Counter - queryQueueTime prometheus.Summary - queryPrepareTime prometheus.Summary - queryInnerEval prometheus.Summary - queryResultSort prometheus.Summary + queryQueueTime prometheus.Observer + queryPrepareTime prometheus.Observer + queryInnerEval prometheus.Observer + queryResultSort prometheus.Observer } // convertibleToInt64 returns true if v does not over-/underflow an int64. @@ -221,19 +204,24 @@ type EngineOpts struct { // LookbackDelta determines the time since the last sample after which a time // series is considered stale. LookbackDelta time.Duration + + // NoStepSubqueryIntervalFn is the default evaluation interval of + // a subquery in milliseconds if no step in range vector was specified `[30m:]`. + NoStepSubqueryIntervalFn func(rangeMillis int64) int64 } // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. 
type Engine struct { - logger log.Logger - metrics *engineMetrics - timeout time.Duration - maxSamplesPerQuery int - activeQueryTracker *ActiveQueryTracker - queryLogger QueryLogger - queryLoggerLock sync.RWMutex - lookbackDelta time.Duration + logger log.Logger + metrics *engineMetrics + timeout time.Duration + maxSamplesPerQuery int + activeQueryTracker *ActiveQueryTracker + queryLogger QueryLogger + queryLoggerLock sync.RWMutex + lookbackDelta time.Duration + noStepSubqueryIntervalFn func(rangeMillis int64) int64 } // NewEngine returns a new engine. @@ -242,6 +230,16 @@ func NewEngine(opts EngineOpts) *Engine { opts.Logger = log.NewNopLogger() } + queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"slice"}, + ) + metrics := &engineMetrics{ currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, @@ -267,38 +265,10 @@ func NewEngine(opts EngineOpts) *Engine { Name: "queries_concurrent_max", Help: "The max number of concurrent queries.", }), - queryQueueTime: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "queue_time"}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }), - queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "prepare_time"}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }), - queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: 
prometheus.Labels{"slice": "inner_eval"}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }), - queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "query_duration_seconds", - Help: "Query timings", - ConstLabels: prometheus.Labels{"slice": "result_sort"}, - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }), + queryQueueTime: queryResultSummary.WithLabelValues("queue_time"), + queryPrepareTime: queryResultSummary.WithLabelValues("prepare_time"), + queryInnerEval: queryResultSummary.WithLabelValues("inner_eval"), + queryResultSort: queryResultSummary.WithLabelValues("result_sort"), } if t := opts.ActiveQueryTracker; t != nil { @@ -320,20 +290,18 @@ func NewEngine(opts EngineOpts) *Engine { metrics.maxConcurrentQueries, metrics.queryLogEnabled, metrics.queryLogFailures, - metrics.queryQueueTime, - metrics.queryPrepareTime, - metrics.queryInnerEval, - metrics.queryResultSort, + queryResultSummary, ) } return &Engine{ - timeout: opts.Timeout, - logger: opts.Logger, - metrics: metrics, - maxSamplesPerQuery: opts.MaxSamples, - activeQueryTracker: opts.ActiveQueryTracker, - lookbackDelta: opts.LookbackDelta, + timeout: opts.Timeout, + logger: opts.Logger, + metrics: metrics, + maxSamplesPerQuery: opts.MaxSamples, + activeQueryTracker: opts.ActiveQueryTracker, + lookbackDelta: opts.LookbackDelta, + noStepSubqueryIntervalFn: opts.NoStepSubqueryIntervalFn, } } @@ -525,14 +493,14 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval if s.Start == s.End && s.Interval == 0 { start := timeMilliseconds(s.Start) evaluator := &evaluator{ - startTimestamp: start, - endTimestamp: start, - interval: 1, - ctx: ctxInnerEval, - maxSamples: ng.maxSamplesPerQuery, - defaultEvalInterval: GetDefaultEvaluationInterval(), - logger: ng.logger, - lookbackDelta: ng.lookbackDelta, + startTimestamp: start, + endTimestamp: start, + interval: 1, + ctx: 
ctxInnerEval, + maxSamples: ng.maxSamplesPerQuery, + logger: ng.logger, + lookbackDelta: ng.lookbackDelta, + noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, } val, warnings, err := evaluator.Eval(s.Expr) @@ -575,14 +543,14 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval // Range evaluation. evaluator := &evaluator{ - startTimestamp: timeMilliseconds(s.Start), - endTimestamp: timeMilliseconds(s.End), - interval: durationMilliseconds(s.Interval), - ctx: ctxInnerEval, - maxSamples: ng.maxSamplesPerQuery, - defaultEvalInterval: GetDefaultEvaluationInterval(), - logger: ng.logger, - lookbackDelta: ng.lookbackDelta, + startTimestamp: timeMilliseconds(s.Start), + endTimestamp: timeMilliseconds(s.End), + interval: durationMilliseconds(s.Interval), + ctx: ctxInnerEval, + maxSamples: ng.maxSamplesPerQuery, + logger: ng.logger, + lookbackDelta: ng.lookbackDelta, + noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, } val, warnings, err := evaluator.Eval(s.Expr) if err != nil { @@ -657,13 +625,14 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { hints := &storage.SelectHints{ Start: timestamp.FromTime(s.Start), End: timestamp.FromTime(s.End), - Step: durationToInt64Millis(s.Interval), + Step: durationMilliseconds(s.Interval), } // We need to make sure we select the timerange selected by the subquery. - // TODO(gouthamve): cumulativeSubqueryOffset gives the sum of range and the offset - // we can optimise it by separating out the range and offsets, and subtracting the offsets - // from end also. + // The cumulativeSubqueryOffset function gives the sum of range and the offset. + // TODO(gouthamve): Consider optimising it by separating out the range and offsets, and subtracting the offsets + // from end also. See: https://github.com/prometheus/prometheus/issues/7629. + // TODO(bwplotka): Add support for better hints when subquerying. See: https://github.com/prometheus/prometheus/issues/7630. 
subqOffset := ng.cumulativeSubqueryOffset(path) offsetMilliseconds := durationMilliseconds(subqOffset) hints.Start = hints.Start - offsetMilliseconds @@ -769,11 +738,11 @@ type evaluator struct { endTimestamp int64 // End time in milliseconds. interval int64 // Interval in milliseconds. - maxSamples int - currentSamples int - defaultEvalInterval int64 - logger log.Logger - lookbackDelta time.Duration + maxSamples int + currentSamples int + logger log.Logger + lookbackDelta time.Duration + noStepSubqueryIntervalFn func(rangeMillis int64) int64 } // errorf causes a panic with the input formatted into an error. @@ -1333,21 +1302,22 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { return ev.matrixSelector(e) case *parser.SubqueryExpr: - offsetMillis := durationToInt64Millis(e.Offset) - rangeMillis := durationToInt64Millis(e.Range) + offsetMillis := durationMilliseconds(e.Offset) + rangeMillis := durationMilliseconds(e.Range) newEv := &evaluator{ - endTimestamp: ev.endTimestamp - offsetMillis, - interval: ev.defaultEvalInterval, - ctx: ev.ctx, - currentSamples: ev.currentSamples, - maxSamples: ev.maxSamples, - defaultEvalInterval: ev.defaultEvalInterval, - logger: ev.logger, - lookbackDelta: ev.lookbackDelta, + endTimestamp: ev.endTimestamp - offsetMillis, + ctx: ev.ctx, + currentSamples: ev.currentSamples, + maxSamples: ev.maxSamples, + logger: ev.logger, + lookbackDelta: ev.lookbackDelta, + noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, } if e.Step != 0 { - newEv.interval = durationToInt64Millis(e.Step) + newEv.interval = durationMilliseconds(e.Step) + } else { + newEv.interval = ev.noStepSubqueryIntervalFn(rangeMillis) } // Start with the first timestamp after (ev.startTimestamp - offset - range) @@ -1367,10 +1337,6 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { panic(errors.Errorf("unhandled expression of type: %T", expr)) } -func durationToInt64Millis(d time.Duration) int64 { - return 
int64(d / time.Millisecond) -} - // vectorSelector evaluates a *parser.VectorSelector expression. func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vector, storage.Warnings) { ws, err := checkAndExpandSeriesSet(ev.ctx, node) @@ -2005,7 +1971,25 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without case parser.AVG: group.groupCount++ - group.mean += (s.V - group.mean) / float64(group.groupCount) + if math.IsInf(group.mean, 0) { + if math.IsInf(s.V, 0) && (group.mean > 0) == (s.V > 0) { + // The `mean` and `s.V` values are `Inf` of the same sign. They + // can't be subtracted, but the value of `mean` is correct + // already. + break + } + if !math.IsInf(s.V, 0) && !math.IsNaN(s.V) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + break + } + } + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + group.mean += s.V/float64(group.groupCount) - group.mean/float64(group.groupCount) case parser.GROUP: // Do nothing. Required to avoid the panic in `default:` below. diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 454ff4fa53a95..01af16e270a05 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -354,7 +354,24 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode var mean, count float64 for _, v := range values { count++ - mean += (v.V - mean) / count + if math.IsInf(mean, 0) { + if math.IsInf(v.V, 0) && (mean > 0) == (v.V > 0) { + // The `mean` and `v.V` values are `Inf` of the same sign. 
They + // can't be subtracted, but the value of `mean` is correct + // already. + continue + } + if !math.IsInf(v.V, 0) && !math.IsNaN(v.V) { + // At this stage, the mean is an infinite. If the added + // value is neither an Inf or a Nan, we can keep that mean + // value. + // This is required because our calculation below removes + // the mean value, which would look like Inf += x - Inf and + // end up as a NaN. + continue + } + } + mean += v.V/count - mean/count } return mean }) diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index cd7c91c9f5c0b..de82d67254653 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -26,7 +26,7 @@ import ( // Node is a generic interface for all nodes in an AST. // // Whenever numerous nodes are listed such as in a switch-case statement -// or a chain of function definitions (e.g. String(), expr(), etc.) convention is +// or a chain of function definitions (e.g. String(), PromQLExpr(), etc.) convention is // to list them as follows: // // - Statements @@ -49,9 +49,9 @@ type Node interface { type Statement interface { Node - // stmt ensures that no other type accidentally implements the interface + // PromQLStmt ensures that no other type accidentally implements the interface // nolint:unused - stmt() + PromQLStmt() } // EvalStmt holds an expression and information on the range it should @@ -66,7 +66,7 @@ type EvalStmt struct { Interval time.Duration } -func (*EvalStmt) stmt() {} +func (*EvalStmt) PromQLStmt() {} // Expr is a generic interface for all expression types. type Expr interface { @@ -75,8 +75,8 @@ type Expr interface { // Type returns the type the expression evaluates to. It does not perform // in-depth checks as this is done at parsing-time. Type() ValueType - // expr ensures that no other types accidentally implement the interface. 
- expr() + // PromQLExpr ensures that no other types accidentally implement the interface. + PromQLExpr() } // Expressions is a list of expression nodes that implements Node. @@ -180,7 +180,7 @@ type VectorSelector struct { type TestStmt func(context.Context) error func (TestStmt) String() string { return "test statement" } -func (TestStmt) stmt() {} +func (TestStmt) PromQLStmt() {} func (TestStmt) PositionRange() PositionRange { return PositionRange{ @@ -204,16 +204,16 @@ func (e *BinaryExpr) Type() ValueType { return ValueTypeVector } -func (*AggregateExpr) expr() {} -func (*BinaryExpr) expr() {} -func (*Call) expr() {} -func (*MatrixSelector) expr() {} -func (*SubqueryExpr) expr() {} -func (*NumberLiteral) expr() {} -func (*ParenExpr) expr() {} -func (*StringLiteral) expr() {} -func (*UnaryExpr) expr() {} -func (*VectorSelector) expr() {} +func (*AggregateExpr) PromQLExpr() {} +func (*BinaryExpr) PromQLExpr() {} +func (*Call) PromQLExpr() {} +func (*MatrixSelector) PromQLExpr() {} +func (*SubqueryExpr) PromQLExpr() {} +func (*NumberLiteral) PromQLExpr() {} +func (*ParenExpr) PromQLExpr() {} +func (*StringLiteral) PromQLExpr() {} +func (*UnaryExpr) PromQLExpr() {} +func (*VectorSelector) PromQLExpr() {} // VectorMatchCardinality describes the cardinality relationship // of two Vectors in a binary operation. 
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index d066a3702cd1f..501545b9879ce 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -536,7 +536,7 @@ metric : metric_identifier label_set ; -metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK; +metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT; label_set : LEFT_BRACE label_set_list RIGHT_BRACE { $$ = labels.New($2...) } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index 75470a8079a22..1acb4dc6bbb10 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -192,241 +192,248 @@ var yyExca = [...]int{ -1, 1, 1, -1, -2, 0, - -1, 32, - 1, 120, - 10, 120, - 22, 120, + -1, 33, + 1, 121, + 10, 121, + 22, 121, -2, 0, - -1, 54, - 2, 132, - 15, 132, - 60, 132, - 66, 132, - -2, 89, -1, 55, 2, 133, 15, 133, 60, 133, 66, 133, - -2, 90, + -2, 89, -1, 56, 2, 134, 15, 134, 60, 134, 66, 134, - -2, 92, + -2, 90, -1, 57, 2, 135, 15, 135, 60, 135, 66, 135, - -2, 93, + -2, 92, -1, 58, 2, 136, 15, 136, 60, 136, 66, 136, - -2, 94, + -2, 93, -1, 59, 2, 137, 15, 137, 60, 137, 66, 137, - -2, 99, + -2, 94, -1, 60, 2, 138, 15, 138, 60, 138, 66, 138, - -2, 101, + -2, 99, -1, 61, 2, 139, 15, 139, 60, 139, 66, 139, - -2, 103, + -2, 101, -1, 62, 2, 140, 15, 140, 60, 140, 66, 140, - -2, 
104, + -2, 103, -1, 63, 2, 141, 15, 141, 60, 141, 66, 141, - -2, 105, + -2, 104, -1, 64, 2, 142, 15, 142, 60, 142, 66, 142, - -2, 106, + -2, 105, -1, 65, 2, 143, 15, 143, 60, 143, 66, 143, + -2, 106, + -1, 66, + 2, 144, + 15, 144, + 60, 144, + 66, 144, -2, 107, - -1, 175, - 12, 183, - 13, 183, - 16, 183, - 17, 183, - 23, 183, - 26, 183, - 32, 183, - 33, 183, - 36, 183, - 42, 183, - 45, 183, - 46, 183, - 47, 183, - 48, 183, - 49, 183, - 50, 183, - 51, 183, - 52, 183, - 53, 183, - 54, 183, - 55, 183, - 56, 183, - 60, 183, - 64, 183, - -2, 0, -1, 176, - 12, 183, - 13, 183, - 16, 183, - 17, 183, - 23, 183, - 26, 183, - 32, 183, - 33, 183, - 36, 183, - 42, 183, - 45, 183, - 46, 183, - 47, 183, - 48, 183, - 49, 183, - 50, 183, - 51, 183, - 52, 183, - 53, 183, - 54, 183, - 55, 183, - 56, 183, - 60, 183, - 64, 183, + 12, 184, + 13, 184, + 16, 184, + 17, 184, + 23, 184, + 26, 184, + 32, 184, + 33, 184, + 36, 184, + 42, 184, + 45, 184, + 46, 184, + 47, 184, + 48, 184, + 49, 184, + 50, 184, + 51, 184, + 52, 184, + 53, 184, + 54, 184, + 55, 184, + 56, 184, + 60, 184, + 64, 184, + 66, 184, -2, 0, - -1, 192, - 19, 181, + -1, 177, + 12, 184, + 13, 184, + 16, 184, + 17, 184, + 23, 184, + 26, 184, + 32, 184, + 33, 184, + 36, 184, + 42, 184, + 45, 184, + 46, 184, + 47, 184, + 48, 184, + 49, 184, + 50, 184, + 51, 184, + 52, 184, + 53, 184, + 54, 184, + 55, 184, + 56, 184, + 60, 184, + 64, 184, + 66, 184, -2, 0, - -1, 240, + -1, 193, 19, 182, -2, 0, + -1, 241, + 19, 183, + -2, 0, } const yyPrivate = 57344 -const yyLast = 543 +const yyLast = 598 var yyAct = [...]int{ - 246, 196, 34, 135, 236, 237, 167, 168, 107, 73, - 98, 96, 95, 175, 176, 120, 99, 198, 173, 230, - 174, 170, 249, 97, 114, 229, 115, 208, 228, 53, - 109, 214, 162, 250, 247, 252, 100, 171, 100, 244, - 108, 169, 224, 6, 243, 68, 225, 210, 211, 227, - 113, 212, 102, 161, 103, 223, 94, 242, 116, 101, - 199, 201, 203, 204, 205, 213, 215, 218, 219, 220, - 221, 222, 94, 30, 200, 202, 206, 207, 209, 216, - 217, 98, 104, 91, 78, 
79, 80, 99, 81, 82, - 83, 84, 85, 86, 87, 88, 89, 90, 138, 91, - 92, 144, 7, 148, 142, 145, 93, 140, 241, 141, - 2, 3, 4, 5, 143, 137, 139, 251, 77, 172, - 31, 160, 93, 137, 177, 178, 179, 180, 181, 182, - 183, 184, 185, 186, 187, 188, 189, 190, 121, 122, + 247, 197, 35, 136, 237, 238, 168, 169, 108, 74, + 97, 96, 99, 174, 121, 175, 98, 250, 100, 176, + 177, 230, 95, 54, 231, 229, 171, 48, 69, 101, + 50, 22, 49, 163, 245, 148, 251, 248, 51, 244, + 116, 67, 172, 6, 170, 101, 228, 18, 19, 92, + 115, 20, 243, 103, 162, 104, 69, 68, 117, 102, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 94, 95, 99, 13, 114, 105, 31, 24, + 100, 30, 7, 252, 8, 79, 80, 81, 33, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 139, + 92, 93, 225, 145, 149, 143, 146, 141, 110, 142, + 2, 3, 4, 5, 242, 224, 144, 78, 109, 32, + 173, 138, 161, 94, 226, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, - 133, 134, 1, 193, 198, 164, 154, 192, 152, 75, - 226, 10, 166, 151, 208, 239, 44, 169, 214, 74, - 191, 70, 195, 231, 150, 159, 170, 232, 233, 234, - 235, 238, 155, 157, 210, 211, 43, 52, 212, 45, - 9, 9, 171, 156, 158, 69, 240, 199, 201, 203, - 204, 205, 213, 215, 218, 219, 220, 221, 222, 42, - 41, 200, 202, 206, 207, 209, 216, 217, 47, 68, - 112, 49, 22, 48, 109, 111, 147, 245, 136, 50, - 119, 248, 66, 75, 108, 137, 110, 8, 18, 19, - 106, 32, 20, 74, 40, 253, 39, 38, 67, 72, - 254, 54, 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, 65, 117, 146, 37, 13, 118, 36, 33, - 24, 47, 68, 35, 49, 22, 48, 165, 76, 163, - 194, 71, 50, 51, 197, 66, 153, 46, 105, 0, - 0, 18, 19, 0, 0, 20, 0, 0, 0, 0, - 0, 67, 0, 0, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 0, 47, 68, 13, - 49, 22, 48, 24, 0, 0, 0, 0, 50, 0, - 0, 66, 0, 0, 0, 0, 0, 18, 19, 0, - 0, 20, 17, 68, 0, 0, 22, 67, 0, 0, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 18, 19, 0, 13, 20, 17, 30, 24, - 0, 22, 0, 0, 0, 11, 12, 14, 15, 16, - 21, 23, 25, 26, 27, 28, 29, 18, 
19, 0, - 13, 20, 0, 0, 24, 0, 0, 0, 0, 0, + 133, 134, 135, 153, 46, 140, 10, 137, 152, 1, + 70, 227, 138, 155, 138, 240, 71, 45, 44, 151, + 34, 95, 48, 69, 232, 50, 22, 49, 233, 234, + 235, 236, 239, 51, 80, 43, 67, 194, 42, 156, + 158, 193, 18, 19, 89, 90, 20, 241, 92, 120, + 157, 159, 68, 41, 192, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 40, 165, 76, + 13, 94, 39, 118, 24, 167, 30, 147, 246, 75, + 170, 38, 249, 48, 69, 160, 50, 22, 49, 171, + 113, 119, 110, 37, 51, 112, 254, 67, 36, 76, + 166, 255, 109, 18, 19, 172, 111, 20, 107, 75, + 77, 164, 195, 68, 72, 73, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 199, 53, + 52, 13, 9, 9, 198, 24, 154, 30, 209, 47, + 106, 0, 215, 0, 0, 0, 253, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 211, 212, + 0, 0, 213, 0, 0, 0, 0, 0, 0, 0, + 0, 200, 202, 204, 205, 206, 214, 216, 219, 220, + 221, 222, 223, 199, 0, 201, 203, 207, 208, 210, + 217, 218, 0, 209, 0, 0, 0, 215, 0, 0, + 0, 196, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 211, 212, 0, 0, 213, 0, 0, + 0, 0, 0, 0, 0, 0, 200, 202, 204, 205, + 206, 214, 216, 219, 220, 221, 222, 223, 0, 0, + 201, 203, 207, 208, 210, 217, 218, 17, 69, 0, + 0, 22, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 18, 19, 0, + 0, 20, 17, 31, 0, 0, 22, 0, 0, 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, 27, - 28, 29, 94, 0, 0, 13, 0, 0, 149, 24, - 0, 0, 0, 0, 78, 79, 80, 0, 81, 82, - 83, 84, 85, 86, 87, 88, 89, 90, 0, 91, - 92, 0, 0, 94, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 78, 79, 80, 0, 81, - 82, 83, 93, 85, 86, 87, 88, 89, 90, 0, - 91, 92, 0, 0, 94, 0, 0, 0, 0, 0, - 0, 0, 0, 94, 0, 0, 78, 79, 80, 0, - 81, 82, 94, 93, 85, 86, 79, 88, 89, 90, - 0, 91, 92, 0, 78, 79, 88, 89, 0, 0, - 91, 0, 0, 0, 0, 88, 89, 0, 0, 91, - 92, 0, 0, 0, 93, 0, 0, 0, 0, 0, - 0, 0, 0, 93, 0, 0, 0, 0, 0, 0, - 0, 0, 93, + 28, 29, 18, 19, 0, 13, 20, 0, 0, 24, + 0, 30, 0, 0, 0, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 28, 29, 95, 0, 0, + 13, 0, 0, 150, 24, 0, 30, 0, 0, 79, + 80, 81, 0, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 0, 92, 93, 0, 
0, 95, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 79, 80, 81, 0, 82, 83, 84, 94, 86, 87, + 88, 89, 90, 91, 0, 92, 93, 0, 0, 95, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 79, 80, 81, 0, 82, 83, 95, 94, 86, + 87, 0, 89, 90, 91, 0, 92, 93, 0, 79, + 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 89, 90, 0, 0, 92, 93, 0, 0, 0, 94, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 94, } var yyPact = [...]int{ - 41, 92, 355, 355, 259, 330, -1000, -1000, -1000, 60, + 41, 72, 410, 410, 160, 385, -1000, -1000, -1000, 65, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 231, -1000, 116, -1000, 58, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 21, 23, -1000, 305, - -1000, 305, 32, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 222, -1000, - -1000, 218, -1000, -1000, 22, -1000, 4, -1000, -44, -44, - -44, -44, -44, -44, -44, -44, -44, -44, -44, -44, - -44, -44, -44, 226, 114, 23, -50, -1000, 99, 99, - 206, -1000, 398, 42, -1000, 156, -1000, -1000, 154, -1000, - -1000, 157, -1000, 30, -1000, 150, 305, -1000, -45, -48, - -1000, 305, 305, 305, 305, 305, 305, 305, 305, 305, - 305, 305, 305, 305, 305, -1000, -1000, -1000, 151, -1000, - -1000, -1000, -1000, 152, -1000, -1000, 35, -1000, 58, -1000, - -1000, 28, -1000, 26, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1, -5, -1000, -1000, -1000, -1000, - 24, 24, 469, 99, 99, 99, 99, 42, 478, 478, - 478, 460, 429, 478, 478, 460, 42, 42, 478, 42, - 469, -1000, 106, -1000, 37, -1000, -1000, -1000, -1000, -1000, + -1000, 247, -1000, 115, -1000, 59, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 14, 30, -1000, + 221, -1000, 221, 43, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 240, + -1000, -1000, 238, -1000, -1000, 48, -1000, 18, -1000, -45, + -45, -45, -45, 
-45, -45, -45, -45, -45, -45, -45, + -45, -45, -45, -45, 155, 153, 30, -48, -1000, 101, + 101, 15, -1000, 453, 8, -1000, 151, -1000, -1000, 161, + -1000, -1000, 217, -1000, 31, -1000, 213, 221, -1000, -50, + -42, -1000, 221, 221, 221, 221, 221, 221, 221, 221, + 221, 221, 221, 221, 221, 221, -1000, -1000, -1000, 185, + -1000, -1000, -1000, -1000, 331, -1000, -1000, 95, -1000, 59, + -1000, -1000, 106, -1000, 23, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -3, 0, -1000, -1000, -1000, + -1000, 27, 27, 157, 101, 101, 101, 101, 8, 533, + 533, 533, 515, 484, 533, 533, 515, 8, 8, 533, + 8, 157, -1000, 112, -1000, 32, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 305, -1000, -1000, -1000, -1000, 17, - 17, -2, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 14, - 115, -1000, -1000, 15, -1000, 58, -1000, -1000, -1000, 17, - -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 221, -1000, -1000, -1000, -1000, + 20, 20, -7, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 17, 81, -1000, -1000, 276, -1000, 59, -1000, -1000, -1000, + 20, -1000, -1000, -1000, -1000, -1000, } var yyPgo = [...]int{ - 0, 288, 8, 287, 1, 286, 284, 187, 283, 161, - 281, 237, 9, 280, 5, 4, 279, 278, 0, 6, - 277, 7, 273, 12, 58, 268, 267, 2, 265, 264, - 11, 263, 29, 247, 246, 244, 230, 210, 209, 186, - 166, 189, 3, 165, 152, 120, + 0, 290, 8, 289, 1, 286, 284, 279, 280, 156, + 264, 84, 9, 262, 5, 4, 261, 260, 0, 6, + 250, 7, 248, 11, 58, 243, 241, 2, 231, 227, + 10, 223, 23, 222, 217, 203, 199, 188, 185, 168, + 167, 154, 3, 165, 159, 119, } var yyR1 = [...]int{ @@ -440,15 +447,15 @@ var yyR1 = [...]int{ 39, 39, 39, 40, 41, 41, 41, 32, 32, 32, 1, 1, 1, 2, 2, 2, 2, 11, 11, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, - 9, 9, 10, 10, 10, 12, 12, 12, 12, 45, - 17, 17, 17, 17, 16, 16, 16, 
16, 16, 20, - 20, 20, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 9, + 9, 9, 9, 10, 10, 10, 12, 12, 12, 12, + 45, 17, 17, 17, 17, 16, 16, 16, 16, 16, + 20, 20, 20, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, - 5, 5, 5, 5, 34, 19, 21, 21, 18, 42, - 38, 43, 43, 15, 15, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, + 8, 5, 5, 5, 5, 34, 19, 21, 21, 18, + 42, 38, 43, 43, 15, 15, } var yyR2 = [...]int{ @@ -462,73 +469,73 @@ var yyR2 = [...]int{ 5, 4, 3, 2, 2, 1, 1, 3, 4, 2, 3, 1, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, - 2, 0, 3, 1, 2, 3, 3, 2, 1, 2, - 0, 3, 2, 1, 1, 3, 1, 3, 4, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, + 4, 2, 0, 3, 1, 2, 3, 3, 2, 1, + 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, - 1, 0, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, + 1, 1, 0, 1, 0, 1, } var yyChk = [...]int{ -1000, -44, 69, 70, 71, 72, 2, 10, -11, -7, -9, 45, 46, 60, 47, 48, 49, 12, 32, 33, 36, 50, 16, 51, 64, 52, 53, 54, 55, 56, - 13, -45, -11, 10, -27, -22, -25, -28, -33, -34, - -35, -37, -38, -39, -40, -41, -3, 12, 17, 15, - 23, -8, -7, -32, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 26, 42, 13, -41, - -9, -10, 18, -12, 12, 2, -17, 2, 26, 27, - 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, - 39, 41, 42, 64, 14, -23, -30, 2, 60, 66, - 15, -30, -27, -27, -32, -1, 18, -2, 12, 2, - 18, 7, 2, 28, 2, 22, -24, -31, -26, -36, - 59, -24, -24, -24, -24, -24, -24, -24, -24, -24, - -24, -24, -24, -24, -24, -42, 2, 9, -42, 2, - -30, -23, -14, 15, 2, -14, -29, 20, -27, 20, - 18, 7, 2, -5, 2, 28, 39, 29, 40, 18, - -12, 23, 2, -16, 5, -20, 12, -19, -21, 17, - 26, 42, -27, 63, 65, 61, 62, -27, -27, -27, + 66, 13, -45, -11, 10, -27, -22, -25, -28, -33, + -34, -35, -37, -38, -39, -40, -41, -3, 12, 17, + 15, 23, -8, -7, -32, 45, 46, 47, 
48, 49, + 50, 51, 52, 53, 54, 55, 56, 26, 42, 13, + -41, -9, -10, 18, -12, 12, 2, -17, 2, 26, + 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 41, 42, 64, 14, -23, -30, 2, 60, + 66, 15, -30, -27, -27, -32, -1, 18, -2, 12, + 2, 18, 7, 2, 28, 2, 22, -24, -31, -26, + -36, 59, -24, -24, -24, -24, -24, -24, -24, -24, + -24, -24, -24, -24, -24, -24, -42, 2, 9, -42, + 2, -30, -23, -14, 15, 2, -14, -29, 20, -27, + 20, 18, 7, 2, -5, 2, 28, 39, 29, 40, + 18, -12, 23, 2, -16, 5, -20, 12, -19, -21, + 17, 26, 42, -27, 63, 65, 61, 62, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, - -27, 19, 6, 2, -13, 20, -4, -6, 2, 45, - 59, 46, 60, 47, 48, 49, 61, 62, 12, 63, - 32, 33, 36, 50, 16, 51, 64, 65, 52, 53, - 54, 55, 56, 20, 7, 18, -2, 23, 2, 24, - 24, -21, -19, -19, -14, -14, -15, -14, -15, -43, - -42, 2, 20, 7, 2, -27, -18, 17, -18, 24, - 19, 2, 20, -4, -18, + -27, -27, 19, 6, 2, -13, 20, -4, -6, 2, + 45, 59, 46, 60, 47, 48, 49, 61, 62, 12, + 63, 32, 33, 36, 50, 16, 51, 64, 65, 52, + 53, 54, 55, 56, 20, 7, 18, -2, 23, 2, + 24, 24, -21, -19, -19, -14, -14, -15, -14, -15, + -43, -42, 2, 20, 7, 2, -27, -18, 17, -18, + 24, 19, 2, 20, -4, -18, } var yyDef = [...]int{ - 0, -2, 111, 111, 0, 0, 7, 6, 1, 111, + 0, -2, 112, 112, 0, 0, 7, 6, 1, 112, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, - 13, 14, 15, 16, 17, 18, 0, 95, 174, 0, - 180, 0, 75, 76, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, -2, 168, 169, 0, 5, - 87, 0, 110, 113, 0, 118, 119, 123, 41, 41, + 108, 0, 2, -2, 3, 4, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 0, 95, 175, + 0, 181, 0, 75, 76, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, 169, 170, 0, + 5, 87, 0, 111, 114, 0, 119, 120, 124, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 0, 0, 0, 21, 22, 0, 0, - 0, 58, 0, 73, 74, 0, 79, 81, 0, 86, - 108, 0, 114, 0, 117, 122, 0, 40, 45, 46, - 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 65, 66, 179, 0, 72, - 19, 20, 
23, 0, 52, 24, 0, 60, 62, 64, - 77, 0, 82, 0, 85, 170, 171, 172, 173, 109, - 112, 115, 116, 121, 124, 126, 129, 130, 131, 175, - 0, 0, 25, 0, 0, -2, -2, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, - 39, 67, -2, 71, 0, 51, 54, 56, 57, 144, + 41, 41, 41, 41, 0, 0, 0, 21, 22, 0, + 0, 0, 58, 0, 73, 74, 0, 79, 81, 0, + 86, 109, 0, 115, 0, 118, 123, 0, 40, 45, + 46, 42, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 65, 66, 180, 0, + 72, 19, 20, 23, 0, 52, 24, 0, 60, 62, + 64, 77, 0, 82, 0, 85, 171, 172, 173, 174, + 110, 113, 116, 117, 122, 125, 127, 130, 131, 132, + 176, 0, 0, 25, 0, 0, -2, -2, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 67, -2, 71, 0, 51, 54, 56, 57, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, - 165, 166, 167, 59, 63, 78, 80, 83, 84, 0, - 0, 0, 176, 177, 43, 44, 47, 184, 48, 0, - -2, 70, 49, 0, 55, 61, 125, 178, 127, 0, - 68, 69, 50, 53, 128, + 165, 166, 167, 168, 59, 63, 78, 80, 83, 84, + 0, 0, 0, 177, 178, 43, 44, 47, 185, 48, + 0, -2, 70, 49, 0, 55, 61, 126, 179, 128, + 0, 68, 69, 50, 53, 129, } var yyTok1 = [...]int{ @@ -1421,77 +1428,77 @@ yydefault: { yyVAL.labels = yyDollar[1].labels } - case 108: + case 109: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:542 { yyVAL.labels = labels.New(yyDollar[2].labels...) } - case 109: + case 110: yyDollar = yyS[yypt-4 : yypt+1] //line generated_parser.y:544 { yyVAL.labels = labels.New(yyDollar[2].labels...) 
} - case 110: + case 111: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:546 { yyVAL.labels = labels.New() } - case 111: + case 112: yyDollar = yyS[yypt-0 : yypt+1] //line generated_parser.y:548 { yyVAL.labels = labels.New() } - case 112: + case 113: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:552 { yyVAL.labels = append(yyDollar[1].labels, yyDollar[3].label) } - case 113: + case 114: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:554 { yyVAL.labels = []labels.Label{yyDollar[1].label} } - case 114: + case 115: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:556 { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.labels = yyDollar[1].labels } - case 115: + case 116: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:561 { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 116: + case 117: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:563 { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 117: + case 118: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:565 { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 118: + case 119: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:567 { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 119: + case 120: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:575 { @@ -1500,38 +1507,38 @@ yydefault: values: yyDollar[2].series, } } - case 120: + case 121: yyDollar = yyS[yypt-0 : yypt+1] //line generated_parser.y:584 { yyVAL.series = []SequenceValue{} } - case 121: + case 122: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:586 { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 122: + case 123: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:588 { yyVAL.series = yyDollar[1].series } - case 123: + case 124: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:590 { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 124: + case 125: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:594 { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 125: + case 126: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:596 { @@ -1540,13 +1547,13 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 126: + case 127: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:603 { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 127: + case 128: yyDollar = yyS[yypt-3 : yypt+1] //line generated_parser.y:605 { @@ -1555,7 +1562,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 128: + case 129: yyDollar = yyS[yypt-4 : yypt+1] //line generated_parser.y:612 { @@ -1565,7 +1572,7 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 129: + case 130: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:622 { @@ -1574,7 +1581,7 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 174: + case 175: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:653 { @@ -1583,25 +1590,25 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 175: + case 176: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:661 { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 176: + case 177: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:663 { yyVAL.float = yyDollar[2].float } - case 177: + case 178: yyDollar = yyS[yypt-2 : yypt+1] //line generated_parser.y:664 { yyVAL.float = -yyDollar[2].float } - case 178: + case 179: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:668 { @@ -1611,7 +1618,7 @@ yydefault: 
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 179: + case 180: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:678 { @@ -1621,7 +1628,7 @@ yydefault: yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) } } - case 180: + case 181: yyDollar = yyS[yypt-1 : yypt+1] //line generated_parser.y:689 { @@ -1630,13 +1637,13 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 181: + case 182: yyDollar = yyS[yypt-0 : yypt+1] //line generated_parser.y:702 { yyVAL.duration = 0 } - case 183: + case 184: yyDollar = yyS[yypt-0 : yypt+1] //line generated_parser.y:706 { diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index e3d99e83f8cc9..ff2549144e935 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -518,10 +518,11 @@ func (t *Test) clear() { t.storage = teststorage.New(t) opts := EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10000, - Timeout: 100 * time.Second, + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) }, } t.queryEngine = NewEngine(opts) diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index 54ee31d740b82..c82bc9640acd1 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -74,7 +74,7 @@ func (s AlertState) String() string { case StateFiring: return "firing" } - panic(errors.Errorf("unknown alert state: %s", s.String())) + panic(errors.Errorf("unknown alert state: %d", s)) } // Alert is the user-level representation of a single instance of an alerting rule. 
diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index 76c756c9775e1..c4344fa68e94b 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -852,6 +852,7 @@ type ManagerOptions struct { OutageTolerance time.Duration ForGracePeriod time.Duration ResendDelay time.Duration + GroupLoader GroupLoader Metrics *Metrics } @@ -863,6 +864,10 @@ func NewManager(o *ManagerOptions) *Manager { o.Metrics = NewGroupMetrics(o.Registerer) } + if o.GroupLoader == nil { + o.GroupLoader = FileLoader{} + } + m := &Manager{ groups: map[string]*Group{}, opts: o, @@ -875,8 +880,13 @@ func NewManager(o *ManagerOptions) *Manager { return m } -// Run starts processing of the rule manager. +// Run starts processing of the rule manager. It is blocking. func (m *Manager) Run() { + m.start() + <-m.done +} + +func (m *Manager) start() { close(m.block) } @@ -969,6 +979,22 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels return nil } +// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them. +type GroupLoader interface { + Load(identifier string) (*rulefmt.RuleGroups, []error) + Parse(query string) (parser.Expr, error) +} + +// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile +// and parser.ParseExpr +type FileLoader struct{} + +func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) { + return rulefmt.ParseFile(identifier) +} + +func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) } + // LoadGroups reads groups from a list of files. 
func (m *Manager) LoadGroups( interval time.Duration, externalLabels labels.Labels, filenames ...string, @@ -978,7 +1004,7 @@ func (m *Manager) LoadGroups( shouldRestore := !m.restored for _, fn := range filenames { - rgs, errs := rulefmt.ParseFile(fn) + rgs, errs := m.opts.GroupLoader.Load(fn) if errs != nil { return nil, errs } @@ -991,7 +1017,7 @@ func (m *Manager) LoadGroups( rules := make([]Rule, 0, len(rg.Rules)) for _, r := range rg.Rules { - expr, err := parser.ParseExpr(r.Expr.Value) + expr, err := m.opts.GroupLoader.Parse(r.Expr.Value) if err != nil { return nil, []error{errors.Wrap(err, fn)} } diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 4c6d3bab00d9c..ce0e3333214a1 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -161,7 +161,7 @@ type scrapePool struct { appendable storage.Appendable logger log.Logger - mtx sync.RWMutex + mtx sync.Mutex config *config.ScrapeConfig client *http.Client // Targets and loops must always be synchronized to have the same @@ -434,9 +434,7 @@ func (sp *scrapePool) sync(targets []*Target) { if _, ok := uniqueTargets[hash]; !ok { wg.Add(1) go func(l loop) { - l.stop() - wg.Done() }(sp.loops[hash]) @@ -932,77 +930,100 @@ mainLoop: default: } - var ( - start = time.Now() - scrapeCtx, cancel = context.WithTimeout(sl.ctx, timeout) - ) + last = sl.scrapeAndReport(interval, timeout, last, errc) - // Only record after the first scrape. 
- if !last.IsZero() { - targetIntervalLength.WithLabelValues(interval.String()).Observe( - time.Since(last).Seconds(), - ) + select { + case <-sl.parentCtx.Done(): + close(sl.stopped) + return + case <-sl.ctx.Done(): + break mainLoop + case <-ticker.C: } + } - b := sl.buffers.Get(sl.lastScrapeSize).([]byte) - buf := bytes.NewBuffer(b) + close(sl.stopped) - contentType, scrapeErr := sl.scraper.scrape(scrapeCtx, buf) - cancel() + if !sl.disabledEndOfRunStalenessMarkers { + sl.endOfRunStaleness(last, ticker, interval) + } +} - if scrapeErr == nil { - b = buf.Bytes() - // NOTE: There were issues with misbehaving clients in the past - // that occasionally returned empty results. We don't want those - // to falsely reset our buffer size. - if len(b) > 0 { - sl.lastScrapeSize = len(b) - } - } else { - level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr.Error()) - if errc != nil { - errc <- scrapeErr - } - } +// scrapeAndReport performs a scrape and then appends the result to the storage +// together with reporting metrics, by using as few appenders as possible. +// In the happy scenario, a single appender is used. +func (sl *scrapeLoop) scrapeAndReport(interval, timeout time.Duration, last time.Time, errc chan<- error) time.Time { + var ( + start = time.Now() + scrapeCtx, cancel = context.WithTimeout(sl.ctx, timeout) + ) - // A failed scrape is the same as an empty scrape, - // we still call sl.append to trigger stale markers. - total, added, seriesAdded, appErr := sl.append(b, contentType, start) - if appErr != nil { - level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) - // The append failed, probably due to a parse error or sample limit. - // Call sl.append again with an empty scrape to trigger stale markers. - if _, _, _, err := sl.append([]byte{}, "", start); err != nil { - level.Warn(sl.l).Log("msg", "Append failed", "err", err) - } - } + // Only record after the first scrape. 
+ if !last.IsZero() { + targetIntervalLength.WithLabelValues(interval.String()).Observe( + time.Since(last).Seconds(), + ) + } - sl.buffers.Put(b) + b := sl.buffers.Get(sl.lastScrapeSize).([]byte) + buf := bytes.NewBuffer(b) - if scrapeErr == nil { - scrapeErr = appErr - } + contentType, scrapeErr := sl.scraper.scrape(scrapeCtx, buf) + cancel() - if err := sl.report(start, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil { - level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + if scrapeErr == nil { + b = buf.Bytes() + // NOTE: There were issues with misbehaving clients in the past + // that occasionally returned empty results. We don't want those + // to falsely reset our buffer size. + if len(b) > 0 { + sl.lastScrapeSize = len(b) } - last = start + } else { + level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr.Error()) + if errc != nil { + errc <- scrapeErr + } + } - select { - case <-sl.parentCtx.Done(): - close(sl.stopped) + app := sl.appender() + var err error + defer func() { + if err != nil { + app.Rollback() return - case <-sl.ctx.Done(): - break mainLoop - case <-ticker.C: + } + err = app.Commit() + if err != nil { + level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + } + }() + // A failed scrape is the same as an empty scrape, + // we still call sl.append to trigger stale markers. + total, added, seriesAdded, appErr := sl.append(app, b, contentType, start) + if appErr != nil { + app.Rollback() + app = sl.appender() + level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) + // The append failed, probably due to a parse error or sample limit. + // Call sl.append again with an empty scrape to trigger stale markers. 
+ if _, _, _, err := sl.append(app, []byte{}, "", start); err != nil { + app.Rollback() + app = sl.appender() + level.Warn(sl.l).Log("msg", "Append failed", "err", err) } } - close(sl.stopped) + sl.buffers.Put(b) - if !sl.disabledEndOfRunStalenessMarkers { - sl.endOfRunStaleness(last, ticker, interval) + if scrapeErr == nil { + scrapeErr = appErr } + + if err = sl.report(app, start, time.Since(start), total, added, seriesAdded, scrapeErr); err != nil { + level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + } + return start } func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) { @@ -1045,11 +1066,25 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int // Call sl.append again with an empty scrape to trigger stale markers. // If the target has since been recreated and scraped, the // stale markers will be out of order and ignored. - if _, _, _, err := sl.append([]byte{}, "", staleTime); err != nil { - level.Error(sl.l).Log("msg", "stale append failed", "err", err) + app := sl.appender() + var err error + defer func() { + if err != nil { + app.Rollback() + return + } + err = app.Commit() + if err != nil { + level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err) + } + }() + if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { + app.Rollback() + app = sl.appender() + level.Warn(sl.l).Log("msg", "Stale append failed", "err", err) } - if err := sl.reportStale(staleTime); err != nil { - level.Error(sl.l).Log("msg", "stale report failed", "err", err) + if err = sl.reportStale(app, staleTime); err != nil { + level.Warn(sl.l).Log("msg", "Stale report failed", "err", err) } } @@ -1074,9 +1109,8 @@ type appendErrors struct { numOutOfBounds int } -func (sl *scrapeLoop) append(b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { +func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts 
time.Time) (total, added, seriesAdded int, err error) { var ( - app = sl.appender() p = textparse.New(b, contentType) defTime = timestamp.FromTime(ts) appErrs = appendErrors{} @@ -1085,10 +1119,6 @@ func (sl *scrapeLoop) append(b []byte, contentType string, ts time.Time) (total, defer func() { if err != nil { - app.Rollback() - return - } - if err = app.Commit(); err != nil { return } // Only perform cache cleaning if the scrape was not empty. @@ -1188,7 +1218,7 @@ loop: } // Increment added even if there's an error so we correctly report the - // number of samples remaining after relabelling. + // number of samples remaining after relabeling. added++ } @@ -1275,7 +1305,7 @@ const ( scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" ) -func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) { +func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded int, scrapeErr error) (err error) { sl.scraper.Report(start, duration, scrapeErr) ts := timestamp.FromTime(start) @@ -1284,14 +1314,6 @@ func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, a if scrapeErr == nil { health = 1 } - app := sl.appender() - defer func() { - if err != nil { - app.Rollback() - return - } - err = app.Commit() - }() if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health); err != nil { return @@ -1311,16 +1333,8 @@ func (sl *scrapeLoop) report(start time.Time, duration time.Duration, scraped, a return } -func (sl *scrapeLoop) reportStale(start time.Time) (err error) { +func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { ts := timestamp.FromTime(start) - app := sl.appender() - defer func() { - if err != nil { - app.Rollback() - return - } - err = app.Commit() - }() stale := math.Float64frombits(value.StaleNaN) diff --git 
a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index acdcc4a172fe8..5a824d9659037 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -217,22 +217,35 @@ func (w *Writer) cut() error { return nil } -func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, err error) { +func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) { p, seq, err := nextSequenceFile(dirFile.Name()) if err != nil { - return 0, nil, 0, err + return 0, nil, 0, errors.Wrap(err, "next sequence file") } - f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE, 0666) + ptmp := p + ".tmp" + f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { - return 0, nil, 0, err - } + return 0, nil, 0, errors.Wrap(err, "open temp file") + } + defer func() { + if returnErr != nil { + var merr tsdb_errors.MultiError + merr.Add(returnErr) + if f != nil { + merr.Add(f.Close()) + } + // Calling RemoveAll on a non-existent file does not return error. + merr.Add(os.RemoveAll(ptmp)) + returnErr = merr.Err() + } + }() if allocSize > 0 { if err = fileutil.Preallocate(f, allocSize, true); err != nil { - return 0, nil, 0, err + return 0, nil, 0, errors.Wrap(err, "preallocate") } } if err = dirFile.Sync(); err != nil { - return 0, nil, 0, err + return 0, nil, 0, errors.Wrap(err, "sync directory") } // Write header metadata for new file. 
@@ -242,7 +255,24 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all n, err := f.Write(metab) if err != nil { - return 0, nil, 0, err + return 0, nil, 0, errors.Wrap(err, "write header") + } + if err := f.Close(); err != nil { + return 0, nil, 0, errors.Wrap(err, "close temp file") + } + f = nil + + if err := fileutil.Rename(ptmp, p); err != nil { + return 0, nil, 0, errors.Wrap(err, "replace file") + } + + f, err = os.OpenFile(p, os.O_WRONLY, 0666) + if err != nil { + return 0, nil, 0, errors.Wrap(err, "open final file") + } + // Skip header for further writes. + if _, err := f.Seek(int64(n), 0); err != nil { + return 0, nil, 0, errors.Wrap(err, "seek in final file") } return n, f, seq, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index 40b14afcdf01b..a654682d19029 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -260,7 +260,7 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef uint64, mint, maxt int64, chk c // The upper 4 bytes are for the head chunk file index and // the lower 4 bytes are for the head chunk file offset where to start reading this chunk. - chkRef = chunkRef(uint64(cdm.curFileSequence), uint64(cdm.curFileNumBytes)) + chkRef = chunkRef(uint64(cdm.curFileSequence), uint64(cdm.curFileSize())) binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], seriesRef) bytesWritten += SeriesRefSize @@ -308,8 +308,8 @@ func chunkRef(seq, offset uint64) (chunkRef uint64) { // Size retention: because depending on the system architecture, there is a limit on how big of a file we can m-map. // Time retention: so that we can delete old chunks with some time guarantee in low load environments. 
func (cdm *ChunkDiskMapper) shouldCutNewFile(chunkSize int) bool { - return cdm.curFileNumBytes == 0 || // First head chunk file. - cdm.curFileNumBytes+int64(chunkSize+MaxHeadChunkMetaSize) > MaxHeadChunkFileSize // Exceeds the max head chunk file size. + return cdm.curFileSize() == 0 || // First head chunk file. + cdm.curFileSize()+int64(chunkSize+MaxHeadChunkMetaSize) > MaxHeadChunkFileSize // Exceeds the max head chunk file size. } // CutNewFile creates a new m-mapped file. @@ -342,7 +342,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) { } }() - cdm.size += cdm.curFileNumBytes + cdm.size += cdm.curFileSize() atomic.StoreInt64(&cdm.curFileNumBytes, int64(n)) if cdm.curFile != nil { @@ -558,7 +558,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef, chunkRef uint64, mmapFile := cdm.mmappedChunkFiles[segID] fileEnd := mmapFile.byteSlice.Len() if segID == cdm.curFileSequence { - fileEnd = int(cdm.curFileNumBytes) + fileEnd = int(cdm.curFileSize()) } idx := HeadChunkFileHeaderSize for idx < fileEnd { @@ -681,7 +681,7 @@ func (cdm *ChunkDiskMapper) Truncate(mint int64) error { var merr tsdb_errors.MultiError // Cut a new file only if the current file has some chunks. - if cdm.curFileNumBytes > HeadChunkFileHeaderSize { + if cdm.curFileSize() > HeadChunkFileHeaderSize { merr.Add(cdm.CutNewFile()) } merr.Add(cdm.deleteFiles(removedFiles)) @@ -732,8 +732,11 @@ func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { // Size returns the size of the chunk files. func (cdm *ChunkDiskMapper) Size() int64 { - n := atomic.LoadInt64(&cdm.curFileNumBytes) - return cdm.size + n + return cdm.size + cdm.curFileSize() +} + +func (cdm *ChunkDiskMapper) curFileSize() int64 { + return atomic.LoadInt64(&cdm.curFileNumBytes) } // Close closes all the open files in ChunkDiskMapper. 
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index acbdbf6443f5d..c3078ed8ffb25 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -656,7 +656,9 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, defer func() { var merr tsdb_errors.MultiError merr.Add(err) - merr.Add(closeAll(closers)) + if cerr := closeAll(closers); cerr != nil { + merr.Add(errors.Wrap(cerr, "close")) + } err = merr.Err() c.metrics.populatingBlocks.Set(0) }() @@ -708,7 +710,6 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, s := newCompactionSeriesSet(indexr, chunkr, tombsr, all) syms := indexr.Symbols() - if i == 0 { set = s symbols = syms diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 2b0525ab6b2f8..e02ad5fae05c5 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -118,19 +118,27 @@ type Options struct { // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. // It is always a no-op in Prometheus and mainly meant for external users who import TSDB. SeriesLifecycleCallback SeriesLifecycleCallback + + // BlocksToDelete is a function which returns the blocks which can be deleted. + // It is always the default time and size based retention in Prometheus and + // mainly meant for external users who import TSDB. + BlocksToDelete BlocksToDeleteFunc } +type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} + // DB handles reads and writes of time series falling into // a hashed partition of a seriedb. 
type DB struct { dir string lockf fileutil.Releaser - logger log.Logger - metrics *dbMetrics - opts *Options - chunkPool chunkenc.Pool - compactor Compactor + logger log.Logger + metrics *dbMetrics + opts *Options + chunkPool chunkenc.Pool + compactor Compactor + blocksToDelete BlocksToDeleteFunc // Mutex for that must be held when modifying the general block layout. mtx sync.RWMutex @@ -560,14 +568,18 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } db = &DB{ - dir: dir, - logger: l, - opts: opts, - compactc: make(chan struct{}, 1), - donec: make(chan struct{}), - stopc: make(chan struct{}), - autoCompact: true, - chunkPool: chunkenc.NewPool(), + dir: dir, + logger: l, + opts: opts, + compactc: make(chan struct{}, 1), + donec: make(chan struct{}), + stopc: make(chan struct{}), + autoCompact: true, + chunkPool: chunkenc.NewPool(), + blocksToDelete: opts.BlocksToDelete, + } + if db.blocksToDelete == nil { + db.blocksToDelete = DefaultBlocksToDelete(db) } if !opts.NoLockfile { @@ -871,13 +883,17 @@ func (db *DB) reload() (err error) { return err } - deletable := db.deletableBlocks(loadable) + deletableULIDs := db.blocksToDelete(loadable) + deletable := make(map[ulid.ULID]*Block, len(deletableULIDs)) // Corrupted blocks that have been superseded by a loadable block can be safely ignored. // This makes it resilient against the process crashing towards the end of a compaction. // Creation of a new block and deletion of its parents cannot happen atomically. // By creating blocks with their parents, we can pick up the deletion where it left off during a crash. 
for _, block := range loadable { + if _, ok := deletableULIDs[block.meta.ULID]; ok { + deletable[block.meta.ULID] = block + } for _, b := range block.Meta().Compaction.Parents { delete(corrupted, b.ULID) deletable[b.ULID] = nil @@ -986,9 +1002,17 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po return blocks, corrupted, nil } +// DefaultBlocksToDelete returns a filter which decides time based and size based +// retention from the options of the db. +func DefaultBlocksToDelete(db *DB) BlocksToDeleteFunc { + return func(blocks []*Block) map[ulid.ULID]struct{} { + return deletableBlocks(db, blocks) + } +} + // deletableBlocks returns all blocks past retention policy. -func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block { - deletable := make(map[ulid.ULID]*Block) +func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} { + deletable := make(map[ulid.ULID]struct{}) // Sort the blocks by time - newest to oldest (largest to smallest timestamp). // This ensures that the retentions will remove the oldest blocks. @@ -998,34 +1022,36 @@ func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block { for _, block := range blocks { if block.Meta().Compaction.Deletable { - deletable[block.Meta().ULID] = block + deletable[block.Meta().ULID] = struct{}{} } } - for ulid, block := range db.beyondTimeRetention(blocks) { - deletable[ulid] = block + for ulid := range BeyondTimeRetention(db, blocks) { + deletable[ulid] = struct{}{} } - for ulid, block := range db.beyondSizeRetention(blocks) { - deletable[ulid] = block + for ulid := range BeyondSizeRetention(db, blocks) { + deletable[ulid] = struct{}{} } return deletable } -func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) { +// BeyondTimeRetention returns those blocks which are beyond the time retention +// set in the db options. 
+func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) { // Time retention is disabled or no blocks to work with. if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 { return } - deletable = make(map[ulid.ULID]*Block) + deletable = make(map[ulid.ULID]struct{}) for i, block := range blocks { // The difference between the first block and this block is larger than // the retention period so any blocks after that are added as deletable. if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > db.opts.RetentionDuration { for _, b := range blocks[i:] { - deletable[b.meta.ULID] = b + deletable[b.meta.ULID] = struct{}{} } db.metrics.timeRetentionCount.Inc() break @@ -1034,13 +1060,15 @@ func (db *DB) beyondTimeRetention(blocks []*Block) (deletable map[ulid.ULID]*Blo return deletable } -func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Block) { +// BeyondSizeRetention returns those blocks which are beyond the size retention +// set in the db options. +func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) { // Size retention is disabled or no blocks to work with. if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 { return } - deletable = make(map[ulid.ULID]*Block) + deletable = make(map[ulid.ULID]struct{}) walSize, _ := db.Head().wal.Size() headChunksSize := db.Head().chunkDiskMapper.Size() @@ -1052,7 +1080,7 @@ func (db *DB) beyondSizeRetention(blocks []*Block) (deletable map[ulid.ULID]*Blo if blocksSize > int64(db.opts.MaxBytes) { // Add this and all following blocks for deletion. 
for _, b := range blocks[i:] { - deletable[b.meta.ULID] = b + deletable[b.meta.ULID] = struct{}{} } db.metrics.sizeRetentionCount.Inc() break diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go index be9c6f2263c61..00bf63950b100 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/fileutil.go @@ -125,19 +125,5 @@ func Replace(from, to string) error { } } - if err := os.Rename(from, to); err != nil { - return err - } - - // Directory was renamed; sync parent dir to persist rename. - pdir, err := OpenDir(filepath.Dir(to)) - if err != nil { - return err - } - - if err = pdir.Sync(); err != nil { - pdir.Close() - return err - } - return pdir.Close() + return Rename(from, to) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index a21f5ad3177e8..066c722eaabb6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -44,6 +44,9 @@ var ( // ErrInvalidSample is returned if an appended sample is not valid and can't // be ingested. ErrInvalidSample = errors.New("invalid sample") + // ErrAppenderClosed is returned if an appender has already be successfully + // rolled back or commited. + ErrAppenderClosed = errors.New("appender closed") ) // Head handles reads and writes of time series data within a time window. @@ -638,11 +641,7 @@ func (h *Head) Init(minValidTime int64) error { defer h.postings.EnsureOrder() defer h.gc() // After loading the wal remove the obsolete data from the head. 
- if h.wal == nil { - return nil - } - - level.Info(h.logger).Log("msg", "Replaying WAL and on-disk memory mappable chunks if any, this may take a while") + level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any") start := time.Now() mmappedChunks, err := h.loadMmappedChunks() @@ -656,6 +655,15 @@ func (h *Head) Init(minValidTime int64) error { h.removeCorruptedMmappedChunks(err) } + level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(start).String()) + if h.wal == nil { + level.Info(h.logger).Log("msg", "WAL not found") + return nil + } + + level.Info(h.logger).Log("msg", "Replaying WAL, this may take a while") + + checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. dir, startFrom, err := wal.LastCheckpoint(h.wal.Dir()) if err != nil && err != record.ErrNotFound { @@ -681,7 +689,9 @@ func (h *Head) Init(minValidTime int64) error { startFrom++ level.Info(h.logger).Log("msg", "WAL checkpoint loaded") } + checkpointReplayDuration := time.Since(checkpointReplayStart) + walReplayStart := time.Now() // Find the last segment. 
_, last, err := h.wal.Segments() if err != nil { @@ -706,7 +716,12 @@ func (h *Head) Init(minValidTime int64) error { level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) } - level.Info(h.logger).Log("msg", "WAL replay completed", "duration", time.Since(start).String()) + level.Info(h.logger).Log( + "msg", "WAL replay completed", + "checkpoint_replay_duration", checkpointReplayDuration.String(), + "wal_replay_duration", time.Since(walReplayStart).String(), + "total_replay_duration", time.Since(start).String(), + ) return nil } @@ -835,7 +850,7 @@ func (h *Head) Truncate(mint int64) (err error) { return ok } h.metrics.checkpointCreationTotal.Inc() - if _, err = wal.Checkpoint(h.wal, first, last, keep, mint); err != nil { + if _, err = wal.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil { h.metrics.checkpointCreationFail.Inc() if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok { h.metrics.walCorruptionsTotal.Inc() @@ -1081,6 +1096,7 @@ type headAppender struct { sampleSeries []*memSeries appendID, cleanupAppendIDsBelow uint64 + closed bool } func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) { @@ -1104,6 +1120,7 @@ func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, erro if err != nil { return 0, err } + if created { a.series = append(a.series, record.RefSeries{ Ref: s.ref, @@ -1180,7 +1197,11 @@ func (a *headAppender) log() error { return nil } -func (a *headAppender) Commit() error { +func (a *headAppender) Commit() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() if err := a.log(); err != nil { //nolint: errcheck a.Rollback() // Most likely the same error will happen again. 
@@ -1218,7 +1239,11 @@ func (a *headAppender) Commit() error { return nil } -func (a *headAppender) Rollback() error { +func (a *headAppender) Rollback() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() defer a.head.metrics.activeAppenders.Dec() defer a.head.iso.closeAppend(a.appendID) defer a.head.putSeriesBuffer(a.sampleSeries) @@ -1318,9 +1343,12 @@ func (h *Head) gc() { } // Rebuild symbols and label value indices from what is left in the postings terms. + // symMtx ensures that append of symbols and postings is disabled for rebuild time. + h.symMtx.Lock() + defer h.symMtx.Unlock() + symbols := make(map[string]struct{}, len(h.symbols)) values := make(map[string]stringset, len(h.values)) - if err := h.postings.Iter(func(t labels.Label, _ index.Postings) error { symbols[t.Name] = struct{}{} symbols[t.Value] = struct{}{} @@ -1336,13 +1364,8 @@ func (h *Head) gc() { // This should never happen, as the iteration function only returns nil. panic(err) } - - h.symMtx.Lock() - h.symbols = symbols h.values = values - - h.symMtx.Unlock() } // Tombstones returns a new reader over the head's tombstones @@ -1707,10 +1730,7 @@ func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSerie h.symbols[l.Value] = struct{}{} } - // Postings should be set after setting the symbols (or after holding - // the symbol mtx) to avoid race during compaction of seeing partial symbols. h.postings.Add(id, lset) - return s, true, nil } @@ -2070,8 +2090,9 @@ func (s *memSeries) chunkID(pos int) int { return pos + s.firstChunkID } -// truncateChunksBefore removes all chunks from the series that have not timestamp -// at or after mint. Chunk IDs remain unchanged. +// truncateChunksBefore removes all chunks from the series that +// have no timestamp at or after mint. +// Chunk IDs remain unchanged. 
func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { var k int if s.headChunk != nil && s.headChunk.maxTime < mint { @@ -2200,10 +2221,8 @@ func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper * previousSamples += int(d.numSamples) } } - // mmappedChunks does not contain the last chunk. Hence check it separately. - if len(s.mmappedChunks) < ix { - previousSamples += s.headChunk.chunk.NumSamples() - } else { + + if s.headChunk != nil { totalSamples += s.headChunk.chunk.NumSamples() } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go index d205c61212dc0..e8aa4326d0704 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" "github.com/pkg/errors" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -87,10 +89,12 @@ const checkpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. 
-func Checkpoint(w *WAL, from, to int, keep func(id uint64) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser + level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) + { var sgmRange []SegmentRange diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go index f2335034d4777..1645f80d5c358 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go @@ -25,9 +25,11 @@ package testutil import ( "fmt" "reflect" + "testing" "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" + "go.uber.org/goleak" ) // This package is imported by non-test code and therefore cannot import the @@ -154,3 +156,14 @@ func formatMessage(msgAndArgs []interface{}) string { } return "" } + +// TolerantVerifyLeak verifies go leaks but excludes the go routines that are +// launched as side effects of some of our dependencies. 
+func TolerantVerifyLeak(m *testing.M) { + goleak.VerifyTestMain(m, + // https://github.com/census-instrumentation/opencensus-go/blob/d7677d6af5953e0506ac4c08f349c62b917a443a/stats/view/worker.go#L34 + goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + // https://github.com/kubernetes/klog/blob/c85d02d1c76a9ebafa81eb6d35c980734f2c4727/klog.go#L417 + goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"), + ) +} diff --git a/vendor/github.com/rs/cors/.travis.yml b/vendor/github.com/rs/cors/.travis.yml index 17e5e50d52760..9a68b5676277b 100644 --- a/vendor/github.com/rs/cors/.travis.yml +++ b/vendor/github.com/rs/cors/.travis.yml @@ -1,7 +1,8 @@ language: go go: -- 1.9 - "1.10" +- "1.11" +- "1.12" - tip matrix: allow_failures: diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go index d301ca724e5b4..2730934630956 100644 --- a/vendor/github.com/rs/cors/cors.go +++ b/vendor/github.com/rs/cors/cors.go @@ -5,8 +5,8 @@ as defined by http://www.w3.org/TR/cors/ You can configure it by passing an option struct to cors.New: c := cors.New(cors.Options{ - AllowedOrigins: []string{"foo.com"}, - AllowedMethods: []string{"GET", "POST", "DELETE"}, + AllowedOrigins: []string{"foo.com"}, + AllowedMethods: []string{http.MethodGet, http.MethodPost, http.MethodDelete}, AllowCredentials: true, }) @@ -69,10 +69,15 @@ type Options struct { Debug bool } +// Logger generic interface for logger +type Logger interface { + Printf(string, ...interface{}) +} + // Cors http handler type Cors struct { // Debug logger - Log *log.Logger + Log Logger // Normalized list of plain allowed origins allowedOrigins []string // List of allowed origins containing wildcards @@ -106,7 +111,7 @@ func New(options Options) *Cors { maxAge: options.MaxAge, optionPassthrough: options.OptionsPassthrough, } - if options.Debug { + if options.Debug && c.Log == nil { c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags) } @@ 
-161,7 +166,7 @@ func New(options Options) *Cors { // Allowed Methods if len(options.AllowedMethods) == 0 { // Default is spec's "simple" methods - c.allowedMethods = []string{"GET", "POST", "HEAD"} + c.allowedMethods = []string{http.MethodGet, http.MethodPost, http.MethodHead} } else { c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper) } @@ -178,8 +183,15 @@ func Default() *Cors { // origins with all standard methods with any header and credentials. func AllowAll() *Cors { return New(Options{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"HEAD", "GET", "POST", "PUT", "PATCH", "DELETE"}, + AllowedOrigins: []string{"*"}, + AllowedMethods: []string{ + http.MethodHead, + http.MethodGet, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + }, AllowedHeaders: []string{"*"}, AllowCredentials: false, }) @@ -304,10 +316,6 @@ func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { headers := w.Header() origin := r.Header.Get("Origin") - if r.Method == http.MethodOptions { - c.logf(" Actual request no headers added: method == %s", r.Method) - return - } // Always set Vary, see https://github.com/rs/cors/issues/10 headers.Add("Vary", "Origin") if origin == "" { @@ -342,7 +350,7 @@ func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { c.logf(" Actual response added headers: %v", headers) } -// convenience method. checks if debugging is turned on before printing +// convenience method. checks if a logger is set. func (c *Cors) logf(format string, a ...interface{}) { if c.Log != nil { c.Log.Printf(format, a...) 
diff --git a/vendor/github.com/rs/cors/utils.go b/vendor/github.com/rs/cors/utils.go index 53ad9e9db35b7..db83ac3ea9f44 100644 --- a/vendor/github.com/rs/cors/utils.go +++ b/vendor/github.com/rs/cors/utils.go @@ -12,7 +12,7 @@ type wildcard struct { } func (w wildcard) match(s string) bool { - return len(s) >= len(w.prefix+w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix) + return len(s) >= len(w.prefix)+len(w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix) } // convert converts a list of string using the passed converter function diff --git a/vendor/github.com/shurcooL/vfsgen/.travis.yml b/vendor/github.com/shurcooL/vfsgen/.travis.yml index 93b1fcdb31a27..6452acb283f3c 100644 --- a/vendor/github.com/shurcooL/vfsgen/.travis.yml +++ b/vendor/github.com/shurcooL/vfsgen/.travis.yml @@ -11,6 +11,6 @@ install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . + - diff -n <(echo -n) <(gofmt -d -s .) + - go vet ./... - go test -v -race ./... diff --git a/vendor/github.com/shurcooL/vfsgen/generator.go b/vendor/github.com/shurcooL/vfsgen/generator.go index 5782693ebbdbb..a9c8dd6347212 100644 --- a/vendor/github.com/shurcooL/vfsgen/generator.go +++ b/vendor/github.com/shurcooL/vfsgen/generator.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "errors" - "fmt" "io" "io/ioutil" "net/http" @@ -48,7 +47,6 @@ func Generate(input http.FileSystem, opt Options) error { } // Write output file (all at once). 
- fmt.Println("writing", opt.Filename) err = ioutil.WriteFile(opt.Filename, buf.Bytes(), 0644) return err } diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml index e81cc8805692f..f9c9a77768927 100644 --- a/vendor/github.com/uber/jaeger-client-go/.travis.yml +++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml @@ -7,22 +7,22 @@ dist: trusty matrix: include: - - go: 1.13.x + - go: 1.14.x env: - TESTS=true - USE_DEP=true - COVERAGE=true - - go: 1.13.x + - go: 1.14.x env: - USE_DEP=true - CROSSDOCK=true - - go: 1.13.x + - go: 1.14.x env: - TESTS=true - USE_DEP=false - USE_GLIDE=true # test with previous version of Go - - go: 1.12.x + - go: 1.13.x env: - TESTS=true - USE_DEP=true diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md index 944feb2c85c86..cab87e9d6a96b 100644 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -1,6 +1,27 @@ Changes by Version ================== +2.25.0 (2020-07-13) +------------------- +## Breaking changes +- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster + + The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments. + The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct, + or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable. 
+ +## Bug fixes +- Do not add invalid context to references (#521) -- Yuri Shkuro + + +2.24.0 (2020-06-14) +------------------- +- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher +- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima +- Override reporter config only when agent host/port is set in env (#513) -- ilylia +- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song + + 2.23.1 (2020-04-28) ------------------- - Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock index 2a5215a5008ba..387958b128137 100644 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock @@ -142,10 +142,19 @@ version = "v0.0.5" [[projects]] - digest = "1:0496f0e99014b7fd0a560c539f51d0882731137b85494142f47e550e4657176a" + digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" + name = "github.com/stretchr/objx" + packages = ["."] + pruneopts = "UT" + revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" + version = "v0.1.1" + +[[projects]] + digest = "1:d88ba57c4e8f5db6ce9ab6605a89f4542ee751b576884ba5271c2ba3d4b6f2d2" name = "github.com/stretchr/testify" packages = [ "assert", + "mock", "require", "suite", ] @@ -153,6 +162,42 @@ revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" version = "v1.4.0" +[[projects]] + digest = "1:5b98956718573850caf7e0fd00b571a6657c4ef1f345ddf0c96b43ce355fe862" + name = "github.com/uber/jaeger-client-go" + packages = [ + ".", + "config", + "crossdock/client", + "crossdock/common", + "crossdock/endtoend", + "crossdock/log", + "crossdock/server", + "crossdock/thrift/tracetest", + "internal/baggage", + "internal/baggage/remote", + "internal/reporterstats", + "internal/spanlog", + "internal/throttler", 
+ "internal/throttler/remote", + "log", + "log/zap/mock_opentracing", + "rpcmetrics", + "testutils", + "thrift", + "thrift-gen/agent", + "thrift-gen/baggage", + "thrift-gen/jaeger", + "thrift-gen/sampling", + "thrift-gen/zipkincore", + "transport", + "transport/zipkin", + "utils", + ] + pruneopts = "UT" + revision = "66c008c3d6ad856cac92a0af53186efbffa8e6a5" + version = "v2.24.0" + [[projects]] digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3" name = "github.com/uber/jaeger-lib" @@ -314,8 +359,36 @@ "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/mock", "github.com/stretchr/testify/require", "github.com/stretchr/testify/suite", + "github.com/uber/jaeger-client-go", + "github.com/uber/jaeger-client-go/config", + "github.com/uber/jaeger-client-go/crossdock/client", + "github.com/uber/jaeger-client-go/crossdock/common", + "github.com/uber/jaeger-client-go/crossdock/endtoend", + "github.com/uber/jaeger-client-go/crossdock/log", + "github.com/uber/jaeger-client-go/crossdock/server", + "github.com/uber/jaeger-client-go/crossdock/thrift/tracetest", + "github.com/uber/jaeger-client-go/internal/baggage", + "github.com/uber/jaeger-client-go/internal/baggage/remote", + "github.com/uber/jaeger-client-go/internal/reporterstats", + "github.com/uber/jaeger-client-go/internal/spanlog", + "github.com/uber/jaeger-client-go/internal/throttler", + "github.com/uber/jaeger-client-go/internal/throttler/remote", + "github.com/uber/jaeger-client-go/log", + "github.com/uber/jaeger-client-go/log/zap/mock_opentracing", + "github.com/uber/jaeger-client-go/rpcmetrics", + "github.com/uber/jaeger-client-go/testutils", + "github.com/uber/jaeger-client-go/thrift", + "github.com/uber/jaeger-client-go/thrift-gen/agent", + "github.com/uber/jaeger-client-go/thrift-gen/baggage", + 
"github.com/uber/jaeger-client-go/thrift-gen/jaeger", + "github.com/uber/jaeger-client-go/thrift-gen/sampling", + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore", + "github.com/uber/jaeger-client-go/transport", + "github.com/uber/jaeger-client-go/transport/zipkin", + "github.com/uber/jaeger-client-go/utils", "github.com/uber/jaeger-lib/metrics", "github.com/uber/jaeger-lib/metrics/metricstest", "github.com/uber/jaeger-lib/metrics/prometheus", diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md index 7c348e73a7471..687f5780ccef6 100644 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ b/vendor/github.com/uber/jaeger-client-go/README.md @@ -44,28 +44,34 @@ and [config/example_test.go](./config/example_test.go). ### Environment variables -The tracer can be initialized with values coming from environment variables. None of the env vars are required -and all of them can be overridden via direct setting of the property on the configuration object. +The tracer can be initialized with values coming from environment variables, if it is +[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer) +that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv). +None of the env vars are required and all of them can be overridden via direct setting +of the property on the configuration object. Property| Description --- | --- -JAEGER_SERVICE_NAME | The service name -JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP -JAEGER_AGENT_PORT | The port for communicating with agent via UDP -JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. 
http://jaeger-collector:14268/api/traces -JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint -JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint -JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans -JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size -JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. "500ms" or "2s" ([valid units][timeunits]) -JAEGER_SAMPLER_TYPE | The sampler type -JAEGER_SAMPLER_PARAM | The sampler parameter (number) -JAEGER_SAMPLER_MANAGER_HOST_PORT | The HTTP endpoint when using the remote sampler, i.e. http://jaeger-agent:5778/sampling -JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of -JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy, with units, e.g. "1m" or "30s" ([valid units][timeunits]) -JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found -JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used. -JAEGER_RPC_METRICS | Whether to store RPC metrics +JAEGER_SERVICE_NAME | The service name. +JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`). +JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`). +JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored. +JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint. 
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint. +JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans" `true` or `false` (default `false`). +JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`). +JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`). +JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables udp connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`). +JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`). +JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/. +JAEGER_SAMPLER_PARAM | The sampler parameter (number). +JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler. +JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`). +JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`). +JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`). +JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`. +JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`). 
+JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`). By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md index 115e49ab8ad36..12438d84169ab 100644 --- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md +++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md @@ -2,6 +2,7 @@ 1. Create a PR "Preparing for release X.Y.Z" against master branch * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)` + * Use `git log --pretty=format:'- %s -- %an'` as the basis for for changelog entries * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z` 2. Create a release "Release X.Y.Z" on Github * Create Tag `vX.Y.Z` diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go index 44e93533cf3a0..bb12282943265 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -22,6 +22,7 @@ import ( "time" "github.com/opentracing/opentracing-go" + "github.com/uber/jaeger-client-go/utils" "github.com/uber/jaeger-client-go" "github.com/uber/jaeger-client-go/internal/baggage/remote" @@ -36,16 +37,16 @@ const defaultSamplingProbability = 0.001 // Configuration configures and creates Jaeger Tracer type Configuration struct { // ServiceName specifies the service name to use on the tracer. 
- // Can be provided via environment variable named JAEGER_SERVICE_NAME + // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME ServiceName string `yaml:"serviceName"` - // Disabled can be provided via environment variable named JAEGER_DISABLED + // Disabled can be provided by FromEnv() via the environment variable named JAEGER_DISABLED Disabled bool `yaml:"disabled"` - // RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS + // RPCMetrics can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS RPCMetrics bool `yaml:"rpc_metrics"` - // Tags can be provided via environment variable named JAEGER_TAGS + // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS Tags []opentracing.Tag `yaml:"tags"` Sampler *SamplerConfig `yaml:"sampler"` @@ -57,8 +58,8 @@ type Configuration struct { // SamplerConfig allows initializing a non-default sampler. All fields are optional. type SamplerConfig struct { - // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote - // Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE + // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE Type string `yaml:"type"` // Param is a value passed to the sampler. @@ -69,22 +70,23 @@ type SamplerConfig struct { // - for "remote" sampler, param is the same as for "probabilistic" // and indicates the initial sampling rate before the actual one // is received from the mothership. 
- // Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM Param float64 `yaml:"param"` - // SamplingServerURL is the address of jaeger-agent's HTTP sampling server - // Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT + // SamplingServerURL is the URL of sampling manager that can provide + // sampling strategy to this service. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT SamplingServerURL string `yaml:"samplingServerURL"` // SamplingRefreshInterval controls how often the remotely controlled sampler will poll - // jaeger-agent for the appropriate sampling strategy. - // Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL + // sampling manager for the appropriate sampling strategy. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` // MaxOperations is the maximum number of operations that the PerOperationSampler // will keep track of. If an operation is not tracked, a default probabilistic // sampler will be used rather than the per operation specific sampler. - // Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS. + // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS. MaxOperations int `yaml:"maxOperations"` // Opt-in feature for applications that require late binding of span name via explicit @@ -105,34 +107,46 @@ type ReporterConfig struct { // QueueSize controls how many spans the reporter can keep in memory before it starts dropping // new spans. The queue is continuously drained by a background go-routine, as fast as spans // can be sent out of process. 
- // Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE QueueSize int `yaml:"queueSize"` // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. // It is generally not useful, as it only matters for very low traffic services. - // Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL BufferFlushInterval time.Duration // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter // and logs all submitted spans. Main Configuration.Logger must be initialized in the code // for this option to have any effect. - // Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS LogSpans bool `yaml:"logSpans"` - // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address - // Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT + // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address. + // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT LocalAgentHostPort string `yaml:"localAgentHostPort"` - // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL - // Can be set by exporting an environment variable named JAEGER_ENDPOINT + // DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves + // the agent's hostname and reconnects if there was a change. This option only + // applies if LocalAgentHostPort is specified. 
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED + DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"` + + // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname + // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false. + // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL + AttemptReconnectInterval time.Duration + + // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL. + // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT CollectorEndpoint string `yaml:"collectorEndpoint"` // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector. - // Can be set by exporting an environment variable named JAEGER_USER + // Can be provided by FromEnv() via the environment variable named JAEGER_USER User string `yaml:"user"` // Password instructs reporter to include a password for basic http authentication when sending spans to - // jaeger-collector. Can be set by exporting an environment variable named JAEGER_PASSWORD + // jaeger-collector. + // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD Password string `yaml:"password"` // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans. 
@@ -382,7 +396,7 @@ func (rc *ReporterConfig) NewReporter( metrics *jaeger.Metrics, logger jaeger.Logger, ) (jaeger.Reporter, error) { - sender, err := rc.newTransport() + sender, err := rc.newTransport(logger) if err != nil { return nil, err } @@ -399,7 +413,7 @@ func (rc *ReporterConfig) NewReporter( return reporter, err } -func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { +func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) { switch { case rc.CollectorEndpoint != "": httpOptions := []transport.HTTPOption{transport.HTTPBatchSize(1), transport.HTTPHeaders(rc.HTTPHeaders)} @@ -408,6 +422,13 @@ func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { } return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil default: - return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0) + return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{ + AgentClientUDPParams: utils.AgentClientUDPParams{ + HostPort: rc.LocalAgentHostPort, + Logger: logger, + DisableAttemptReconnecting: rc.DisableAttemptReconnecting, + AttemptReconnectInterval: rc.AttemptReconnectInterval, + }, + }) } } diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go index a729bd8fe6007..92d60cd5926a5 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go @@ -24,29 +24,31 @@ import ( "github.com/opentracing/opentracing-go" "github.com/pkg/errors" - "github.com/uber/jaeger-client-go" ) const ( // environment variable names - envServiceName = "JAEGER_SERVICE_NAME" - envDisabled = "JAEGER_DISABLED" - envRPCMetrics = "JAEGER_RPC_METRICS" - envTags = "JAEGER_TAGS" - envSamplerType = "JAEGER_SAMPLER_TYPE" - envSamplerParam = "JAEGER_SAMPLER_PARAM" - envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" - envSamplerMaxOperations = 
"JAEGER_SAMPLER_MAX_OPERATIONS" - envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" - envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" - envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" - envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" - envEndpoint = "JAEGER_ENDPOINT" - envUser = "JAEGER_USER" - envPassword = "JAEGER_PASSWORD" - envAgentHost = "JAEGER_AGENT_HOST" - envAgentPort = "JAEGER_AGENT_PORT" + envServiceName = "JAEGER_SERVICE_NAME" + envDisabled = "JAEGER_DISABLED" + envRPCMetrics = "JAEGER_RPC_METRICS" + envTags = "JAEGER_TAGS" + envSamplerType = "JAEGER_SAMPLER_TYPE" + envSamplerParam = "JAEGER_SAMPLER_PARAM" + envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint + envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT" + envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" + envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" + envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" + envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" + envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" + envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED" + envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL" + envEndpoint = "JAEGER_ENDPOINT" + envUser = "JAEGER_USER" + envPassword = "JAEGER_PASSWORD" + envAgentHost = "JAEGER_AGENT_HOST" + envAgentPort = "JAEGER_AGENT_PORT" ) // FromEnv uses environment variables to set the tracer's Configuration @@ -118,7 +120,9 @@ func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) { } } - if e := os.Getenv(envSamplerManagerHostPort); e != "" { + if e := os.Getenv(envSamplingEndpoint); e != "" { + sc.SamplingServerURL = e + } else if e := os.Getenv(envSamplerManagerHostPort); e != "" { sc.SamplingServerURL = e } else if e := os.Getenv(envAgentHost); e != "" { // Fallback if we know the agent host - try the sampling endpoint there @@ -184,20 +188,43 
@@ func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) { rc.User = user rc.Password = pswd } else { + useEnv := false host := jaeger.DefaultUDPSpanServerHost if e := os.Getenv(envAgentHost); e != "" { host = e + useEnv = true } port := jaeger.DefaultUDPSpanServerPort if e := os.Getenv(envAgentPort); e != "" { if value, err := strconv.ParseInt(e, 10, 0); err == nil { port = int(value) + useEnv = true } else { return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e) } } - rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) + if useEnv || rc.LocalAgentHostPort == "" { + rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) + } + + if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" { + if value, err := strconv.ParseBool(e); err == nil { + rc.DisableAttemptReconnecting = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e) + } + } + + if !rc.DisableAttemptReconnecting { + if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" { + if value, err := time.ParseDuration(e); err == nil { + rc.AttemptReconnectInterval = value + } else { + return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e) + } + } + } } return rc, nil diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go index 1f8578fbdaf1a..2f63d5909420a 100644 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ b/vendor/github.com/uber/jaeger-client-go/constants.go @@ -22,7 +22,7 @@ import ( const ( // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.23.1" + JaegerClientVersion = "Go-2.25.0" // JaegerClientVersionTagKey is the name of the tag used to report client version. 
JaegerClientVersionTagKey = "jaeger.version" @@ -102,5 +102,5 @@ const ( var ( // DefaultSamplingServerURL is the default url to fetch sampling config from, via http - DefaultSamplingServerURL = fmt.Sprintf("http://localhost:%d/sampling", DefaultSamplingServerPort) + DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort) ) diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go index 112e3e1cb3ee6..f2edd5ca9436b 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go @@ -64,7 +64,7 @@ type RemotelyControlledSampler struct { // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq closed int64 // 0 - not closed, 1 - closed - sync.RWMutex + sync.RWMutex // used to serialize access to samplerOptions.sampler samplerOptions serviceName string @@ -95,22 +95,22 @@ func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (boo // OnCreateSpan implements OnCreateSpan of SamplerV2. func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision { - return s.sampler.OnCreateSpan(span) + return s.Sampler().OnCreateSpan(span) } // OnSetOperationName implements OnSetOperationName of SamplerV2. func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { - return s.sampler.OnSetOperationName(span, operationName) + return s.Sampler().OnSetOperationName(span, operationName) } // OnSetTag implements OnSetTag of SamplerV2. func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { - return s.sampler.OnSetTag(span, key, value) + return s.Sampler().OnSetTag(span, key, value) } // OnFinishSpan implements OnFinishSpan of SamplerV2. 
func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision { - return s.sampler.OnFinishSpan(span) + return s.Sampler().OnFinishSpan(span) } // Close implements Close() of Sampler. @@ -153,8 +153,8 @@ func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker // Sampler returns the currently active sampler. func (s *RemotelyControlledSampler) Sampler() SamplerV2 { - s.Lock() - defer s.Unlock() + s.RLock() + defer s.RUnlock() return s.sampler } diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go index 1b44f3f8c2f1b..ae9d94a9a2ccf 100644 --- a/vendor/github.com/uber/jaeger-client-go/span_context.go +++ b/vendor/github.com/uber/jaeger-client-go/span_context.go @@ -212,10 +212,14 @@ func (c SpanContext) SetFirehose() { } func (c SpanContext) String() string { + var flags int32 + if c.samplingState != nil { + flags = c.samplingState.stateFlags.Load() + } if c.traceID.High == 0 { - return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) + return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) } - return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) + return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) } // ContextFromString reconstructs the Context encoded in a string diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go index 8a3fc97abbcc8..477c6eae317a4 100644 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ b/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -216,10 +216,10 @@ func (t *Tracer) startSpanWithOptions( options.StartTime = t.timeNow() } - // Predicate 
whether the given span context is a valid reference - // which may be used as parent / debug ID / baggage items source - isValidReference := func(ctx SpanContext) bool { - return ctx.IsValid() || ctx.isDebugIDContainerOnly() || len(ctx.baggage) != 0 + // Predicate whether the given span context is an empty reference + // or may be used as parent / debug ID / baggage items source + isEmptyReference := func(ctx SpanContext) bool { + return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0 } var references []Reference @@ -235,7 +235,7 @@ func (t *Tracer) startSpanWithOptions( reflect.ValueOf(ref.ReferencedContext))) continue } - if !isValidReference(ctxRef) { + if isEmptyReference(ctxRef) { continue } @@ -245,14 +245,17 @@ func (t *Tracer) startSpanWithOptions( continue } - references = append(references, Reference{Type: ref.Type, Context: ctxRef}) + if ctxRef.IsValid() { + // we don't want empty context that contains only debug-id or baggage + references = append(references, Reference{Type: ref.Type, Context: ctxRef}) + } if !hasParent { parent = ctxRef hasParent = ref.Type == opentracing.ChildOfRef } } - if !hasParent && isValidReference(parent) { + if !hasParent && !isEmptyReference(parent) { // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from // the FollowFromRef as the parent hasParent = true diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go index 7370d8007518d..5734819ab1291 100644 --- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go +++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/uber/jaeger-client-go/internal/reporterstats" + "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift" j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" "github.com/uber/jaeger-client-go/utils" @@ -57,35 +58,57 @@ type udpSender 
struct { failedToEmitSpans int64 } -// NewUDPTransport creates a reporter that submits spans to jaeger-agent. +// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should +// be passed to NewUDPTransportWithParams. +type UDPTransportParams struct { + utils.AgentClientUDPParams +} + +// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent. // TODO: (breaking change) move to transport/ package. -func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { - if len(hostPort) == 0 { - hostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) +func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) { + if len(params.HostPort) == 0 { + params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) } - if maxPacketSize == 0 { - maxPacketSize = utils.UDPPacketMaxLength + + if params.Logger == nil { + params.Logger = log.StdLogger + } + + if params.MaxPacketSize == 0 { + params.MaxPacketSize = utils.UDPPacketMaxLength } protocolFactory := thrift.NewTCompactProtocolFactory() // Each span is first written to thriftBuffer to determine its size in bytes. - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - client, err := utils.NewAgentClientUDP(hostPort, maxPacketSize) + client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams) if err != nil { return nil, err } return &udpSender{ client: client, - maxSpanBytes: maxPacketSize - emitBatchOverhead, + maxSpanBytes: params.MaxPacketSize - emitBatchOverhead, thriftBuffer: thriftBuffer, thriftProtocol: thriftProtocol, }, nil } +// NewUDPTransport creates a reporter that submits spans to jaeger-agent. +// TODO: (breaking change) move to transport/ package. 
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { + return NewUDPTransportWithParams(UDPTransportParams{ + AgentClientUDPParams: utils.AgentClientUDPParams{ + HostPort: hostPort, + MaxPacketSize: maxPacketSize, + }, + }) +} + // SetReporterStats implements reporterstats.Receiver. func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) { s.reporterStats = rs diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go new file mode 100644 index 0000000000000..0dffc7fa247cd --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go @@ -0,0 +1,189 @@ +// Copyright (c) 2020 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/uber/jaeger-client-go/log" +) + +// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. 
+type reconnectingUDPConn struct { + hostPort string + resolveFunc resolveFunc + dialFunc dialFunc + logger log.Logger + bufferBytes int64 + + connMtx sync.RWMutex + conn *net.UDPConn + destAddr *net.UDPAddr + closeChan chan struct{} +} + +type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) +type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) + +// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) { + conn := &reconnectingUDPConn{ + hostPort: hostPort, + resolveFunc: resolveFunc, + dialFunc: dialFunc, + logger: logger, + closeChan: make(chan struct{}), + } + + if err := conn.attemptResolveAndDial(); err != nil { + logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. 
retrying in %s", err.Error(), resolveTimeout)) + } + + go conn.reconnectLoop(resolveTimeout) + + return conn, nil +} + +func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { + ticker := time.NewTicker(resolveTimeout) + defer ticker.Stop() + + for { + select { + case <-c.closeChan: + return + case <-ticker.C: + if err := c.attemptResolveAndDial(); err != nil { + c.logger.Error(err.Error()) + } + } + } +} + +func (c *reconnectingUDPConn) attemptResolveAndDial() error { + newAddr, err := c.resolveFunc("udp", c.hostPort) + if err != nil { + return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) + } + + c.connMtx.RLock() + curAddr := c.destAddr + c.connMtx.RUnlock() + + // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn + if curAddr != nil && newAddr.String() == curAddr.String() { + return nil + } + + if err := c.attemptDialNewAddr(newAddr); err != nil { + return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) + } + + return nil +} + +func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { + connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) + if err != nil { + return err + } + + if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { + if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { + return err + } + } + + c.connMtx.Lock() + c.destAddr = newAddr + // store prev to close later + prevConn := c.conn + c.conn = connUDP + c.connMtx.Unlock() + + if prevConn != nil { + return prevConn.Close() + } + + return nil +} + +// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning +func (c *reconnectingUDPConn) Write(b []byte) (int, error) { + var bytesWritten int + var err error + + c.connMtx.RLock() + if c.conn == nil { + // if connection is not initialized indicate this with err 
in order to hook into retry logic + err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") + } else { + bytesWritten, err = c.conn.Write(b) + } + c.connMtx.RUnlock() + + if err == nil { + return bytesWritten, nil + } + + // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again + if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { + c.connMtx.RLock() + defer c.connMtx.RUnlock() + return c.conn.Write(b) + } + + // return original error if reconn fails + return bytesWritten, err +} + +// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation +func (c *reconnectingUDPConn) Close() error { + close(c.closeChan) + + // acquire rw lock before closing conn to ensure calls to Write drain + c.connMtx.Lock() + defer c.connMtx.Unlock() + + if c.conn != nil { + return c.conn.Close() + } + + return nil +} + +// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. 
if no conn is currently held +// and SetWriteBuffer is called store bufferBytes to be set for new conns +func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { + var err error + + c.connMtx.RLock() + if c.conn != nil { + err = c.conn.SetWriteBuffer(bytes) + } + c.connMtx.RUnlock() + + if err == nil { + atomic.StoreInt64(&c.bufferBytes, int64(bytes)) + } + + return err +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go index fadd73e49aea2..2352643ce60aa 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go @@ -19,7 +19,9 @@ import ( "fmt" "io" "net" + "time" + "github.com/uber/jaeger-client-go/log" "github.com/uber/jaeger-client-go/thrift" "github.com/uber/jaeger-client-go/thrift-gen/agent" @@ -35,41 +37,90 @@ type AgentClientUDP struct { agent.Agent io.Closer - connUDP *net.UDPConn + connUDP udpConn client *agent.AgentClient maxPacketSize int // max size of datagram in bytes thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span } -// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. -func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { - if maxPacketSize == 0 { - maxPacketSize = UDPPacketMaxLength +type udpConn interface { + Write([]byte) (int, error) + SetWriteBuffer(int) error + Close() error +} + +// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should +// be passed to NewAgentClientUDPWithParams. +type AgentClientUDPParams struct { + HostPort string + MaxPacketSize int + Logger log.Logger + DisableAttemptReconnecting bool + AttemptReconnectInterval time.Duration +} + +// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP. 
+func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) { + // validate hostport + if _, _, err := net.SplitHostPort(params.HostPort); err != nil { + return nil, err + } + + if params.MaxPacketSize == 0 { + params.MaxPacketSize = UDPPacketMaxLength + } + + if params.Logger == nil { + params.Logger = log.StdLogger } - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 { + params.AttemptReconnectInterval = time.Second * 30 + } + + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) protocolFactory := thrift.NewTCompactProtocolFactory() client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory) - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } + var connUDP udpConn + var err error - connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err + if params.DisableAttemptReconnecting { + destAddr, err := net.ResolveUDPAddr("udp", params.HostPort) + if err != nil { + return nil, err + } + + connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + } else { + // host is hostname, setup resolver loop in case host record changes during operation + connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) + if err != nil { + return nil, err + } } - if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { + + if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { return nil, err } - clientUDP := &AgentClientUDP{ + return &AgentClientUDP{ connUDP: connUDP, client: client, - maxPacketSize: maxPacketSize, - thriftBuffer: thriftBuffer} - return clientUDP, nil + maxPacketSize: params.MaxPacketSize, + thriftBuffer: thriftBuffer, + }, nil +} + +// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over 
UDP. +func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { + return NewAgentClientUDPWithParams(AgentClientUDPParams{ + HostPort: hostPort, + MaxPacketSize: maxPacketSize, + }) } // EmitZipkinBatch implements EmitZipkinBatch() of Agent interface diff --git a/vendor/github.com/weaveworks/common/logging/format.go b/vendor/github.com/weaveworks/common/logging/format.go new file mode 100644 index 0000000000000..701b32f563232 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/format.go @@ -0,0 +1,52 @@ +package logging + +import ( + "flag" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Format is a settable identifier for the output format of logs +type Format struct { + s string + Logrus logrus.Formatter +} + +// RegisterFlags adds the log format flag to the provided flagset. +func (f *Format) RegisterFlags(fs *flag.FlagSet) { + f.Set("logfmt") + fs.Var(f, "log.format", "Output log messages in the given format. Valid formats: [logfmt, json]") +} + +func (f Format) String() string { + return f.s +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (f *Format) UnmarshalYAML(unmarshal func(interface{}) error) error { + var format string + if err := unmarshal(&format); err != nil { + return err + } + return f.Set(format) +} + +// MarshalYAML implements yaml.Marshaler. +func (f Format) MarshalYAML() (interface{}, error) { + return f.String(), nil +} + +// Set updates the value of the output format. 
Implements flag.Value +func (f *Format) Set(s string) error { + switch s { + case "logfmt": + f.Logrus = &logrus.JSONFormatter{} + case "json": + f.Logrus = &logrus.JSONFormatter{} + default: + return errors.Errorf("unrecognized log format %q", s) + } + f.s = s + return nil +} diff --git a/vendor/github.com/weaveworks/common/logging/gokit.go b/vendor/github.com/weaveworks/common/logging/gokit.go index b5137fa4d6237..508fc385bdcd6 100644 --- a/vendor/github.com/weaveworks/common/logging/gokit.go +++ b/vendor/github.com/weaveworks/common/logging/gokit.go @@ -8,14 +8,25 @@ import ( "github.com/go-kit/kit/log/level" ) -// NewGoKit creates a new Interface backed by a GoKit logger -func NewGoKit(l Level) Interface { - logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +// NewGoKitFormat creates a new Interface backed by a GoKit logger +// format can be "json" or defaults to logfmt +func NewGoKitFormat(l Level, f Format) Interface { + var logger log.Logger + if f.s == "json" { + logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr)) + } else { + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + } logger = level.NewFilter(logger, l.Gokit) logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) return gokit{logger} } +// NewGoKit creates a new Interface backed by a GoKit logger +func NewGoKit(l Level) Interface { + return NewGoKitFormat(l, Format{s: "logfmt"}) +} + // GoKit wraps an existing gokit Logger. 
func GoKit(logger log.Logger) Interface { return gokit{logger} diff --git a/vendor/github.com/weaveworks/common/logging/logrus.go b/vendor/github.com/weaveworks/common/logging/logrus.go index 8ee641413c510..d302613f7b61e 100644 --- a/vendor/github.com/weaveworks/common/logging/logrus.go +++ b/vendor/github.com/weaveworks/common/logging/logrus.go @@ -6,14 +6,21 @@ import ( "github.com/sirupsen/logrus" ) -// NewLogrus makes a new Interface backed by a logrus logger -func NewLogrus(level Level) Interface { +// NewLogrusFormat makes a new Interface backed by a logrus logger +// format can be "json" or defaults to logfmt +func NewLogrusFormat(level Level, f Format) Interface { log := logrus.New() log.Out = os.Stderr log.Level = level.Logrus + log.Formatter = f.Logrus return logrusLogger{log} } +// NewLogrus makes a new Interface backed by a logrus logger +func NewLogrus(level Level) Interface { + return NewLogrusFormat(level, Format{Logrus: &logrus.TextFormatter{}}) +} + // Logrus wraps an existing Logrus logger. func Logrus(l *logrus.Logger) Interface { return logrusLogger{l} diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go index 9fb23820a5049..4dc8ae0e9cd37 100644 --- a/vendor/github.com/weaveworks/common/server/server.go +++ b/vendor/github.com/weaveworks/common/server/server.go @@ -76,8 +76,9 @@ type Config struct { GRPCServerTime time.Duration `yaml:"grpc_server_keepalive_time"` GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"` - LogLevel logging.Level `yaml:"log_level"` - Log logging.Interface `yaml:"-"` + LogFormat logging.Format `yaml:"log_format"` + LogLevel logging.Level `yaml:"log_level"` + Log logging.Interface `yaml:"-"` // If not set, default signal handler is used. 
SignalHandler SignalHandler `yaml:"-"` @@ -117,6 +118,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.GRPCServerTime, "server.grpc.keepalive.time", time.Hour*2, "Duration after which a keepalive probe is sent in case of no activity over the connection., Default: 2h") f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s") f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)") + cfg.LogFormat.RegisterFlags(f) cfg.LogLevel.RegisterFlags(f) } diff --git a/vendor/go.uber.org/goleak/.gitignore b/vendor/go.uber.org/goleak/.gitignore new file mode 100644 index 0000000000000..0fff519a4ab7b --- /dev/null +++ b/vendor/go.uber.org/goleak/.gitignore @@ -0,0 +1,5 @@ +vendor/ +/bin +/lint.log +/cover.out +/cover.html diff --git a/vendor/go.uber.org/goleak/.travis.yml b/vendor/go.uber.org/goleak/.travis.yml new file mode 100644 index 0000000000000..b215cef17cdba --- /dev/null +++ b/vendor/go.uber.org/goleak/.travis.yml @@ -0,0 +1,24 @@ +sudo: false +language: go +go_import_path: go.uber.org/goleak + +env: + global: + - GO111MODULE=on + +matrix: + include: + - go: 1.12.x + - go: 1.13.x + env: LINT=1 + +install: + - make install + +script: + - test -z "$LINT" || make lint + - make test + +after_success: + - make cover + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/goleak/CHANGELOG.md b/vendor/go.uber.org/goleak/CHANGELOG.md new file mode 100644 index 0000000000000..2dfb98f619bec --- /dev/null +++ b/vendor/go.uber.org/goleak/CHANGELOG.md @@ -0,0 +1,17 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+ +## [1.0.0] +### Changed +- Migrate to Go modules. + +### Fixed +- Ignore trace related goroutines that cause false positives with -trace. + +## 0.10.0 +- Initial release. + +[1.0.0]: https://github.com/uber-go/goleak/compare/v0.10.0...v1.0.0 diff --git a/vendor/go.uber.org/goleak/LICENSE b/vendor/go.uber.org/goleak/LICENSE new file mode 100644 index 0000000000000..6c9bde216e211 --- /dev/null +++ b/vendor/go.uber.org/goleak/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/goleak/Makefile b/vendor/go.uber.org/goleak/Makefile new file mode 100644 index 0000000000000..53763fa8d112a --- /dev/null +++ b/vendor/go.uber.org/goleak/Makefile @@ -0,0 +1,41 @@ +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint + +GO_FILES := $(shell \ + find . 
'(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: install +install: + go mod download + +.PHONY: test +test: + go test -v -race ./... + go test -v -trace=/dev/null . + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +$(GOLINT): + go install golang.org/x/lint/golint + +.PHONY: lint +lint: $(GOLINT) + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @go vet ./... 2>&1 | tee -a lint.log + @echo "Checking lint..." + @$(GOLINT) ./... 2>&1 | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e '^vendor/' -e '^Makefile' | tee -a lint.log + @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/goleak/README.md b/vendor/go.uber.org/goleak/README.md new file mode 100644 index 0000000000000..8702de9fd1f09 --- /dev/null +++ b/vendor/go.uber.org/goleak/README.md @@ -0,0 +1,70 @@ +# goleak [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Goroutine leak detector to help avoid Goroutine leaks. + +## Development Status: Alpha + +goleak is still in development, and APIs are still in flux. + +## Installation + +You can use `go get` to get the latest version: + +`go get -u go.uber.org/goleak` + +`goleak` also supports semver releases. It is compatible with Go 1.5+. + +## Quick Start + +To verify that there are no unexpected goroutines running at the end of a test: + +```go +func TestA(t *testing.T) { + defer goleak.VerifyNone(t) + + // test logic here. 
+} +``` + +Instead of checking for leaks at the end of every test, `goleak` can also be run +at the end of every test package by creating a `TestMain` function for your +package: + +```go +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} +``` + +## Determine Source of Package Leaks + +When verifying leaks using `TestMain`, the leak test is only run once after all tests +have been run. This is typically enough to ensure there's no goroutines leaked from +tests, but when there are leaks, it's hard to determine which test is causing them. + +You can use the following bash script to determine the source of the failing test: + +```sh +# Create a test binary which will be used to run each test individually +$ go test -c -o tests + +# Run each test individually, printing "." for successful tests, or the test name +# for failing tests. +$ for test in $(go test -list . | grep "^Test"); do ./tests -test.run "^$test\$" &>/dev/null && echo -n "." || echo "\n$test failed"; done +``` + +This will only print names of failing tests which can be investigated individually. E.g., + +``` +..... +TestLeakyTest failed +....... +``` + + +[doc-img]: https://godoc.org/go.uber.org/goleak?status.svg +[doc]: https://godoc.org/go.uber.org/goleak +[ci-img]: https://travis-ci.com/uber-go/goleak.svg?branch=master +[ci]: https://travis-ci.com/uber-go/goleak +[cov-img]: https://codecov.io/gh/uber-go/goleak/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/goleak diff --git a/vendor/go.uber.org/goleak/doc.go b/vendor/go.uber.org/goleak/doc.go new file mode 100644 index 0000000000000..3832f8dbc5f45 --- /dev/null +++ b/vendor/go.uber.org/goleak/doc.go @@ -0,0 +1,22 @@ +// Copyright (c) 2018 Uber Technologies, Inc. 
+ +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package goleak is a Goroutine leak detector. 
+package goleak // import "go.uber.org/goleak" diff --git a/vendor/go.uber.org/goleak/glide.yaml b/vendor/go.uber.org/goleak/glide.yaml new file mode 100644 index 0000000000000..c6e7a00a06d6c --- /dev/null +++ b/vendor/go.uber.org/goleak/glide.yaml @@ -0,0 +1,8 @@ +package: go.uber.org/goleak +import: [] +testImport: +- package: github.com/stretchr/testify + version: ^1.1.4 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/goleak/go.mod b/vendor/go.uber.org/goleak/go.mod new file mode 100644 index 0000000000000..742547abd739b --- /dev/null +++ b/vendor/go.uber.org/goleak/go.mod @@ -0,0 +1,11 @@ +module go.uber.org/goleak + +go 1.13 + +require ( + github.com/kr/pretty v0.1.0 // indirect + github.com/stretchr/testify v1.4.0 + golang.org/x/lint v0.0.0-20190930215403-16217165b5de + golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect +) diff --git a/vendor/go.uber.org/goleak/go.sum b/vendor/go.uber.org/goleak/go.sum new file mode 100644 index 0000000000000..09b27d7eebfb1 --- /dev/null +++ b/vendor/go.uber.org/goleak/go.sum @@ -0,0 +1,30 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/go.uber.org/goleak/internal/stack/stacks.go b/vendor/go.uber.org/goleak/internal/stack/stacks.go new file mode 100644 index 0000000000000..94f82e4c0d509 --- /dev/null +++ b/vendor/go.uber.org/goleak/internal/stack/stacks.go @@ -0,0 +1,155 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package stack + +import ( + "bufio" + "bytes" + "fmt" + "io" + "runtime" + "strconv" + "strings" +) + +const _defaultBufferSize = 64 * 1024 // 64 KiB + +// Stack represents a single Goroutine's stack. +type Stack struct { + id int + state string + firstFunction string + fullStack *bytes.Buffer +} + +// ID returns the goroutine ID. +func (s Stack) ID() int { + return s.id +} + +// State returns the Goroutine's state. 
+func (s Stack) State() string { + return s.state +} + +// Full returns the full stack trace for this goroutine. +func (s Stack) Full() string { + return s.fullStack.String() +} + +// FirstFunction returns the name of the first function on the stack. +func (s Stack) FirstFunction() string { + return s.firstFunction +} + +func (s Stack) String() string { + return fmt.Sprintf( + "Goroutine %v in state %v, with %v on top of the stack:\n%s", + s.id, s.state, s.firstFunction, s.Full()) +} + +func getStacks(all bool) []Stack { + var stacks []Stack + + var curStack *Stack + stackReader := bufio.NewReader(bytes.NewReader(getStackBuffer(all))) + for { + line, err := stackReader.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + // We're reading using bytes.NewReader which should never fail. + panic("bufio.NewReader failed on a fixed string") + } + + // If we see the goroutine header, start a new stack. + isFirstLine := false + if strings.HasPrefix(line, "goroutine ") { + // flush any previous stack + if curStack != nil { + stacks = append(stacks, *curStack) + } + id, goState := parseGoStackHeader(line) + curStack = &Stack{ + id: id, + state: goState, + fullStack: &bytes.Buffer{}, + } + isFirstLine = true + } + curStack.fullStack.WriteString(line) + if !isFirstLine && curStack.firstFunction == "" { + curStack.firstFunction = parseFirstFunc(line) + } + } + + if curStack != nil { + stacks = append(stacks, *curStack) + } + return stacks +} + +// All returns the stacks for all running goroutines. +func All() []Stack { + return getStacks(true) +} + +// Current returns the stack for the current goroutine. 
+func Current() Stack { + return getStacks(false)[0] +} + +func getStackBuffer(all bool) []byte { + for i := _defaultBufferSize; ; i *= 2 { + buf := make([]byte, i) + if n := runtime.Stack(buf, all); n < i { + return buf[:n] + } + } +} + +func parseFirstFunc(line string) string { + line = strings.TrimSpace(line) + if idx := strings.LastIndex(line, "("); idx > 0 { + return line[:idx] + } + panic(fmt.Sprintf("function calls missing parents: %q", line)) +} + +// parseGoStackHeader parses a stack header that looks like: +// goroutine 643 [runnable]:\n +// And returns the goroutine ID, and the state. +func parseGoStackHeader(line string) (goroutineID int, state string) { + line = strings.TrimSuffix(line, ":\n") + parts := strings.SplitN(line, " ", 3) + if len(parts) != 3 { + panic(fmt.Sprintf("unexpected stack header format: %q", line)) + } + + id, err := strconv.Atoi(parts[1]) + if err != nil { + panic(fmt.Sprintf("failed to parse goroutine ID: %v in line %q", parts[1], line)) + } + + state = strings.TrimSuffix(strings.TrimPrefix(parts[2], "["), "]") + return id, state +} diff --git a/vendor/go.uber.org/goleak/leaks.go b/vendor/go.uber.org/goleak/leaks.go new file mode 100644 index 0000000000000..468dbaf9517e2 --- /dev/null +++ b/vendor/go.uber.org/goleak/leaks.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package goleak + +import ( + "fmt" + + "go.uber.org/goleak/internal/stack" +) + +// TestingT is the minimal subset of testing.TB that we use. +type TestingT interface { + Error(...interface{}) +} + +// filterStacks will filter any stacks excluded by the given opts. +// filterStacks modifies the passed in stacks slice. +func filterStacks(stacks []stack.Stack, skipID int, opts *opts) []stack.Stack { + filtered := stacks[:0] + for _, stack := range stacks { + // Always skip the running goroutine. + if stack.ID() == skipID { + continue + } + // Run any default or user-specified filters. + if opts.filter(stack) { + continue + } + filtered = append(filtered, stack) + } + return filtered +} + +// Find looks for extra goroutines, and returns a descriptive error if +// any are found. +func Find(options ...Option) error { + cur := stack.Current().ID() + + opts := buildOpts(options...) + var stacks []stack.Stack + retry := true + for i := 0; retry; i++ { + stacks = filterStacks(stack.All(), cur, opts) + + if len(stacks) == 0 { + return nil + } + retry = opts.retry(i) + } + + return fmt.Errorf("found unexpected goroutines:\n%s", stacks) +} + +// VerifyNone marks the given TestingT as failed if any extra goroutines are +// found by Find. 
This is a helper method to make it easier to integrate in +// tests by doing: +// defer VerifyNone(t) +func VerifyNone(t TestingT, options ...Option) { + if err := Find(options...); err != nil { + t.Error(err) + } +} diff --git a/vendor/go.uber.org/goleak/options.go b/vendor/go.uber.org/goleak/options.go new file mode 100644 index 0000000000000..e011ba1b85d9f --- /dev/null +++ b/vendor/go.uber.org/goleak/options.go @@ -0,0 +1,152 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package goleak + +import ( + "strings" + "time" + + "go.uber.org/goleak/internal/stack" +) + +// Option lets users specify custom verifications. +type Option interface { + apply(*opts) +} + +// We retry up to 20 times if we can't find the goroutine that +// we are looking for. In between each attempt, we will sleep for +// a short while to let any running goroutines complete. 
+const _defaultRetries = 20 + +type opts struct { + filters []func(stack.Stack) bool + maxRetries int + maxSleep time.Duration +} + +// optionFunc lets us easily write options without a custom type. +type optionFunc func(*opts) + +func (f optionFunc) apply(opts *opts) { f(opts) } + +// IgnoreTopFunction ignores any goroutines where the specified function +// is at the top of the stack. The function name should be fully qualified, +// e.g., go.uber.org/goleak.IgnoreTopFunction +func IgnoreTopFunction(f string) Option { + return addFilter(func(s stack.Stack) bool { + return s.FirstFunction() == f + }) +} + +func maxSleep(d time.Duration) Option { + return optionFunc(func(opts *opts) { + opts.maxSleep = d + }) +} + +func addFilter(f func(stack.Stack) bool) Option { + return optionFunc(func(opts *opts) { + opts.filters = append(opts.filters, f) + }) +} + +func buildOpts(options ...Option) *opts { + opts := &opts{ + maxRetries: _defaultRetries, + maxSleep: 100 * time.Millisecond, + } + opts.filters = append(opts.filters, + isTestStack, + isSyscallStack, + isStdLibStack, + isTraceStack, + ) + for _, option := range options { + option.apply(opts) + } + return opts +} + +func (vo *opts) filter(s stack.Stack) bool { + for _, filter := range vo.filters { + if filter(s) { + return true + } + } + return false +} + +func (vo *opts) retry(i int) bool { + if i >= vo.maxRetries { + return false + } + + d := time.Duration(int(time.Microsecond) << uint(i)) + if d > vo.maxSleep { + d = vo.maxSleep + } + time.Sleep(d) + return true +} + +// isTestStack is a default filter installed to automatically skip goroutines +// that the testing package runs while the user's tests are running. +func isTestStack(s stack.Stack) bool { + // Until go1.7, the main goroutine ran RunTests, which started + // the test in a separate goroutine and waited for that test goroutine + // to end by waiting on a channel. + // Since go1.7, a separate goroutine is started to wait for signals. 
+ // T.Parallel is for parallel tests, which are blocked until all serial + // tests have run with T.Parallel at the top of the stack. + switch s.FirstFunction() { + case "testing.RunTests", "testing.(*T).Run", "testing.(*T).Parallel": + // In pre1.7 and post-1.7, background goroutines started by the testing + // package are blocked waiting on a channel. + return strings.HasPrefix(s.State(), "chan receive") + } + return false +} + +func isSyscallStack(s stack.Stack) bool { + // Typically runs in the background when code uses CGo: + // https://github.com/golang/go/issues/16714 + return s.FirstFunction() == "runtime.goexit" && strings.HasPrefix(s.State(), "syscall") +} + +func isStdLibStack(s stack.Stack) bool { + // Importing os/signal starts a background goroutine. + // The name of the function at the top has changed between versions. + if f := s.FirstFunction(); f == "os/signal.signal_recv" || f == "os/signal.loop" { + return true + } + + // Using signal.Notify will start a runtime goroutine. + return strings.Contains(s.Full(), "runtime.ensureSigM") +} + +func isTraceStack(s stack.Stack) bool { + if f := s.FirstFunction(); f != "runtime.goparkunlock" { + return false + } + + return strings.Contains(s.Full(), "runtime.ReadTrace") +} diff --git a/vendor/go.uber.org/goleak/testmain.go b/vendor/go.uber.org/goleak/testmain.go new file mode 100644 index 0000000000000..316f6e1badd71 --- /dev/null +++ b/vendor/go.uber.org/goleak/testmain.go @@ -0,0 +1,63 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package goleak + +import ( + "fmt" + "io" + "os" +) + +// Variables for stubbing in unit tests. +var ( + _osExit = os.Exit + _osStderr io.Writer = os.Stderr +) + +// TestingM is the minimal subset of testing.M that we use. +type TestingM interface { + Run() int +} + +// VerifyTestMain can be used in a TestMain function for package tests to +// verify that there were no goroutine leaks. +// To use it, your TestMain function should look like: +// +// func TestMain(m *testing.M) { +// goleak.VerifyTestMain(m) +// } +// +// See https://golang.org/pkg/testing/#hdr-Main for more details. +// +// This will run all tests as per normal, and if they were successful, look +// for any goroutine leaks and fail the tests if any leaks were found. 
+func VerifyTestMain(m TestingM, options ...Option) { + exitCode := m.Run() + + if exitCode == 0 { + if err := Find(options...); err != nil { + fmt.Fprintf(_osStderr, "goleak: Errors on successful test run: %v\n", err) + exitCode = 1 + } + } + + _osExit(exitCode) +} diff --git a/vendor/go.uber.org/goleak/tools.go b/vendor/go.uber.org/goleak/tools.go new file mode 100644 index 0000000000000..6a87612cc03e7 --- /dev/null +++ b/vendor/go.uber.org/goleak/tools.go @@ -0,0 +1,28 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build tools + +package goleak + +import ( + // Tools we use during development. 
+ _ "golang.org/x/lint/golint" +) diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 08f8230d6d202..e91cb82c1b4e8 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -325,6 +325,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -507,6 +508,8 @@ ccflags="$@" $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || + $2 ~ /^CP_/ || + $2 ~ /^CPUSTATES$/ || $2 ~ /^ALG_/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 68605db6248d3..60bbe10adf736 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -527,6 +527,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) { return &ci, nil } +func SysctlTimeval(name string) (*Timeval, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + var tv Timeval + n := uintptr(unsafe.Sizeof(tv)) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil { + return nil, err + } + if n != unsafe.Sizeof(tv) { + return nil, EIO + } + return &tv, nil +} + //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index e50e4cb276c86..fad483bb9d0b1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2122,6 +2122,18 @@ func Klogset(typ int, arg int) (err error) { return nil } +// RemoteIovec is Iovec with the pointer replaced with an integer. 
+// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer +// refers to a location in a different process' address space, which +// would confuse the Go garbage collector. +type RemoteIovec struct { + Base uintptr + Len int +} + +//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV +//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 8482458734698..3689c80848194 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 4acd101c3eeba..b8f7c3c930aa5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index e4719873b9ef7..be14bb1a4cdc7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + 
CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 5e49769d96a87..7ce9c0081a8c6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -339,6 +339,12 @@ const ( CLOCK_UPTIME_FAST = 0x8 CLOCK_UPTIME_PRECISE = 0x7 CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x30000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 5402bd55ce104..c865a10df4462 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index ffaf2d2f9f2ae..9db6b2fb6e2c6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -153,6 +153,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 7aa796a642b62..7072526a640d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -146,6 +146,13 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 
0x8000 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 1792d3f13e6d5..ac5efbe5ac766 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -156,6 +156,13 @@ const ( CLOCK_REALTIME = 0x0 CLOCK_THREAD_CPUTIME_ID = 0x4 CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 CREAD = 0x800 CRTSCTS = 0x10000 CS5 = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index df217825f0694..f6603de4f57c6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1847,6 +1847,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) { + var _p0 unsafe.Pointer + if len(localIov) > 0 { + _p0 = unsafe.Pointer(&localIov[0]) + } else { + _p0 
= unsafe.Pointer(&_zero) + } + var _p1 unsafe.Pointer + if len(remoteIov) > 0 { + _p1 = unsafe.Pointer(&remoteIov[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index a114b1aa50a5f..a98fe77827afb 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -53,10 +53,9 @@ func Every(interval time.Duration) Limit { // // The methods AllowN, ReserveN, and WaitN consume n tokens. type Limiter struct { - limit Limit - burst int - mu sync.Mutex + limit Limit + burst int tokens float64 // last is the last time the limiter's tokens field was updated last time.Time @@ -76,6 +75,8 @@ func (lim *Limiter) Limit() Limit { // Burst values allow more events to happen at once. // A zero Burst allows no events, unless limit == Inf. func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() return lim.burst } @@ -229,7 +230,7 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { lim.mu.Unlock() if n > burst && limit != Inf { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) } // Check if ctx is already cancelled select { @@ -359,6 +360,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. +// advance requires that lim.mu is held. 
func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { last := lim.last if now.Before(last) { diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index f38b17247b120..27708972d1e58 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -10,7 +10,6 @@ import ( "errors" "flag" "fmt" - "go/build" "go/scanner" "io" "io/ioutil" @@ -22,6 +21,7 @@ import ( "runtime/pprof" "strings" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/imports" ) @@ -43,14 +43,8 @@ var ( TabIndent: true, Comments: true, Fragment: true, - // This environment, and its caches, will be reused for the whole run. Env: &imports.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), + GocmdRunner: &gocommand.Runner{}, }, } exitCode = 0 @@ -58,7 +52,7 @@ var ( func init() { flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") - flag.StringVar(&options.Env.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") + flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. 
In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.") } @@ -154,7 +148,6 @@ func processFile(filename string, in io.Reader, out io.Writer, argType argumentT // formatting has changed if *list { fmt.Fprintln(out, filename) - exitCode = 1 } if *write { if argType == fromStdin { diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index ea56b724e8b4d..fb17a0e4154c1 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -170,6 +170,15 @@ Diagnostic is defined as: The optional Category field is a short identifier that classifies the kind of message when an analysis produces several kinds of diagnostic. +Many analyses want to associate diagnostics with a severity level. +Because Diagnostic does not have a severity level field, an Analyzer's +diagnostics effectively all have the same severity level. To separate which +diagnostics are high severity and which are low severity, expose multiple +Analyzers instead. Analyzers should also be separated when their +diagnostics belong in different groups, or could be tagged differently +before being shown to the end user. Analyzers should document their severity +level to help downstream tools surface diagnostics properly. + Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl and buildtag, inspect the raw text of Go source files or even non-Go files such as assembly. 
To report a diagnostic against a line of a diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index cb6b14c1b9aec..220d409878e5c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -543,7 +543,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse Module: p.Module, } - if (state.cfg.Mode&TypecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { if len(p.CompiledGoFiles) > len(p.GoFiles) { // We need the cgo definitions, which are in the first // CompiledGoFile after the non-cgo ones. This is a hack but there @@ -635,6 +635,39 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkg.CompiledGoFiles = pkg.GoFiles } + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. + if err := p.Error; err != nil && len(err.ImportStack) == 0 && len(pkg.CompiledGoFiles) == 0 { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). 
+ if !found { + addFilenameFromPos(err.Err) + } + } + if p.Error != nil { msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. // Address golang.org/issue/35964 by appending import stack to error message. diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index b82c90d7c6662..4eabfd98c63f1 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -102,8 +102,11 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif } } } - // The overlay could have included an entirely new package. - if pkg == nil { + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. + if pkg == nil || pkg.ID == "command-line-arguments" { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. pkgPath, ok, err := state.getPkgPath(dir) @@ -113,42 +116,55 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif if !ok { break } + var forTest string // only set for x tests isXTest := strings.HasSuffix(pkgName, "_test") if isXTest { + forTest = pkgPath pkgPath += "_test" } id := pkgPath - if isTestFile && !isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) } } - // Otherwise, create a new package. 
- if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest } } - // TODO(rstambler): Handle forTest for x_tests. 
} } if !fileExists { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1e6f9a467504e..04053f1e7d4cc 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -19,7 +19,6 @@ import ( "log" "os" "path/filepath" - "reflect" "strings" "sync" "time" @@ -27,6 +26,7 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -73,9 +73,9 @@ const ( // NeedTypesSizes adds TypesSizes. NeedTypesSizes - // TypecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. - TypecheckCgo + typecheckCgo // NeedModule adds Module. NeedModule @@ -191,6 +191,13 @@ type driver func(cfg *Config, patterns ...string) (*driverResponse, error) // driverResponse contains the results for a driver query. type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + // Sizes, if not nil, is the types.Sizes to use when type checking. Sizes *types.StdSizes @@ -232,14 +239,22 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { return l.refine(response.Roots, response.Packages...) } -// defaultDriver is a driver that looks for an external driver binary, and if -// it does not find it falls back to the built in go list driver. 
+// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { driver := findExternalDriver(cfg) if driver == nil { driver = goListDriver } - return driver(cfg, patterns...) + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil } // A Package describes a loaded Go package. @@ -346,6 +361,7 @@ func init() { packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { config.(*Config).gocmdRunner = runner } + packagesinternal.TypecheckCgo = int(typecheckCgo) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -906,18 +922,14 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Error: appendError, Sizes: ld.sizes, } - if (ld.Mode & TypecheckCgo) != 0 { - // TODO: remove this when we stop supporting 1.14. 
- rtc := reflect.ValueOf(tc).Elem() - usesCgo := rtc.FieldByName("UsesCgo") - if !usesCgo.IsValid() { + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { appendError(Error{ - Msg: "TypecheckCgo requires Go 1.15+", + Msg: "typecheckCgo requires Go 1.15+", Kind: ListError, }) return } - usesCgo.SetBool(true) } types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 26586810c7f5a..a194f533902b1 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -16,6 +16,11 @@ import ( "golang.org/x/tools/go/ast/astutil" ) +var ( + GetTypeErrors func(p interface{}) []types.Error + SetTypeErrors func(p interface{}, errors []types.Error) +) + func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. offset, end := fset.PositionFor(start, false).Offset, start @@ -48,7 +53,7 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice: return ast.NewIdent("nil") case *types.Struct: - texpr := typeExpr(fset, f, pkg, typ) // typ because we want the name here. + texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here. 
if texpr == nil { return nil } @@ -56,7 +61,7 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T Type: texpr, } case *types.Array: - texpr := typeExpr(fset, f, pkg, u.Elem()) + texpr := TypeExpr(fset, f, pkg, u.Elem()) if texpr == nil { return nil } @@ -70,7 +75,7 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T return nil } -func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { +func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { switch t := typ.(type) { case *types.Basic: switch t.Kind() { @@ -79,7 +84,96 @@ func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty default: return ast.NewIdent(t.Name()) } + case *types.Pointer: + x := TypeExpr(fset, f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(fset, f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(fset, f, pkg, t.Key()) + value := TypeExpr(fset, f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := TypeExpr(fset, f, pkg, t.Elem()) + if value == nil { + return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(fset, f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + 
Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(fset, f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } case *types.Named: + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } if t.Obj().Pkg() == pkg { return ast.NewIdent(t.Obj().Name()) } @@ -106,9 +200,6 @@ func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty } } -var GetTypeErrors = func(p interface{}) []types.Error { return nil } -var SetTypeErrors = func(p interface{}, errors []types.Error) {} - type TypeErrorPass string const ( @@ -116,3 +207,77 @@ const ( NoResultValues TypeErrorPass = "noresultvalues" UndeclaredName TypeErrorPass = "undeclaredname" ) + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. 
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. 
+func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 9aa7984561702..f516e17623d6b 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -23,57 +23,106 @@ import ( // An Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { - // LoadMu guards packages.Load calls and associated state. - loadMu sync.Mutex - serializeLoads int + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) } // 1.13: go: updates to go.mod needed, but contents have changed // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) -// Run calls Runner.RunRaw, serializing requests if they fight over -// go.mod changes. +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. 
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } -// RunRaw calls Invocation.runRaw, serializing requests if they fight over +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { - // We want to run invocations concurrently as much as possible. However, - // if go.mod updates are needed, only one can make them and the others will - // fail. We need to retry in those cases, but we don't want to thrash so - // badly we never recover. To avoid that, once we've seen one concurrency - // error, start serializing everything until the backlog has cleared out. - runner.loadMu.Lock() - var locked bool // If true, we hold the mutex and have incremented. - if runner.serializeLoads == 0 { - runner.loadMu.Unlock() - } else { - locked = true - runner.serializeLoads++ + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. 
+ if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err } - defer func() { - if locked { - runner.serializeLoads-- - runner.loadMu.Unlock() - } - }() + event.Error(ctx, "Load concurrency error, will retry serially", err) - for { - stdout, stderr, friendlyErr, err := inv.runRaw(ctx) - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err - } - event.Error(ctx, "Load concurrency error, will retry serially", err) - if !locked { - runner.loadMu.Lock() - runner.serializeLoads++ - locked = true + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. 
+ for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() } } + + return inv.runWithFriendlyError(ctx, stdout, stderr) } // An Invocation represents a call to the go command. @@ -86,12 +135,8 @@ type Invocation struct { Logf func(format string, args ...interface{}) } -// RunRaw is like RunPiped, but also returns the raw stderr and error for callers -// that want to do low-level error handling/recovery. -func (i *Invocation) runRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *bytes.Buffer, friendlyError error, rawError error) { - stdout = &bytes.Buffer{} - stderr = &bytes.Buffer{} - rawError = i.RunPiped(ctx, stdout, stderr) +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) if rawError != nil { friendlyError = rawError // Check for 'go' executable not being found. 
@@ -106,8 +151,7 @@ func (i *Invocation) runRaw(ctx context.Context) (stdout *bytes.Buffer, stderr * return } -// RunPiped is like Run, but relies on the given stdout/stderr -func (i *Invocation) RunPiped(ctx context.Context, stdout, stderr io.Writer) error { +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { log := i.Logf if log == nil { log = func(string, ...interface{}) {} @@ -141,7 +185,6 @@ func (i *Invocation) RunPiped(ctx context.Context, stdout, stderr io.Writer) err cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } - defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 0000000000000..1cd8d8473e993 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Replace *ModuleJSON // replaced by this module + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file for this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. 
The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return nil, false, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + if modFlag != "" { + // Don't override an explicit '-mod=' argument. + return mainMod, modFlag == "vendor", nil + } + if mainMod == nil || !go114 { + return mainMod, false, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return mainMod, true, nil + } + } + return mainMod, false, nil +} + +// getMainModuleAnd114 gets the main module's information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . 
"go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 3a53bb6bf7b80..ecd13e87ad5b2 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -7,6 +7,7 @@ package imports import ( "bytes" "context" + "encoding/json" "fmt" "go/ast" "go/build" @@ -31,25 +32,25 @@ import ( // importToGroup is a list of functions which map from an import path to // a group number. -var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){ - func(env *ProcessEnv, importPath string) (num int, ok bool) { - if env.LocalPrefix == "" { +var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { return } - for _, p := range strings.Split(env.LocalPrefix, ",") { + for _, p := range strings.Split(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { if strings.HasPrefix(importPath, "appengine") { return 2, true } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { firstComponent := strings.Split(importPath, "/")[0] if strings.Contains(firstComponent, ".") { return 1, true @@ -58,9 +59,9 @@ var importToGroup = 
[]func(env *ProcessEnv, importPath string) (num int, ok bool }, } -func importGroup(env *ProcessEnv, importPath string) int { +func importGroup(localPrefix, importPath string) int { for _, fn := range importToGroup { - if n, ok := fn(env, importPath); ok { + if n, ok := fn(localPrefix, importPath); ok { return n } } @@ -277,7 +278,12 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir) + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) if err != nil { return err } @@ -598,7 +604,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena // Start off with the standard library. for importPath, exports := range stdlib { p := &pkg{ - dir: filepath.Join(env.GOROOT, "src", importPath), + dir: filepath.Join(env.goroot(), "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), relevance: MaxRelevance, @@ -639,15 +645,23 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena wrappedCallback.exportsLoaded(pkg, exports) }, } - return env.GetResolver().scan(ctx, scanFilter) + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) } -func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]int, error) { result := make(map[string]int) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } for _, path := range paths { - result[path] = env.GetResolver().scoreImportPath(ctx, path) + result[path] = resolver.scoreImportPath(ctx, path) } - return result + return result, nil } func PrimeCache(ctx context.Context, env *ProcessEnv) error { @@ -673,8 +687,9 @@ func candidateImportName(pkg *pkg) 
string { return "" } -// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -713,7 +728,8 @@ type PackageExport struct { Exports []string } -func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -743,19 +759,22 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} + // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. type ProcessEnv struct { - LocalPrefix string - GocmdRunner *gocommand.Runner BuildFlags []string - // If non-empty, these will be used instead of the - // process-wide values. - GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string - WorkingDir string + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. 
+ // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) @@ -763,6 +782,22 @@ type ProcessEnv struct { resolver Resolver } +func (e *ProcessEnv) goroot() string { + return e.mustGetEnv("GOROOT") +} + +func (e *ProcessEnv) gopath() string { + return e.mustGetEnv("GOPATH") +} + +func (e *ProcessEnv) mustGetEnv(k string) string { + v, ok := e.Env[k] + if !ok { + panic(fmt.Sprintf("%v not set in evaluated environment", k)) + } + return v +} + // CopyConfig copies the env's configuration into a new env. func (e *ProcessEnv) CopyConfig() *ProcessEnv { copy := *e @@ -770,42 +805,63 @@ func (e *ProcessEnv) CopyConfig() *ProcessEnv { return © } -func (e *ProcessEnv) env() []string { - env := os.Environ() - add := func(k, v string) { - if v != "" { - env = append(env, k+"="+v) +func (e *ProcessEnv) init() error { + foundAllRequired := true + for _, k := range RequiredGoEnvVars { + if _, ok := e.Env[k]; !ok { + foundAllRequired = false + break } } - add("GOPATH", e.GOPATH) - add("GOROOT", e.GOROOT) - add("GO111MODULE", e.GO111MODULE) - add("GOPROXY", e.GOPROXY) - add("GOFLAGS", e.GOFLAGS) - add("GOSUMDB", e.GOSUMDB) - if e.WorkingDir != "" { - add("PWD", e.WorkingDir) + if foundAllRequired { + return nil + } + + if e.Env == nil { + e.Env = map[string]string{} + } + + goEnv := map[string]string{} + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...) + if err != nil { + return err + } + if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil { + return err + } + for k, v := range goEnv { + e.Env[k] = v + } + return nil +} + +func (e *ProcessEnv) env() []string { + var env []string // the gocommand package will prepend os.Environ. 
+ for k, v := range e.Env { + env = append(env, k+"="+v) } return env } -func (e *ProcessEnv) GetResolver() Resolver { +func (e *ProcessEnv) GetResolver() (Resolver, error) { if e.resolver != nil { - return e.resolver + return e.resolver, nil + } + if err := e.init(); err != nil { + return nil, err } - out, err := e.invokeGo(context.TODO(), "env", "GOMOD") - if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { + if len(e.Env["GOMOD"]) == 0 { e.resolver = newGopathResolver(e) - return e.resolver + return e.resolver, nil } e.resolver = newModuleResolver(e) - return e.resolver + return e.resolver, nil } func (e *ProcessEnv) buildContext() *build.Context { ctx := build.Default - ctx.GOROOT = e.GOROOT - ctx.GOPATH = e.GOPATH + ctx.GOROOT = e.goroot() + ctx.GOPATH = e.gopath() // As of Go 1.14, build.Context has a Dir field // (see golang.org/issue/34860). @@ -839,7 +895,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) func addStdlibCandidates(pass *pass, refs references) { add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.goroot(), "src", pkg) == pass.srcDir { return } exports := copyExports(stdlib[pkg]) @@ -924,10 +980,13 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return false // We'll do our own loading after we sort. }, } - err := pass.env.GetResolver().scan(context.Background(), callback) + resolver, err := pass.env.GetResolver() if err != nil { return err } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } // Search for imports matching potential package references. 
type result struct { @@ -1325,7 +1384,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl fullFile := filepath.Join(dir, fi.Name()) f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fullFile, err) + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } + continue } if f.Name.Name == "documentation" { // Special case from go/build.ImportDir, not @@ -1365,6 +1427,10 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } // Collect exports for packages with matching names. rescv := make([]chan *pkg, len(candidates)) @@ -1403,7 +1469,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa } // If we're an x_test, load the package under test's test variant. 
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) if err != nil { if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f43d6b22e54d9..2815edc33d736 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,29 +11,29 @@ package imports import ( "bufio" "bytes" - "context" "fmt" "go/ast" - "go/build" "go/format" "go/parser" "go/printer" "go/token" "io" - "io/ioutil" - "os" "regexp" "strconv" "strings" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/internal/gocommand" ) // Options is golang.org/x/tools/imports.Options with extra internal-only options. type Options struct { Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. + LocalPrefix string + Fragment bool // Accept fragment of a source file (no package statement) AllErrors bool // Report all errors (not just the first 10 on different lines) @@ -44,13 +44,8 @@ type Options struct { FormatOnly bool // Disable the insertion and deletion of imports } -// Process implements golang.org/x/tools/imports.Process with explicit context in env. +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. 
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, adjust, err := parse(fileSet, filename, src, opt) if err != nil { @@ -66,16 +61,12 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e } // FixImports returns a list of fixes to the imports that, when applied, -// will leave the imports in the same state as Process. +// will leave the imports in the same state as Process. src and opt must +// be specified. // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { @@ -86,13 +77,9 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, } // ApplyFixes applies all of the fixes to the file and formats it. extraMode -// is added in when parsing the file. +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() @@ -116,63 +103,9 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the packages starting with prefix that can be -// imported by filename, sorted by import path. 
-func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) -} - -// GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) -} - -// initialize sets the values for opt and src. -// If they are provided, they are not changed. Otherwise opt is set to the -// default values and src is read from the file system. -func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) { - // Use defaults if opt is nil. - if opt == nil { - opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} - } - - // Set the env if the user has not provided it. - if opt.Env == nil { - opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), - } - } - // Set the gocmdRunner if the user has not provided it. 
- if opt.Env.GocmdRunner == nil { - opt.Env.GocmdRunner = &gocommand.Runner{} - } - if src == nil { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, nil, err - } - src = b - } - - return src, opt, nil -} - func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { - mergeImports(opt.Env, fileSet, file) - sortImports(opt.Env, fileSet, file) + mergeImports(fileSet, file) + sortImports(opt.LocalPrefix, fileSet, file) imps := astutil.Imports(fileSet, file) var spacesBefore []string // import paths we need spaces before for _, impSection := range imps { @@ -183,7 +116,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func( lastGroup := -1 for _, importSpec := range impSection { importPath, _ := strconv.Unquote(importSpec.Path.Value) - groupNum := importGroup(opt.Env, importPath) + groupNum := importGroup(opt.LocalPrefix, importPath) if groupNum != lastGroup && lastGroup != -1 { spacesBefore = append(spacesBefore, importPath) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 69e3eecc4c7fd..664fbbf5ba8c4 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -15,7 +15,7 @@ import ( "strings" "golang.org/x/mod/module" - "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -24,31 +24,21 @@ import ( type ModuleResolver struct { env *ProcessEnv moduleCacheDir string - dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. roots []gopathwalk.Root scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. 
scannedRoots map[gopathwalk.Root]bool initialized bool - main *ModuleJSON - modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*ModuleJSON // ...or Dir. + main *gocommand.ModuleJSON + modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*gocommand.ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache otherCache *dirInfoCache } -type ModuleJSON struct { - Path string // module path - Replace *ModuleJSON // replaced by this module - Main bool // is this the main module? - Indirect bool // is this module only an indirect dependency of main module? - Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file for this module, if any - GoVersion string // go version used in module -} - func newModuleResolver(e *ProcessEnv) *ModuleResolver { r := &ModuleResolver{ env: e, @@ -62,7 +52,14 @@ func (r *ModuleResolver) init() error { if r.initialized { return nil } - mainMod, vendorEnabled, err := vendorEnabled(r.env) + + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { return err } @@ -71,18 +68,22 @@ func (r *ModuleResolver) init() error { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. 
r.main = mainMod - r.dummyVendorMod = &ModuleJSON{ + r.dummyVendorMod = &gocommand.ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. r.initAllMods() } - r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.gopath())[0], "/pkg/mod") + } sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { @@ -98,7 +99,7 @@ func (r *ModuleResolver) init() error { }) r.roots = []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + {filepath.Join(r.env.goroot(), "/src"), gopathwalk.RootGOROOT}, } if r.main != nil { r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) @@ -106,7 +107,7 @@ func (r *ModuleResolver) init() error { if vendorEnabled { r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) } else { - addDep := func(mod *ModuleJSON) { + addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { // This is redundant with the cache, but we'll skip it cheaply enough. 
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) @@ -151,7 +152,7 @@ func (r *ModuleResolver) initAllMods() error { return err } for dec := json.NewDecoder(stdout); dec.More(); { - mod := &ModuleJSON{} + mod := &gocommand.ModuleJSON{} if err := dec.Decode(mod); err != nil { return err } @@ -197,7 +198,7 @@ func (r *ModuleResolver) ClearForNewMod() { // findPackage returns the module and directory that contains the package at // the given import path, or returns nil, "" if no module is in scope. -func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. for _, m := range r.modsByModPath { @@ -283,7 +284,7 @@ func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info // findModuleByDir returns the module that contains dir, or nil if no such // module is in scope. -func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // This is quite tricky and may not be correct. dir could be: // - a package in the main module. // - a replace target underneath the main module's directory. @@ -310,7 +311,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // dirIsNestedModule reports if dir is contained in a nested module underneath // mod, not actually in mod. 
-func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool { +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { if !strings.HasPrefix(dir, mod.Dir) { return false } @@ -490,7 +491,7 @@ func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { return modRelevance(mod) } -func modRelevance(mod *ModuleJSON) int { +func modRelevance(mod *gocommand.ModuleJSON) int { switch { case mod == nil: // out of scope return MaxRelevance - 4 @@ -656,63 +657,3 @@ func modulePath(mod []byte) string { } return "" // missing module path } - -var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) - -// vendorEnabled indicates if vendoring is enabled. -// Inspired by setDefaultBuildMod in modload/init.go -func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) { - mainMod, go114, err := getMainModuleAnd114(env) - if err != nil { - return nil, false, err - } - matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS) - var modFlag string - if len(matches) != 0 { - modFlag = matches[1] - } - if modFlag != "" { - // Don't override an explicit '-mod=' argument. - return mainMod, modFlag == "vendor", nil - } - if mainMod == nil || !go114 { - return mainMod, false, nil - } - // Check 1.14's automatic vendor mode. - if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { - if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { - // The Go version is at least 1.14, and a vendor directory exists. - // Set -mod=vendor by default. - return mainMod, true, nil - } - } - return mainMod, false, nil -} - -// getMainModuleAnd114 gets the main module's information and whether the -// go command in use is 1.14+. This is the information needed to figure out -// if vendoring should be enabled. 
-func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) { - const format = `{{.Path}} -{{.Dir}} -{{.GoMod}} -{{.GoVersion}} -{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} -` - stdout, err := env.invokeGo(context.TODO(), "list", "-m", "-f", format) - if err != nil { - return nil, false, nil - } - lines := strings.Split(stdout.String(), "\n") - if len(lines) < 5 { - return nil, false, fmt.Errorf("unexpected stdout: %q", stdout) - } - mod := &ModuleJSON{ - Path: lines[0], - Dir: lines[1], - GoMod: lines[2], - GoVersion: lines[3], - Main: true, - } - return mod, lines[4] == "go1.14", nil -} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 226279471d39a..be8ffa25fec29 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -15,7 +15,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { @@ -40,11 +40,11 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { for j, s := range d.Specs { if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { // j begins a new run. End this one. - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) i = j } } - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. 
@@ -60,7 +60,7 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { // mergeImports merges all the import declarations into the first one. // Taken from golang.org/x/tools/ast/astutil. -func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func mergeImports(fset *token.FileSet, f *ast.File) { if len(f.Decls) <= 1 { return } @@ -142,7 +142,7 @@ type posSpan struct { End token.Pos } -func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { +func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { // Can't short-circuit here even if specs are already sorted, // since they might yet need deduplication. // A lone import, however, may be safely ignored. @@ -191,7 +191,7 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp // Reassign the import paths to have the same position sequence. // Reassign each comment to abut the end of its spec. // Sort the comments by new position. - sort.Sort(byImportSpec{env, specs}) + sort.Sort(byImportSpec{localPrefix, specs}) // Dedup. Thanks to our sorting, we can just consider // adjacent pairs of imports. 
@@ -245,8 +245,8 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp } type byImportSpec struct { - env *ProcessEnv - specs []ast.Spec // slice of *ast.ImportSpec + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec } func (x byImportSpec) Len() int { return len(x.specs) } @@ -255,8 +255,8 @@ func (x byImportSpec) Less(i, j int) bool { ipath := importPath(x.specs[i]) jpath := importPath(x.specs[j]) - igroup := importGroup(x.env, ipath) - jgroup := importGroup(x.env, jpath) + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) if igroup != jgroup { return igroup < jgroup } diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index ff5a6b17d3227..2c4527f2436a9 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -10,3 +10,5 @@ var GetForTest = func(p interface{}) string { return "" } var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 0000000000000..a5bb408e2f1b3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typesinternal + +import ( + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index f0f8e54086f86..3767415986764 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -884,7 +884,7 @@ ] }, "getIamPolicy": { - "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.getIamPolicy` on the project.\n\nFor additional information about resource structure and identification,\nsee [Resource Names](/apis/design/resource_names).", + "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.getIamPolicy` on the project.\n\nFor additional information about `resource` (e.g. my-project-id) structure\nand identification, see [Resource Names](/apis/design/resource_names).", "flatPath": "v1/projects/{resource}:getIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.getIamPolicy", @@ -948,7 +948,7 @@ "parameterOrder": [], "parameters": { "filter": { - "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. 
The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ `labels.\u003ckey\u003e` (where *key* is the name of a label)\n+ `parent.type`\n+ `parent.id`\n\nSome examples of using labels as filters:\n\n| Filter | Description |\n|------------------|-----------------------------------------------------|\n| name:how* | The project's name starts with \"how\". |\n| name:Howl | The project's name is `Howl` or `howl`. |\n| name:HOWL | Equivalent to above. |\n| NAME:howl | Equivalent to above. |\n| labels.color:* | The project has the label `color`. |\n| labels.color:red | The project's label `color` has the value `red`. |\n| labels.color:red\u0026nbsp;labels.size:big |The project's label `color` has\n the value `red` and its label `size` has the value `big`. |\n\nIf no filter is specified, the call will return projects for which the user\nhas the `resourcemanager.projects.get` permission.\n\nNOTE: To perform a by-parent query (eg., what projects are directly in a\nFolder), the caller must have the `resourcemanager.projects.list`\npermission on the parent and the filter must contain both a `parent.type`\nand a `parent.id` restriction\n(example: \"parent.type:folder parent.id:123\"). In this case an alternate\nsearch index is used which provides more consistent results.\n\nOptional.", + "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. Some eligible fields for filtering are:\n\n+ `name`\n+ `id`\n+ `labels.\u003ckey\u003e` (where *key* is the name of a label)\n+ `parent.type`\n+ `parent.id`\n+ `lifecycleState`\n\nSome examples of filter strings:\n\n| Filter | Description |\n|------------------|-----------------------------------------------------|\n| name:how* | The project's name starts with \"how\". |\n| name:Howl | The project's name is `Howl` or `howl`. |\n| name:HOWL | Equivalent to above. |\n| NAME:howl | Equivalent to above. |\n| labels.color:* | The project has the label `color`. 
|\n| labels.color:red | The project's label `color` has the value `red`. |\n| labels.color:red\u0026nbsp;labels.size:big | The project's label `color` |\n: : has the value `red` and its :\n: : label`size` has the value :\n: : `big`. :\n| lifecycleState:DELETE_REQUESTED | Only show projects that are |\n: : pending deletion. :\n\nIf no filter is specified, the call will return projects for which the user\nhas the `resourcemanager.projects.get` permission.\n\nNOTE: To perform a by-parent query (eg., what projects are directly in a\nFolder), the caller must have the `resourcemanager.projects.list`\npermission on the parent and the filter must contain both a `parent.type`\nand a `parent.id` restriction\n(example: \"parent.type:folder parent.id:123\"). In this case an alternate\nsearch index is used which provides more consistent results.\n\nOptional.", "location": "query", "type": "string" }, @@ -1032,7 +1032,7 @@ ] }, "setIamPolicy": { - "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. 
The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", + "description": "Sets the IAM access control policy for the specified Project.\n\nCAUTION: This method will replace the existing policy, and cannot be used\nto append additional IAM settings.\n\nNOTE: Removing service accounts from policies or changing their roles can\nrender services completely inoperable. It is important to understand how\nthe service account is being used before removing or updating its roles.\n\nFor additional information about `resource` (e.g. 
my-project-id) structure\nand identification, see [Resource Names](/apis/design/resource_names).\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. 
Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", "flatPath": "v1/projects/{resource}:setIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.setIamPolicy", @@ -1087,7 +1087,7 @@ ] }, "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified Project.\n\nThere are no permissions required for making this API call.", + "description": "Returns permissions that a caller has on the specified Project.\n\nFor additional information about `resource` (e.g. my-project-id) structure\nand identification, see [Resource Names](/apis/design/resource_names).\n\nThere are no permissions required for making this API call.", "flatPath": "v1/projects/{resource}:testIamPermissions", "httpMethod": "POST", "id": "cloudresourcemanager.projects.testIamPermissions", @@ -1171,7 +1171,7 @@ } } }, - "revision": "20200504", + "revision": "20200622", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1186,7 +1186,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": 
\"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\",\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\"\n },\n {\n \"log_type\": \"ADMIN_READ\"\n }\n ]\n },\n {\n \"service\": \"sampleservice.googleapis.com\",\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\"\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:aliya@example.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. 
It also exempts jose@example.com from DATA_READ logging, and\naliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -1204,7 +1204,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:jose@example.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\"\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\njose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -1289,7 +1289,7 @@ "type": "object" }, "Constraint": { - "description": "A `Constraint` describes a way in which a resource's configuration can be\nrestricted. For example, it controls which cloud services can be activated\nacross an organization, or whether a Compute Engine instance can have\nserial port connections established. `Constraints` can be configured by the\norganization's policy adminstrator to fit the needs of the organzation by\nsetting Policies for `Constraints` at different locations in the\norganization's resource hierarchy. Policies are inherited down the resource\nhierarchy from higher levels, but can also be overridden. 
For details about\nthe inheritance rules please read about\nPolicies.\n\n`Constraints` have a default behavior determined by the `constraint_default`\nfield, which is the enforcement behavior that is used in the absence of a\n`Policy` being defined or inherited for the resource in question.", + "description": "A `Constraint` describes a way in which a resource's configuration can be\nrestricted. For example, it controls which cloud services can be activated\nacross an organization, or whether a Compute Engine instance can have\nserial port connections established. `Constraints` can be configured by the\norganization's policy administrator to fit the needs of the organzation by\nsetting Policies for `Constraints` at different locations in the\norganization's resource hierarchy. Policies are inherited down the resource\nhierarchy from higher levels, but can also be overridden. For details about\nthe inheritance rules please read about\n[Policies](/resource-manager/reference/rest/v1/Policy).\n\n`Constraints` have a default behavior determined by the `constraint_default`\nfield, which is the enforcement behavior that is used in the absence of a\n`Policy` being defined or inherited for the resource in question.", "id": "Constraint", "properties": { "booleanConstraint": { @@ -1297,7 +1297,7 @@ "description": "Defines this constraint as being a BooleanConstraint." 
}, "constraintDefault": { - "description": "The evaluation behavior of this constraint in the absense of 'Policy'.", + "description": "The evaluation behavior of this constraint in the absence of 'Policy'.", "enum": [ "CONSTRAINT_DEFAULT_UNSPECIFIED", "ALLOW", @@ -1438,7 +1438,7 @@ "type": "object" }, "GetAncestryResponse": { - "description": "Response from the GetAncestry method.", + "description": "Response from the\nprojects.getAncestry\nmethod.", "id": "GetAncestryResponse", "properties": { "ancestor": { @@ -1532,7 +1532,7 @@ "type": "object" }, "ListAvailableOrgPolicyConstraintsRequest": { - "description": "The request sent to the [ListAvailableOrgPolicyConstraints]\ngoogle.cloud.OrgPolicy.v1.ListAvailableOrgPolicyConstraints] method.", + "description": "The request sent to the `ListAvailableOrgPolicyConstraints` method on the\nproject, folder, or organization.", "id": "ListAvailableOrgPolicyConstraintsRequest", "properties": { "pageSize": { @@ -1548,7 +1548,7 @@ "type": "object" }, "ListAvailableOrgPolicyConstraintsResponse": { - "description": "The response returned from the ListAvailableOrgPolicyConstraints method.\nReturns all `Constraints` that could be set at this level of the hierarchy\n(contrast with the response from `ListPolicies`, which returns all policies\nwhich are set).", + "description": "The response returned from the `ListAvailableOrgPolicyConstraints` method.\nReturns all `Constraints` that could be set at this level of the hierarchy\n(contrast with the response from `ListPolicies`, which returns all policies\nwhich are set).", "id": "ListAvailableOrgPolicyConstraintsResponse", "properties": { "constraints": { @@ -1615,7 +1615,7 @@ "type": "object" }, "ListOrgPoliciesResponse": { - "description": "The response returned from the ListOrgPolicies method. It will be empty\nif no `Policies` are set on the resource.", + "description": "The response returned from the `ListOrgPolicies` method. 
It will be empty\nif no `Policies` are set on the resource.", "id": "ListOrgPoliciesResponse", "properties": { "nextPageToken": { @@ -1665,7 +1665,7 @@ "type": "array" }, "inheritFromParent": { - "description": "Determines the inheritance behavior for this `Policy`.\n\nBy default, a `ListPolicy` set at a resource supercedes any `Policy` set\nanywhere up the resource hierarchy. However, if `inherit_from_parent` is\nset to `true`, then the values from the effective `Policy` of the parent\nresource are inherited, meaning the values set in this `Policy` are\nadded to the values inherited up the hierarchy.\n\nSetting `Policy` hierarchies that inherit both allowed values and denied\nvalues isn't recommended in most circumstances to keep the configuration\nsimple and understandable. However, it is possible to set a `Policy` with\n`allowed_values` set that inherits a `Policy` with `denied_values` set.\nIn this case, the values that are allowed must be in `allowed_values` and\nnot present in `denied_values`.\n\nFor example, suppose you have a `Constraint`\n`constraints/serviceuser.services`, which has a `constraint_type` of\n`list_constraint`, and with `constraint_default` set to `ALLOW`.\nSuppose that at the Organization level, a `Policy` is applied that\nrestricts the allowed API activations to {`E1`, `E2`}. 
Then, if a\n`Policy` is applied to a project below the Organization that has\n`inherit_from_parent` set to `false` and field all_values set to DENY,\nthen an attempt to activate any API will be denied.\n\nThe following examples demonstrate different possible layerings for\n`projects/bar` parented by `organizations/foo`:\n\nExample 1 (no inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has `inherit_from_parent` `false` and values:\n {allowed_values: \"E3\" allowed_values: \"E4\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E3`, and `E4`.\n\nExample 2 (inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {value: \"E3\" value: \"E4\" inherit_from_parent: true}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`.\n\nExample 3 (inheriting both allowed and denied values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {denied_values: \"E1\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe value accepted at `projects/bar` is `E2`.\n\nExample 4 (RestoreDefault):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {RestoreDefault: {}}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 5 (no policy inherits parent policy):\n `organizations/foo` has no `Policy` set.\n `projects/bar` has no `Policy` set.\nThe accepted values at both levels are either all or none depending on\nthe 
value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 6 (ListConstraint allowing all):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: ALLOW}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nAny value is accepted at `projects/bar`.\n\nExample 7 (ListConstraint allowing none):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: DENY}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nNo value is accepted at `projects/bar`.\n\nExample 10 (allowed and denied subtrees of Resource Manager hierarchy):\nGiven the following resource hierarchy\n O1-\u003e{F1, F2}; F1-\u003e{P1}; F2-\u003e{P2, P3},\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"under:organizations/O1\"}\n `projects/bar` has a `Policy` with:\n {allowed_values: \"under:projects/P3\"}\n {denied_values: \"under:folders/F2\"}\nThe accepted values at `organizations/foo` are `organizations/O1`,\n `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`,\n `projects/P3`.\nThe accepted values at `projects/bar` are `organizations/O1`,\n `folders/F1`, `projects/P1`.", + "description": "Determines the inheritance behavior for this `Policy`.\n\nBy default, a `ListPolicy` set at a resource supersedes any `Policy` set\nanywhere up the resource hierarchy. However, if `inherit_from_parent` is\nset to `true`, then the values from the effective `Policy` of the parent\nresource are inherited, meaning the values set in this `Policy` are\nadded to the values inherited up the hierarchy.\n\nSetting `Policy` hierarchies that inherit both allowed values and denied\nvalues isn't recommended in most circumstances to keep the configuration\nsimple and understandable. 
However, it is possible to set a `Policy` with\n`allowed_values` set that inherits a `Policy` with `denied_values` set.\nIn this case, the values that are allowed must be in `allowed_values` and\nnot present in `denied_values`.\n\nFor example, suppose you have a `Constraint`\n`constraints/serviceuser.services`, which has a `constraint_type` of\n`list_constraint`, and with `constraint_default` set to `ALLOW`.\nSuppose that at the Organization level, a `Policy` is applied that\nrestricts the allowed API activations to {`E1`, `E2`}. Then, if a\n`Policy` is applied to a project below the Organization that has\n`inherit_from_parent` set to `false` and field all_values set to DENY,\nthen an attempt to activate any API will be denied.\n\nThe following examples demonstrate different possible layerings for\n`projects/bar` parented by `organizations/foo`:\n\nExample 1 (no inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has `inherit_from_parent` `false` and values:\n {allowed_values: \"E3\" allowed_values: \"E4\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E3`, and `E4`.\n\nExample 2 (inherited values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {value: \"E3\" value: \"E4\" inherit_from_parent: true}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are `E1`, `E2`, `E3`, and `E4`.\n\nExample 3 (inheriting both allowed and denied values):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {denied_values: \"E1\"}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe value accepted at `projects/bar` is `E2`.\n\nExample 4 (RestoreDefault):\n `organizations/foo` has a `Policy` with 
values:\n {allowed_values: \"E1\" allowed_values:\"E2\"}\n `projects/bar` has a `Policy` with values:\n {RestoreDefault: {}}\nThe accepted values at `organizations/foo` are `E1`, `E2`.\nThe accepted values at `projects/bar` are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 5 (no policy inherits parent policy):\n `organizations/foo` has no `Policy` set.\n `projects/bar` has no `Policy` set.\nThe accepted values at both levels are either all or none depending on\nthe value of `constraint_default` (if `ALLOW`, all; if\n`DENY`, none).\n\nExample 6 (ListConstraint allowing all):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: ALLOW}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nAny value is accepted at `projects/bar`.\n\nExample 7 (ListConstraint allowing none):\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"E1\" allowed_values: \"E2\"}\n `projects/bar` has a `Policy` with:\n {all: DENY}\nThe accepted values at `organizations/foo` are `E1`, E2`.\nNo value is accepted at `projects/bar`.\n\nExample 10 (allowed and denied subtrees of Resource Manager hierarchy):\nGiven the following resource hierarchy\n O1-\u003e{F1, F2}; F1-\u003e{P1}; F2-\u003e{P2, P3},\n `organizations/foo` has a `Policy` with values:\n {allowed_values: \"under:organizations/O1\"}\n `projects/bar` has a `Policy` with:\n {allowed_values: \"under:projects/P3\"}\n {denied_values: \"under:folders/F2\"}\nThe accepted values at `organizations/foo` are `organizations/O1`,\n `folders/F1`, `folders/F2`, `projects/P1`, `projects/P2`,\n `projects/P3`.\nThe accepted values at `projects/bar` are `organizations/O1`,\n `folders/F1`, `projects/P1`.", "type": "boolean" }, "suggestedValue": { @@ -1737,7 +1737,7 @@ "description": "For boolean `Constraints`, whether to enforce the `Constraint` or not." 
}, "constraint": { - "description": "The name of the `Constraint` the `Policy` is configuring, for example,\n`constraints/serviceuser.services`.\n\nImmutable after creation.", + "description": "The name of the `Constraint` the `Policy` is configuring, for example,\n`constraints/serviceuser.services`.\n\nA [list of available\nconstraints](/resource-manager/docs/organization-policy/org-policy-constraints)\nis available.\n\nImmutable after creation.", "type": "string" }, "etag": { @@ -1859,7 +1859,7 @@ "additionalProperties": { "type": "string" }, - "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: \\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression (\\[a-z\\](\\[-a-z0-9\\]*\\[a-z0-9\\])?)?. A label\nvalue can be empty.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", + "description": "The labels associated with this Project.\n\nLabel keys must be between 1 and 63 characters long and must conform\nto the following regular expression: a-z{0,62}.\n\nLabel values must be between 0 and 63 characters long and must conform\nto the regular expression [a-z0-9_-]{0,63}. 
A label value can be empty.\n\nNo more than 256 labels can be associated with a given resource.\n\nClients should store labels in a representation such as JSON that does not\ndepend on specific characters being disallowed.\n\nExample: \u003ccode\u003e\"environment\" : \"dev\"\u003c/code\u003e\nRead-write.", "type": "object" }, "lifecycleState": { diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 146ff8e01c034..bb95808a55d0e 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -79,6 +79,7 @@ const apiId = "cloudresourcemanager:v1" const apiName = "cloudresourcemanager" const apiVersion = "v1" const basePath = "https://cloudresourcemanager.googleapis.com/" +const mtlsBasePath = "https://cloudresourcemanager.mtls.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -98,6 +99,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err @@ -245,7 +247,7 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // { // "audit_configs": [ // { -// "service": "allServices" +// "service": "allServices", // "audit_log_configs": [ // { // "log_type": "DATA_READ", @@ -254,18 +256,18 @@ func (s *Ancestor) MarshalJSON() ([]byte, error) { // ] // }, // { -// "log_type": "DATA_WRITE", +// "log_type": "DATA_WRITE" // }, // { -// "log_type": "ADMIN_READ", +// "log_type": "ADMIN_READ" // } // ] // }, // { -// "service": "sampleservice.googleapis.com" +// "service": "sampleservice.googleapis.com", // "audit_log_configs": [ // { -// "log_type": "DATA_READ", +// "log_type": "DATA_READ" // }, // { // "log_type": "DATA_WRITE", @@ -332,7 +334,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // ] // }, // { -// "log_type": "DATA_WRITE", +// "log_type": "DATA_WRITE" // } // ] // } @@ -634,7 +636,7 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { // have // serial port connections established. `Constraints` can be configured // by the -// organization's policy adminstrator to fit the needs of the +// organization's policy administrator to fit the needs of the // organzation by // setting Policies for `Constraints` at different locations in // the @@ -642,10 +644,12 @@ func (s *ClearOrgPolicyRequest) MarshalJSON() ([]byte, error) { // resource // hierarchy from higher levels, but can also be overridden. For details // about -// the inheritance rules please read about -// Policies. +// the inheritance rules please read +// about +// [Policies](/resource-manager/reference/rest/v1/Policy). 
// -// `Constraints` have a default behavior determined by the +// `Constr +// aints` have a default behavior determined by the // `constraint_default` // field, which is the enforcement behavior that is used in the absence // of a @@ -656,7 +660,7 @@ type Constraint struct { BooleanConstraint *BooleanConstraint `json:"booleanConstraint,omitempty"` // ConstraintDefault: The evaluation behavior of this constraint in the - // absense of 'Policy'. + // absence of 'Policy'. // // Possible values: // "CONSTRAINT_DEFAULT_UNSPECIFIED" - This is only used for @@ -925,7 +929,9 @@ func (s *FolderOperationError) MarshalJSON() ([]byte, error) { type GetAncestryRequest struct { } -// GetAncestryResponse: Response from the GetAncestry method. +// GetAncestryResponse: Response from the +// projects.getAncestry +// method. type GetAncestryResponse struct { // Ancestor: Ancestors are ordered from bottom to top of the resource // hierarchy. The @@ -1173,9 +1179,8 @@ func (s *Lien) MarshalJSON() ([]byte, error) { } // ListAvailableOrgPolicyConstraintsRequest: The request sent to the -// [ListAvailableOrgPolicyConstraints] -// google.cloud.OrgPolicy.v1.ListAvai -// lableOrgPolicyConstraints] method. +// `ListAvailableOrgPolicyConstraints` method on the +// project, folder, or organization. type ListAvailableOrgPolicyConstraintsRequest struct { // PageSize: Size of the pages to be returned. This is currently // unsupported and will @@ -1214,7 +1219,7 @@ func (s *ListAvailableOrgPolicyConstraintsRequest) MarshalJSON() ([]byte, error) } // ListAvailableOrgPolicyConstraintsResponse: The response returned from -// the ListAvailableOrgPolicyConstraints method. +// the `ListAvailableOrgPolicyConstraints` method. 
// Returns all `Constraints` that could be set at this level of the // hierarchy // (contrast with the response from `ListPolicies`, which returns all @@ -1375,7 +1380,7 @@ func (s *ListOrgPoliciesRequest) MarshalJSON() ([]byte, error) { } // ListOrgPoliciesResponse: The response returned from the -// ListOrgPolicies method. It will be empty +// `ListOrgPolicies` method. It will be empty // if no `Policies` are set on the resource. type ListOrgPoliciesResponse struct { // NextPageToken: Page token used to retrieve the next page. This is @@ -1472,7 +1477,7 @@ type ListPolicy struct { // InheritFromParent: Determines the inheritance behavior for this // `Policy`. // - // By default, a `ListPolicy` set at a resource supercedes any `Policy` + // By default, a `ListPolicy` set at a resource supersedes any `Policy` // set // anywhere up the resource hierarchy. However, if `inherit_from_parent` // is @@ -1770,6 +1775,12 @@ type OrgPolicy struct { // for example, // `constraints/serviceuser.services`. // + // A [list of + // available + // constraints](/resource-manager/docs/organization-policy/org- + // policy-constraints) + // is available. + // // Immutable after creation. Constraint string `json:"constraint,omitempty"` @@ -2143,14 +2154,12 @@ type Project struct { // // Label keys must be between 1 and 63 characters long and must // conform - // to the following regular expression: - // \[a-z\](\[-a-z0-9\]*\[a-z0-9\])?. + // to the following regular expression: a-z{0,62}. // // Label values must be between 0 and 63 characters long and must // conform - // to the regular expression (\[a-z\](\[-a-z0-9\]*\[a-z0-9\])?)?. A - // label - // value can be empty. + // to the regular expression [a-z0-9_-]{0,63}. A label value can be + // empty. // // No more than 256 labels can be associated with a given // resource. 
@@ -2704,7 +2713,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2852,7 +2861,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3001,7 +3010,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3143,7 +3152,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3308,7 +3317,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3476,7 +3485,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3624,7 +3633,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3756,7 +3765,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3906,7 +3915,7 @@ func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4085,7 +4094,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4259,7 +4268,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4395,7 +4404,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4545,7 +4554,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4689,7 +4698,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4840,7 +4849,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() 
http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4989,7 +4998,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5131,7 +5140,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5296,7 +5305,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5466,7 +5475,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5624,7 +5633,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5770,7 +5779,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5915,7 +5924,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6056,7 +6065,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c *ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6218,7 +6227,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6365,7 +6374,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6508,7 +6517,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6648,7 +6657,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6796,7 +6805,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6911,9 +6920,10 @@ type 
ProjectsGetIamPolicyCall struct { // permission // `resourcemanager.projects.getIamPolicy` on the project. // -// For additional information about resource structure and -// identification, -// see [Resource Names](/apis/design/resource_names). +// For additional information about `resource` (e.g. my-project-id) +// structure +// and identification, see [Resource +// Names](/apis/design/resource_names). func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource @@ -6948,7 +6958,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7012,7 +7022,7 @@ func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.getIamPolicy` on the project.\n\nFor additional information about resource structure and identification,\nsee [Resource Names](/apis/design/resource_names).", + // "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.getIamPolicy` on the project.\n\nFor additional information about `resource` (e.g. 
my-project-id) structure\nand identification, see [Resource Names](/apis/design/resource_names).", // "flatPath": "v1/projects/{resource}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getIamPolicy", @@ -7096,7 +7106,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7233,15 +7243,16 @@ func (r *ProjectsService) List() *ProjectsListCall { // Filter sets the optional parameter "filter": An expression for // filtering the results of the request. Filter rules are -// case insensitive. The fields eligible for filtering are: +// case insensitive. Some eligible fields for filtering are: // // + `name` // + `id` // + `labels.` (where *key* is the name of a label) // + `parent.type` // + `parent.id` +// + `lifecycleState` // -// Some examples of using labels as filters: +// Some examples of filter strings: // // | Filter | Description // @@ -7260,10 +7271,18 @@ func (r *ProjectsService) List() *ProjectsListCall { // | // | labels.color:red | The project's label `color` has the value `red`. // | -// | labels.color:red labels.size:big |The project's label `color` -// has -// the value `red` and its label `size` has the value `big`. +// | labels.color:red labels.size:big | The project's label `color` +// | +// : : has the value `red` and its +// : +// : : label`size` has the value +// : +// : : `big`. +// : +// | lifecycleState:DELETE_REQUESTED | Only show projects that are // | +// : : pending deletion. 
+// : // // If no filter is specified, the call will return projects for which // the user @@ -7338,7 +7357,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7404,7 +7423,7 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ `labels.\u003ckey\u003e` (where *key* is the name of a label)\n+ `parent.type`\n+ `parent.id`\n\nSome examples of using labels as filters:\n\n| Filter | Description |\n|------------------|-----------------------------------------------------|\n| name:how* | The project's name starts with \"how\". |\n| name:Howl | The project's name is `Howl` or `howl`. |\n| name:HOWL | Equivalent to above. |\n| NAME:howl | Equivalent to above. |\n| labels.color:* | The project has the label `color`. |\n| labels.color:red | The project's label `color` has the value `red`. |\n| labels.color:red\u0026nbsp;labels.size:big |The project's label `color` has\n the value `red` and its label `size` has the value `big`. |\n\nIf no filter is specified, the call will return projects for which the user\nhas the `resourcemanager.projects.get` permission.\n\nNOTE: To perform a by-parent query (eg., what projects are directly in a\nFolder), the caller must have the `resourcemanager.projects.list`\npermission on the parent and the filter must contain both a `parent.type`\nand a `parent.id` restriction\n(example: \"parent.type:folder parent.id:123\"). 
In this case an alternate\nsearch index is used which provides more consistent results.\n\nOptional.", + // "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. Some eligible fields for filtering are:\n\n+ `name`\n+ `id`\n+ `labels.\u003ckey\u003e` (where *key* is the name of a label)\n+ `parent.type`\n+ `parent.id`\n+ `lifecycleState`\n\nSome examples of filter strings:\n\n| Filter | Description |\n|------------------|-----------------------------------------------------|\n| name:how* | The project's name starts with \"how\". |\n| name:Howl | The project's name is `Howl` or `howl`. |\n| name:HOWL | Equivalent to above. |\n| NAME:howl | Equivalent to above. |\n| labels.color:* | The project has the label `color`. |\n| labels.color:red | The project's label `color` has the value `red`. |\n| labels.color:red\u0026nbsp;labels.size:big | The project's label `color` |\n: : has the value `red` and its :\n: : label`size` has the value :\n: : `big`. :\n| lifecycleState:DELETE_REQUESTED | Only show projects that are |\n: : pending deletion. :\n\nIf no filter is specified, the call will return projects for which the user\nhas the `resourcemanager.projects.get` permission.\n\nNOTE: To perform a by-parent query (eg., what projects are directly in a\nFolder), the caller must have the `resourcemanager.projects.list`\npermission on the parent and the filter must contain both a `parent.type`\nand a `parent.id` restriction\n(example: \"parent.type:folder parent.id:123\"). 
In this case an alternate\nsearch index is used which provides more consistent results.\n\nOptional.", // "location": "query", // "type": "string" // }, @@ -7500,7 +7519,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7665,7 +7684,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7793,8 +7812,23 @@ type ProjectsSetIamPolicyCall struct { } // SetIamPolicy: Sets the IAM access control policy for the specified -// Project. Overwrites -// any existing policy. +// Project. +// +// CAUTION: This method will replace the existing policy, and cannot be +// used +// to append additional IAM settings. +// +// NOTE: Removing service accounts from policies or changing their roles +// can +// render services completely inoperable. It is important to understand +// how +// the service account is being used before removing or updating its +// roles. +// +// For additional information about `resource` (e.g. my-project-id) +// structure +// and identification, see [Resource +// Names](/apis/design/resource_names). // // The following constraints apply when using `setIamPolicy()`: // @@ -7851,18 +7885,6 @@ type ProjectsSetIamPolicyCall struct { // is // rectified. 
// -// + This method will replace the existing policy, and cannot be used -// to -// append additional IAM settings. -// -// Note: Removing service accounts from policies or changing their -// roles -// can render services completely inoperable. It is important to -// understand -// how the service account is being used before removing or updating -// its -// roles. -// // Authorization requires the Google IAM // permission // `resourcemanager.projects.setIamPolicy` on the project @@ -7900,7 +7922,7 @@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7964,7 +7986,7 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. 
The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", + // "description": "Sets the IAM access control policy for the specified Project.\n\nCAUTION: This method will replace the existing policy, and cannot be used\nto append additional IAM settings.\n\nNOTE: Removing service accounts from policies or changing their roles can\nrender services completely inoperable. It is important to understand how\nthe service account is being used before removing or updating its roles.\n\nFor additional information about `resource` (e.g. 
my-project-id) structure\nand identification, see [Resource Names](/apis/design/resource_names).\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted to a `user`, `serviceAccount`, or a group\nthat is part of an organization. For example,\ngroup@myownpersonaldomain.com could be added as an owner to a project in\nthe myownpersonaldomain.com organization, but not the examplepetstore.com\norganization.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ You can only grant ownership of a project to a member by using the\nGCP Console. Inviting a member will deliver an invitation email that\nthey must accept. An invitation email is not generated if you are\ngranting a role other than owner, or if both the member you are inviting\nand the project are part of your organization.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. 
Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\nAuthorization requires the Google IAM permission\n`resourcemanager.projects.setIamPolicy` on the project", // "flatPath": "v1/projects/{resource}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.setIamPolicy", @@ -8045,7 +8067,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8153,6 +8175,11 @@ type ProjectsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified Project. // +// For additional information about `resource` (e.g. my-project-id) +// structure +// and identification, see [Resource +// Names](/apis/design/resource_names). +// // There are no permissions required for making this API call. 
func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -8188,7 +8215,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8252,7 +8279,7 @@ func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified Project.\n\nThere are no permissions required for making this API call.", + // "description": "Returns permissions that a caller has on the specified Project.\n\nFor additional information about `resource` (e.g. 
my-project-id) structure\nand identification, see [Resource Names](/apis/design/resource_names).\n\nThere are no permissions required for making this API call.", // "flatPath": "v1/projects/{resource}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.testIamPermissions", @@ -8336,7 +8363,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8479,7 +8506,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 4d0c2a8f807cf..afdbe854289bb 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/PZk7-wpz6XHgQMe6JksWYBgwpSs\"", + "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/L9LOTkeGMiZOvax0wDyduhAKK-M\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -185,7 +185,7 @@ ] }, "list": { - "description": 
"Retrieves a list of accelerator types available to the specified project.", + "description": "Retrieves a list of accelerator types that are available to the specified project.", "httpMethod": "GET", "id": "compute.acceleratorTypes.list", "parameterOrder": [ @@ -1398,7 +1398,7 @@ ] }, "insert": { - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Understanding backend services for more information.", + "description": "Creates a BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", "id": "compute.backendServices.insert", "parameterOrder": [ @@ -1480,7 +1480,7 @@ ] }, "patch": { - "description": "Patches the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.backendServices.patch", "parameterOrder": [ @@ -1561,7 +1561,7 @@ ] }, "update": { - "description": "Updates the specified BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + "description": "Updates the specified BackendService resource with the data included in the request. 
For more information, see Backend services overview.", "httpMethod": "PUT", "id": "compute.backendServices.update", "parameterOrder": [ @@ -5675,7 +5675,7 @@ ] }, "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given managed instance group.", + "description": "Lists all errors thrown by actions on instances for a given managed instance group. The filter and orderBy query parameters are not supported.", "httpMethod": "GET", "id": "compute.instanceGroupManagers.listErrors", "parameterOrder": [ @@ -5738,7 +5738,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -6326,7 +6326,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group.", + "description": "Lists the instances in the specified instance group. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.instanceGroups.listInstances", "parameterOrder": [ @@ -7465,7 +7465,7 @@ ] }, "listReferrers": { - "description": "Retrieves the list of referrers to instances contained within the specified zone. 
For more information, read Viewing Referrers to VM Instances.", + "description": "Retrieves a list of resources that refer to the VM instance specified in the request. For example, if the VM instance is part of a managed instance group, the referrers list includes the managed instance group. For more information, read Viewing Referrers to VM Instances.", "httpMethod": "GET", "id": "compute.instances.listReferrers", "parameterOrder": [ @@ -8027,7 +8027,7 @@ ] }, "setScheduling": { - "description": "Sets an instance's scheduling options.", + "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.", "httpMethod": "POST", "id": "compute.instances.setScheduling", "parameterOrder": [ @@ -13126,7 +13126,7 @@ ] }, "insert": { - "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Understanding backend services for more information.", + "description": "Creates a regional BackendService resource in the specified project using the data included in the request. For more information, see Backend services overview.", "httpMethod": "POST", "id": "compute.regionBackendServices.insert", "parameterOrder": [ @@ -13224,7 +13224,7 @@ ] }, "patch": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates the specified regional BackendService resource with the data included in the request. 
For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "httpMethod": "PATCH", "id": "compute.regionBackendServices.patch", "parameterOrder": [ @@ -13273,7 +13273,7 @@ ] }, "update": { - "description": "Updates the specified regional BackendService resource with the data included in the request. There are several Understanding backend services to keep in mind when updating a backend service. Read Understanding backend services for more information.", + "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview.", "httpMethod": "PUT", "id": "compute.regionBackendServices.update", "parameterOrder": [ @@ -14194,6 +14194,242 @@ } } }, + "regionHealthCheckServices": { + "methods": { + "delete": { + "description": "Deletes the specified regional HealthCheckService.", + "httpMethod": "DELETE", + "id": "compute.regionHealthCheckServices.delete", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified regional HealthCheckService resource.", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.get", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "response": { + "$ref": "HealthCheckService" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a regional HealthCheckService resource in the specified project and region using the data included in the request.", + "httpMethod": "POST", + "id": "compute.regionHealthCheckServices.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices", + "request": { + "$ref": "HealthCheckService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Lists all the HealthCheckService resources that have been configured for the specified project in the given region.", + "httpMethod": "GET", + "id": "compute.regionHealthCheckServices.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices", + "response": { + "$ref": "HealthCheckServicesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Updates the specified regional HealthCheckService resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "httpMethod": "PATCH", + "id": "compute.regionHealthCheckServices.patch", + "parameterOrder": [ + "project", + "region", + "healthCheckService" + ], + "parameters": { + "healthCheckService": { + "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/healthCheckServices/{healthCheckService}", + "request": { + "$ref": "HealthCheckService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "regionHealthChecks": { "methods": { "delete": { @@ -14848,7 +15084,7 @@ ] }, "listErrors": { - "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. 
The filter and orderBy query parameters are not supported.", "httpMethod": "GET", "id": "compute.regionInstanceGroupManagers.listErrors", "parameterOrder": [ @@ -14911,7 +15147,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", "parameterOrder": [ @@ -15315,7 +15551,7 @@ ] }, "listInstances": { - "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. 
The orderBy query parameter is not supported.", "httpMethod": "POST", "id": "compute.regionInstanceGroups.listInstances", "parameterOrder": [ @@ -15429,20 +15665,20 @@ } } }, - "regionOperations": { + "regionNotificationEndpoints": { "methods": { "delete": { - "description": "Deletes the specified region-specific Operations resource.", + "description": "Deletes the specified NotificationEndpoint in the given region", "httpMethod": "DELETE", - "id": "compute.regionOperations.delete", + "id": "compute.regionNotificationEndpoints.delete", "parameterOrder": [ "project", "region", - "operation" + "notificationEndpoint" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to delete.", + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to delete.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15456,31 +15692,39 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, "get": { - "description": "Retrieves the specified region-specific Operations resource.", + "description": "Returns the specified NotificationEndpoint resource in the given region.", "httpMethod": "GET", - "id": "compute.regionOperations.get", + "id": "compute.regionNotificationEndpoints.get", "parameterOrder": [ "project", "region", - "operation" + "notificationEndpoint" ], "parameters": { - "operation": { - "description": "Name of the Operations resource to return.", + "notificationEndpoint": { + "description": "Name of the NotificationEndpoint resource to return.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -15494,16 +15738,16 @@ "type": "string" }, "region": { - "description": "Name of the region for this request.", + "description": "Name of the region scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "{project}/regions/{region}/operations/{operation}", + "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", "response": { - "$ref": "Operation" + "$ref": "NotificationEndpoint" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15511,10 +15755,192 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "description": "Create a NotificationEndpoint in the specified project in the given region using the 
parameters that are included in the request.", + "httpMethod": "POST", + "id": "compute.regionNotificationEndpoints.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "{project}/regions/{region}/notificationEndpoints", + "request": { + "$ref": "NotificationEndpoint" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "description": "Retrieves a list of Operation resources contained within the specified region.", + "description": "Lists the NotificationEndpoints for a project in the given region.", "httpMethod": "GET", - "id": "compute.regionOperations.list", + "id": "compute.regionNotificationEndpoints.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/notificationEndpoints", + "response": { + "$ref": "NotificationEndpointList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "description": "Deletes the specified region-specific Operations resource.", + "httpMethod": "DELETE", + "id": "compute.regionOperations.delete", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to delete.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}", + "scopes": [ + 
"https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Retrieves the specified region-specific Operations resource.", + "httpMethod": "GET", + "id": "compute.regionOperations.get", + "parameterOrder": [ + "project", + "region", + "operation" + ], + "parameters": { + "operation": { + "description": "Name of the Operations resource to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/regions/{region}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves a list of Operation resources contained within the specified region.", + "httpMethod": "GET", + "id": "compute.regionOperations.list", "parameterOrder": [ "project", "region" @@ -19662,7 +20088,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable subnetworks in the project. 
The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", + "description": "Retrieves an aggregated list of all usable subnetworks in the project.", "httpMethod": "GET", "id": "compute.subnetworks.listUsable", "parameterOrder": [ @@ -23387,7 +23813,7 @@ } } }, - "revision": "20200501", + "revision": "20200612", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -23433,7 +23859,7 @@ "type": "string" }, "maximumCardsPerInstance": { - "description": "[Output Only] Maximum accelerator cards allowed per instance.", + "description": "[Output Only] Maximum number of accelerator cards allowed per instance.", "format": "int32", "type": "integer" }, @@ -23443,7 +23869,7 @@ "type": "string" }, "selfLink": { - "description": "[Output Only] Server-defined fully-qualified URL for this resource.", + "description": "[Output Only] Server-defined, fully qualified URL for this resource.", "type": "string" }, "zone": { @@ -24316,7 +24742,7 @@ "type": "object" }, "AllocationSpecificSKUAllocationReservedInstanceProperties": { - "description": "Properties of the SKU instances being reserved.", + "description": "Properties of the SKU instances being reserved. Next ID: 9", "id": "AllocationSpecificSKUAllocationReservedInstanceProperties", "properties": { "guestAccelerators": { @@ -24337,6 +24763,18 @@ "description": "Specifies type of machine (name only) which has fixed number of vCPUs and fixed amount of memory. This also includes specifying custom machine type following custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern.", "type": "string" }, + "maintenanceInterval": { + "description": "Specifies whether this VM may be a stable fleet VM. 
Setting this to \"Periodic\" designates this VM as a Stable Fleet VM.\n\nSee go/stable-fleet-ug for more details.", + "enum": [ + "AS_NEEDED", + "PERIODIC" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, "minCpuPlatform": { "description": "Minimum cpu platform the reservation.", "type": "string" @@ -24534,7 +24972,7 @@ "type": "object" }, "AuditConfig": { - "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", }, { \"log_type\": \"ADMIN_READ\", } ] }, { \"service\": \"sampleservice.googleapis.com\" \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", + "description": "Specifies the audit configuration for a service. The configuration determines which permission types are logged, and what identities, if any, are exempted from logging. 
An AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service, the union of the two AuditConfigs is used for that service: the log_types specified in each AuditConfig are enabled, and the exempted_members in each AuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n{ \"audit_configs\": [ { \"service\": \"allServices\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" }, { \"log_type\": \"ADMIN_READ\" } ] }, { \"service\": \"sampleservice.googleapis.com\", \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\" }, { \"log_type\": \"DATA_WRITE\", \"exempted_members\": [ \"user:aliya@example.com\" ] } ] } ] }\n\nFor sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging, and aliya@example.com from DATA_WRITE logging.", "id": "AuditConfig", "properties": { "auditLogConfigs": { @@ -24559,7 +24997,7 @@ "type": "object" }, "AuditLogConfig": { - "description": "Provides the configuration for logging a type of permissions. Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\", } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", + "description": "Provides the configuration for logging a type of permissions. 
Example:\n\n{ \"audit_log_configs\": [ { \"log_type\": \"DATA_READ\", \"exempted_members\": [ \"user:jose@example.com\" ] }, { \"log_type\": \"DATA_WRITE\" } ] }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from DATA_READ logging.", "id": "AuditLogConfig", "properties": { "exemptedMembers": { @@ -24943,6 +25381,7 @@ "MISSING_CUSTOM_METRIC_DATA_POINTS", "MISSING_LOAD_BALANCING_DATA_POINTS", "MODE_OFF", + "MODE_ONLY_SCALE_OUT", "MODE_ONLY_UP", "MORE_THAN_ONE_BACKEND_SERVICE", "NOT_ENOUGH_QUOTA_AVAILABLE", @@ -24969,6 +25408,7 @@ "", "", "", + "", "" ], "type": "string" @@ -25109,9 +25549,11 @@ "enum": [ "OFF", "ON", + "ONLY_SCALE_OUT", "ONLY_UP" ], "enumDescriptions": [ + "", "", "", "" @@ -25194,7 +25636,7 @@ "type": "string" }, "capacityScaler": { - "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available Capacity. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", + "description": "A multiplier applied to the group's maximum servicing capacity (based on UTILIZATION, RATE or CONNECTION). Default value is 1, which means the group will serve up to 100% of its configured capacity (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available capacity. Valid range is 0.0 and [0.1,1.0]. You cannot configure a setting larger than 0 and smaller than 0.1. 
You cannot configure a setting of 0 when there is only one backend attached to the backend service.\n\nThis cannot be used for internal load balancing.", "format": "float", "type": "number" }, @@ -25426,7 +25868,7 @@ "type": "object" }, "BackendService": { - "description": "Represents a Backend Service resource.\n\nA backend service contains configuration values for Google Cloud Platform load balancing services.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, read Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", + "description": "Represents a Backend Service resource.\n\nA backend service defines how Google Cloud load balancers distribute traffic. The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly.\n\nBackend services in Google Compute Engine can be either regionally or globally scoped.\n\n* [Global](/compute/docs/reference/rest/{$api_version}/backendServices) * [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendServices)\n\nFor more information, see Backend Services.\n\n(== resource_for {$api_version}.backendService ==)", "id": "BackendService", "properties": { "affinityCookieTtlSec": { @@ -25492,7 +25934,8 @@ "type": "array" }, "iap": { - "$ref": "BackendServiceIAP" + "$ref": "BackendServiceIAP", + "description": "The configurations for Identity-Aware Proxy on this resource." }, "id": { "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", @@ -25523,7 +25966,7 @@ "type": "string" }, "localityLbPolicy": { - "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. \n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, refer to https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nIf sessionAffinity is not NONE, and this field is not set to \u003eMAGLEV or RING_HASH, session affinity settings will not take effect.", + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: \n- ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. 
\n- LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. \n- RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. \n- RANDOM: The load balancer selects a random healthy host. \n- ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. \n- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 \n\nThis field is applicable to either: \n- A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. \n- A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. \n\nIf sessionAffinity is not NONE, and this field is not set to \u003eMAGLEV or RING_HASH, session affinity settings will not take effect.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -25625,7 +26068,7 @@ "type": "string" }, "timeoutSec": { - "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information read, Backend service settings The default is 30 seconds.", + "description": "The backend service timeout has a different meaning depending on the type of load balancer. 
For more information see, Backend service settings The default is 30 seconds.", "format": "int32", "type": "integer" } @@ -25791,6 +26234,13 @@ "BackendServiceGroupHealth": { "id": "BackendServiceGroupHealth", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint group.", + "type": "object" + }, "healthStatus": { "description": "Health state of the backend instances or endpoints in requested instance or network endpoint group, determined based on configured health checks.", "items": { @@ -25811,12 +26261,15 @@ "id": "BackendServiceIAP", "properties": { "enabled": { + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty.", "type": "boolean" }, "oauth2ClientId": { + "description": "OAuth2 client ID to use for the authentication flow.", "type": "string" }, "oauth2ClientSecret": { + "description": "OAuth2 client secret to use for the authentication flow. For security reasons, this value cannot be retrieved via the API. Instead, the SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field.", "type": "string" }, "oauth2ClientSecretSha256": { @@ -26912,7 +27365,7 @@ "type": "string" }, "physicalBlockSizeBytes": { - "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. Currently supported sizes are 4096 and 16384, other sizes may be added in the future. If an unsupported value is requested, the error message will list the supported values for the caller's project.", + "description": "Physical block size of the persistent disk, in bytes. If not present in a request, a default value is used. The currently supported size is 4096, other sizes may be added in the future. 
If an unsupported value is requested, the error message will list the supported values for the caller's project.", "format": "int64", "type": "string" }, @@ -26943,6 +27396,14 @@ "format": "int64", "type": "string" }, + "sourceDisk": { + "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk", + "type": "string" + }, + "sourceDiskId": { + "description": "[Output Only] The unique ID of the disk used to create this disk. This value identifies the exact disk that was used to create this persistent disk. For example, if you created the persistent disk from a disk that was later deleted and recreated under the same name, the source disk ID would identify the exact version of the disk that was used.", + "type": "string" + }, "sourceImage": { "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-9 to use the latest Debian 9 image:\nprojects/debian-cloud/global/images/family/debian-9\n\n\nAlternatively, use a specific version of a public operating system image:\nprojects/debian-cloud/global/images/debian-9-stretch-vYYYYMMDD\n\n\nTo create a disk with a custom image that you created, specify the image name in the following format:\nglobal/images/my-custom-image\n\n\nYou can also specify a custom image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\nglobal/images/family/my-image-family", "type": "string" @@ -26986,7 +27447,7 @@ "type": "string" }, "type": { - "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", + "description": "URL of the disk type resource describing which disk type to use to create the disk. Provide this when creating the disk. For example: projects/project/zones/zone/diskTypes/pd-standard or pd-ssd", "type": "string" }, "users": { @@ -28328,7 +28789,7 @@ }, "logConfig": { "$ref": "FirewallLogConfig", - "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver." + "description": "This field denotes the logging options for a particular firewall rule. If logging is enabled, logs will be exported to Cloud Logging." }, "name": { "annotations": { @@ -29170,6 +29631,7 @@ "FEATURE_TYPE_UNSPECIFIED", "MULTI_IP_SUBNET", "SECURE_BOOT", + "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" @@ -29180,6 +29642,7 @@ "", "", "", + "", "" ], "type": "string" @@ -29387,6 +29850,10 @@ "description": "Type of the resource.", "type": "string" }, + "logConfig": { + "$ref": "HealthCheckLogConfig", + "description": "Configure logging on this health check." + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -29551,6 +30018,17 @@ }, "type": "object" }, + "HealthCheckLogConfig": { + "description": "Configuration of logging on a health check. If logging is enabled, logs will be exported to Stackdriver.", + "id": "HealthCheckLogConfig", + "properties": { + "enable": { + "description": "Indicates whether or not to export logs. This is false by default, which means no health check logging will be done.", + "type": "boolean" + } + }, + "type": "object" + }, "HealthCheckReference": { "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", "id": "HealthCheckReference", @@ -29561,6 +30039,203 @@ }, "type": "object" }, + "HealthCheckService": { + "description": "Represents a Health-Check as a Service resource.\n\n(== resource_for {$api_version}.regionHealthCheckServices ==)", + "id": "HealthCheckService", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "fingerprint": { + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a HealthCheckService. 
An up-to-date fingerprint must be provided in order to patch/update the HealthCheckService; Otherwise, the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve the HealthCheckService.", + "format": "byte", + "type": "string" + }, + "healthChecks": { + "description": "List of URLs to the HealthCheck resources. Must have at least one HealthCheck, and not more than 10. HealthCheck resources must have portSpecification=USE_SERVING_PORT. For regional HealthCheckService, the HealthCheck must be regional and in the same region. For global HealthCheckService, HealthCheck must be global. Mix of regional and global HealthChecks is not supported. Multiple regional HealthChecks must belong to the same region. Regional HealthChecks\u003c/code? must belong to the same region as zones of NEGs.", + "items": { + "type": "string" + }, + "type": "array" + }, + "healthStatusAggregationPolicy": { + "description": "Optional. Policy for how the results from multiple health checks for the same endpoint are aggregated. Defaults to NO_AGGREGATION if unspecified. \n- NO_AGGREGATION. An EndpointHealth message is returned for each backend in the health check service. \n- AND. If any backend's health check reports UNHEALTHY, then UNHEALTHY is the HealthState of the entire health check service. If all backend's are healthy, the HealthState of the health check service is HEALTHY. .", + "enum": [ + "AND", + "NO_AGGREGATION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#healthCheckService", + "description": "[Output only] Type of the resource. Always compute#healthCheckServicefor health check services.", + "type": "string" + }, + "name": { + "description": "Name of the resource. 
The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "networkEndpointGroups": { + "description": "List of URLs to the NetworkEndpointGroup resources. Must not have more than 100. For regional HealthCheckService, NEGs must be in zones in the region of the HealthCheckService.", + "items": { + "type": "string" + }, + "type": "array" + }, + "notificationEndpoints": { + "description": "List of URLs to the NotificationEndpoint resources. Must not have more than 10. A list of endpoints for receiving notifications of change in health status. For regional HealthCheckService, NotificationEndpoint must be regional and in the same region. For global HealthCheckService, NotificationEndpoint must be global.", + "items": { + "type": "string" + }, + "type": "array" + }, + "region": { + "description": "[Output Only] URL of the region where the health check service resides. This field is not applicable to global health check services. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "HealthCheckServiceReference": { + "description": "A full or valid partial URL to a health check service. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/regions/us-west1/healthCheckServices/health-check-service \n- projects/project-id/regions/us-west1/healthCheckServices/health-check-service \n- regions/us-west1/healthCheckServices/health-check-service", + "id": "HealthCheckServiceReference", + "properties": { + "healthCheckService": { + "type": "string" + } + }, + "type": "object" + }, + "HealthCheckServicesList": { + "id": "HealthCheckServicesList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of HealthCheckService resources.", + "items": { + "$ref": "HealthCheckService" + }, + "type": "array" + }, + "kind": { + "default": "compute#healthCheckServicesList", + "description": "[Output Only] Type of the resource. Always compute#healthCheckServicesList for lists of HealthCheckServices.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "HealthChecksAggregatedList": { "id": "HealthChecksAggregatedList", "properties": { @@ -29770,6 +30445,13 @@ "HealthStatus": { "id": "HealthStatus", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations for network endpoint.", + "type": "object" + }, "healthState": { "description": "Health state of the instance.", "enum": [ @@ -29813,6 +30495,10 @@ "$ref": "HealthCheckReference", "description": "URL of the health check associated with the health state of the network endpoint." }, + "healthCheckService": { + "$ref": "HealthCheckServiceReference", + "description": "URL of the health check service associated with the health state of the network endpoint." + }, "healthState": { "description": "Health state of the network endpoint determined based on the health checks configured.", "enum": [ @@ -30290,7 +30976,7 @@ }, "urlRewrite": { "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service." + "description": "The spec to modify the URL of the request, prior to forwarding the request to the matched service.\nurlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers." 
}, "weightedBackendServices": { "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non 0 number.\nOnce a backendService is identified and before forwarding the request to the backend service, advanced routing actions like Url rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", @@ -30702,7 +31388,7 @@ "type": "string" }, "sourceImage": { - "description": "URL of the source image used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "description": "URL of the source image used to create this image.\n\nIn order to create an image, you must provide the full or partial URL of one of the following: \n- The selfLink URL \n- This property \n- The rawDisk.source URL \n- The sourceDisk URL", "type": "string" }, "sourceImageEncryptionKey": { @@ -30714,7 +31400,7 @@ "type": "string" }, "sourceSnapshot": { - "description": "URL of the source snapshot used to create this image. This can be a full or valid partial URL. 
You must provide exactly one of: \n- this property, or \n- the sourceImage property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image.", + "description": "URL of the source snapshot used to create this image.\n\nIn order to create an image, you must provide the full or partial URL of one of the following: \n- The selfLink URL \n- This property \n- The sourceImage URL \n- The rawDisk.source URL \n- The sourceDisk URL", "type": "string" }, "sourceSnapshotEncryptionKey": { @@ -31064,7 +31750,7 @@ "type": "boolean" }, "status": { - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED.", "enum": [ "DEPROVISIONING", "PROVISIONING", @@ -32880,22 +33566,22 @@ "id": "InstanceProperties", "properties": { "canIpForward": { - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding documentation for more information.", + "description": "Enables instances created based on these properties to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. 
See the Enable IP forwarding documentation for more information.", "type": "boolean" }, "description": { - "description": "An optional text description for the instances that are created from this instance template.", + "description": "An optional text description for the instances that are created from these properties.", "type": "string" }, "disks": { - "description": "An array of disks that are associated with the instances that are created from this template.", + "description": "An array of disks that are associated with the instances that are created from these properties.", "items": { "$ref": "AttachedDisk" }, "type": "array" }, "guestAccelerators": { - "description": "A list of guest accelerator cards' type and count to use for instances created from the instance template.", + "description": "A list of guest accelerator cards' type and count to use for instances created from these properties.", "items": { "$ref": "AcceleratorConfig" }, @@ -32905,7 +33591,7 @@ "additionalProperties": { "type": "string" }, - "description": "Labels to apply to instances that are created from this template.", + "description": "Labels to apply to instances that are created from these properties.", "type": "object" }, "machineType": { @@ -32914,15 +33600,15 @@ "compute.instanceTemplates.insert" ] }, - "description": "The machine type to use for instances that are created from this template.", + "description": "The machine type to use for instances that are created from these properties.", "type": "string" }, "metadata": { "$ref": "Metadata", - "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." + "description": "The metadata key/value pairs to assign to instances that are created from these properties. These pairs can consist of custom metadata or predefined keys. 
See Project and instance metadata for more information." }, "minCpuPlatform": { - "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", + "description": "Minimum cpu/platform to be used by instances. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform.", "type": "string" }, "networkInterfaces": { @@ -32933,7 +33619,7 @@ "type": "array" }, "privateIpv6GoogleAccess": { - "description": "The private IPv6 google access type for the VM. If not specified, use INHERIT_FROM_SUBNETWORK as default.", + "description": "The private IPv6 google access type for VMs. If not specified, use INHERIT_FROM_SUBNETWORK as default.", "enum": [ "ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE", "ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE", @@ -32948,10 +33634,10 @@ }, "reservationAffinity": { "$ref": "ReservationAffinity", - "description": "Specifies the reservations that this instance can consume from." + "description": "Specifies the reservations that instances can consume from." }, "resourcePolicies": { - "description": "Resource policies (names, not ULRs) applied to instances created from this template.", + "description": "Resource policies (names, not ULRs) applied to instances created from these properties.", "items": { "type": "string" }, @@ -32959,10 +33645,10 @@ }, "scheduling": { "$ref": "Scheduling", - "description": "Specifies the scheduling options for the instances that are created from this template." 
+ "description": "Specifies the scheduling options for the instances that are created from these properties." }, "serviceAccounts": { - "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from this template. Use metadata queries to obtain the access tokens for these instances.", + "description": "A list of service accounts with specified scopes. Access tokens for these service accounts are available to the instances that are created from these properties. Use metadata queries to obtain the access tokens for these instances.", "items": { "$ref": "ServiceAccount" }, @@ -32973,7 +33659,7 @@ }, "tags": { "$ref": "Tags", - "description": "A list of tags to apply to the instances that are created from this template. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." + "description": "A list of tags to apply to the instances that are created from these properties. The tags identify valid sources or targets for network firewalls. The setTags method can modify this list of tags. Each tag within the list must comply with RFC1035." } }, "type": "object" @@ -34096,7 +34782,7 @@ "type": "object" }, "InterconnectCircuitInfo": { - "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only.", "id": "InterconnectCircuitInfo", "properties": { "customerDemarcId": { @@ -34602,7 +35288,7 @@ "type": "object" }, "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect. 
Next id: 9", + "description": "Description of a planned outage on this Interconnect.", "id": "InterconnectOutageNotification", "properties": { "affectedCircuits": { @@ -35745,7 +36431,7 @@ "type": "string" }, "autoCreateSubnetworks": { - "description": "When set to true, the VPC network is created in \"auto\" mode. When set to false, the VPC network is created in \"custom\" mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.", + "description": "When set to true, the VPC network is created in auto mode. When set to false, the VPC network is created in custom mode.\n\nAn auto mode VPC network starts with one subnet per region. Each subnet has a predetermined range as described in Auto mode VPC network IP ranges.\n\nFor custom mode VPC networks, you can add subnets using the subnetworks insert method.", "type": "boolean" }, "creationTimestamp": { @@ -35807,9 +36493,16 @@ "type": "object" }, "NetworkEndpoint": { - "description": "The network endpoint. Next ID: 7", + "description": "The network endpoint.", "id": "NetworkEndpoint", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint.", + "type": "object" + }, "fqdn": { "description": "Optional fully qualified domain name of network endpoint. This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.", "type": "string" @@ -35834,6 +36527,13 @@ "description": "Represents a collection of network endpoints.\n\nA network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs, see Setting up internet NEGs or Setting up zonal NEGs. 
(== resource_for {$api_version}.networkEndpointGroups ==) (== resource_for {$api_version}.globalNetworkEndpointGroups ==)", "id": "NetworkEndpointGroup", "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata defined as annotations on the network endpoint group.", + "type": "object" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -36718,7 +37418,7 @@ "type": "string" }, "nodeTemplate": { - "description": "The URL of the node template to which this node group belongs.", + "description": "URL of the node template to create the node group from.", "type": "string" }, "selfLink": { @@ -37294,7 +37994,7 @@ "type": "object" }, "NodeTemplate": { - "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. (== resource_for {$api_version}.nodeTemplates ==) (== NextID: 19 ==)", + "description": "Represent a sole-tenant Node Template resource.\n\nYou can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances. (== resource_for {$api_version}.nodeTemplates ==)", "id": "NodeTemplate", "properties": { "creationTimestamp": { @@ -38081,6 +38781,187 @@ }, "type": "object" }, + "NotificationEndpoint": { + "description": "Represents a notification endpoint.\n\nA notification endpoint resource defines an endpoint to receive notifications when there are status changes detected by the associated health check service.\n\nFor more information, see Health checks overview. 
(== resource_for {$api_version}.notificationEndpoint ==) (== resource_for {$api_version}.regionNotificationEndpoints ==)", + "id": "NotificationEndpoint", + "properties": { + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "An optional description of this resource. Provide this property when you create the resource.", + "type": "string" + }, + "grpcSettings": { + "$ref": "NotificationEndpointGrpcSettings", + "description": "Settings of the gRPC notification endpoint including the endpoint URL and the retry duration." + }, + "id": { + "description": "[Output Only] A unique identifier for this resource type. The server generates this identifier.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#notificationEndpoint", + "description": "[Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints.", + "type": "string" + }, + "name": { + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" + }, + "region": { + "description": "[Output Only] URL of the region where the notification endpoint resides. This field applies only to the regional resource. You must specify this field as part of the HTTP request URL. 
It is not settable as a field in the request body.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + } + }, + "type": "object" + }, + "NotificationEndpointGrpcSettings": { + "description": "Represents a gRPC setting that describes one gRPC notification endpoint and the retry duration attempting to send notification to this endpoint.", + "id": "NotificationEndpointGrpcSettings", + "properties": { + "authority": { + "description": "Optional. If specified, this field is used to set the authority header by the sender of notifications. See https://tools.ietf.org/html/rfc7540#section-8.1.2.3", + "type": "string" + }, + "endpoint": { + "description": "Endpoint to which gRPC notifications are sent. This must be a valid gRPCLB DNS name.", + "type": "string" + }, + "payloadName": { + "description": "Optional. If specified, this field is used to populate the \"name\" field in gRPC requests.", + "type": "string" + }, + "resendInterval": { + "$ref": "Duration", + "description": "Optional. This field is used to configure how often to send a full update of all non-healthy backends. If unspecified, full updates are not sent. If specified, must be in the range between 600 seconds to 3600 seconds. Nanos are disallowed." + }, + "retryDurationSec": { + "description": "How much time (in seconds) is spent attempting notification retries until a successful response is received. Default is 30s. Limit is 20m (1200s). 
Must be a positive number.", + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, + "NotificationEndpointList": { + "id": "NotificationEndpointList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of NotificationEndpoint resources.", + "items": { + "$ref": "NotificationEndpoint" + }, + "type": "array" + }, + "kind": { + "default": "compute#notificationEndpointList", + "description": "[Output Only] Type of the resource. Always compute#notificationEndpoint for notification endpoints.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "Operation": { "description": "Represents an Operation resource.\n\nGoogle Compute Engine has three Operation resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/globalOperations) * [Regional](/compute/docs/reference/rest/{$api_version}/regionOperations) * [Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)\n\nYou can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses.\n\nOperations can be global, regional or zonal. \n- For global operations, use the `globalOperations` resource. \n- For regional operations, use the `regionOperations` resource. \n- For zonal operations, use the `zonalOperations` resource. \n\nFor more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==)", "id": "Operation", @@ -39185,7 +40066,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. 
If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathMatcher's defaultRouteAction." }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. 
Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.\nAuthorization requires one or more of the following Google IAM permissions on the specified resource default_service: \n- compute.backendBuckets.use \n- compute.backendServices.use", @@ -39237,7 +40118,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set." + "description": "In response to a matching path, the load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices.\nOnly one of routeAction or urlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within a pathRule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendService s. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -39485,18 +40366,22 @@ "metric": { "description": "[Output Only] Name of the quota metric.", "enum": [ + "A2_CPUS", "AFFINITY_GROUPS", "AUTOSCALERS", "BACKEND_BUCKETS", "BACKEND_SERVICES", "C2_CPUS", "COMMITMENTS", + "COMMITTED_A2_CPUS", "COMMITTED_C2_CPUS", "COMMITTED_CPUS", "COMMITTED_LICENSES", "COMMITTED_LOCAL_SSD_TOTAL_GB", + "COMMITTED_MEMORY_OPTIMIZED_CPUS", "COMMITTED_N2D_CPUS", "COMMITTED_N2_CPUS", + "COMMITTED_NVIDIA_A100_GPUS", "COMMITTED_NVIDIA_K80_GPUS", "COMMITTED_NVIDIA_P100_GPUS", "COMMITTED_NVIDIA_P4_GPUS", @@ -39526,13 +40411,17 @@ "IN_USE_BACKUP_SCHEDULES", "IN_USE_SNAPSHOT_SCHEDULES", "LOCAL_SSD_TOTAL_GB", + "M1_CPUS", + "M2_CPUS", "MACHINE_IMAGES", "N2D_CPUS", "N2_CPUS", "NETWORKS", "NETWORK_ENDPOINT_GROUPS", + "NETWORK_FIREWALL_POLICIES", "NODE_GROUPS", "NODE_TEMPLATES", + "NVIDIA_A100_GPUS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", @@ -39544,6 +40433,7 @@ "PACKET_MIRRORINGS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", + "PREEMPTIBLE_NVIDIA_A100_GPUS", "PREEMPTIBLE_NVIDIA_K80_GPUS", "PREEMPTIBLE_NVIDIA_P100_GPUS", "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", @@ -39674,6 +40564,15 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", + "", + "", "" ], "type": "string" @@ -42140,7 +43039,7 @@ "type": "object" }, "Router": { - "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the the Cloud Router overview.", + "description": "Represents a Cloud Router resource.\n\nFor more information about Cloud Router, read the Cloud Router overview.", "id": "Router", "properties": { "bgp": { @@ -42872,7 +43771,7 @@ "type": "object" }, "RouterStatusNatStatus": { - "description": "Status of a NAT contained in this router. 
Next tag: 9", + "description": "Status of a NAT contained in this router.", "id": "RouterStatusNatStatus", "properties": { "autoAllocatedNatIps": { @@ -43161,7 +44060,7 @@ "type": "object" }, "Scheduling": { - "description": "Sets the scheduling options for an Instance. NextID: 11", + "description": "Sets the scheduling options for an Instance. NextID: 12", "id": "Scheduling", "properties": { "automaticRestart": { @@ -43188,7 +44087,7 @@ "type": "string" }, "preemptible": { - "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created.", + "description": "Defines whether the instance is preemptible. This can only be set during instance creation or while the instance is stopped and therefore, in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states.", "type": "boolean" } }, @@ -43885,7 +44784,7 @@ "type": "object" }, "SslCertificate": { - "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates and SSL certificates quotas and limits. 
(== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", + "description": "Represents an SSL Certificate resource.\n\nGoogle Compute Engine has two SSL Certificate resources:\n\n* [Global](/compute/docs/reference/rest/{$api_version}/sslCertificates) * [Regional](/compute/docs/reference/rest/{$api_version}/regionSslCertificates)\n\n\n\nThe sslCertificates are used by: \n- external HTTPS load balancers \n- SSL proxy load balancers \n\nThe regionSslCertificates are used by internal HTTPS load balancers.\n\nOptionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates, SSL certificates quotas and limits, and Troubleshooting SSL certificates. (== resource_for {$api_version}.sslCertificates ==) (== resource_for {$api_version}.regionSslCertificates ==)", "id": "SslCertificate", "properties": { "certificate": { @@ -44686,7 +45585,7 @@ "type": "string" }, "ipCidrRange": { - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time.", + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field is set at resource creation time. 
The range can be expanded after creation using expandIpCidrRange.", "type": "string" }, "ipv6CidrRange": { @@ -44700,7 +45599,7 @@ }, "logConfig": { "$ref": "SubnetworkLogConfig", - "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Stackdriver." + "description": "This field denotes the VPC flow logging options for this subnetwork. If logging is enabled, logs are exported to Cloud Logging." }, "name": { "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -47670,7 +48569,7 @@ }, "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set." + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices.\nOnly one of defaultRouteAction or defaultUrlRedirect must be set.\nUrlMaps for external HTTP(S) load balancers support only the urlRewrite action within defaultRouteAction." }, "defaultService": { "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is additionally specified, advanced routing actions like URL Rewrites, etc. take effect prior to sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified.\nOnly one of defaultService, defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 3c98c3eaa64b8..d8719cdccfff4 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -178,9 +178,11 @@ func New(client *http.Client) (*Service, error) { s.RegionCommitments = NewRegionCommitmentsService(s) s.RegionDiskTypes = NewRegionDiskTypesService(s) s.RegionDisks = NewRegionDisksService(s) + s.RegionHealthCheckServices = NewRegionHealthCheckServicesService(s) s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionNotificationEndpoints = NewRegionNotificationEndpointsService(s) s.RegionOperations = NewRegionOperationsService(s) s.RegionSslCertificates = NewRegionSslCertificatesService(s) s.RegionTargetHttpProxies = NewRegionTargetHttpProxiesService(s) @@ -296,12 +298,16 @@ type Service struct { RegionDisks *RegionDisksService + 
RegionHealthCheckServices *RegionHealthCheckServicesService + RegionHealthChecks *RegionHealthChecksService RegionInstanceGroupManagers *RegionInstanceGroupManagersService RegionInstanceGroups *RegionInstanceGroupsService + RegionNotificationEndpoints *RegionNotificationEndpointsService + RegionOperations *RegionOperationsService RegionSslCertificates *RegionSslCertificatesService @@ -724,6 +730,15 @@ type RegionDisksService struct { s *Service } +func NewRegionHealthCheckServicesService(s *Service) *RegionHealthCheckServicesService { + rs := &RegionHealthCheckServicesService{s: s} + return rs +} + +type RegionHealthCheckServicesService struct { + s *Service +} + func NewRegionHealthChecksService(s *Service) *RegionHealthChecksService { rs := &RegionHealthChecksService{s: s} return rs @@ -751,6 +766,15 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionNotificationEndpointsService(s *Service) *RegionNotificationEndpointsService { + rs := &RegionNotificationEndpointsService{s: s} + return rs +} + +type RegionNotificationEndpointsService struct { + s *Service +} + func NewRegionOperationsService(s *Service) *RegionOperationsService { rs := &RegionOperationsService{s: s} return rs @@ -1061,14 +1085,14 @@ type AcceleratorType struct { // compute#acceleratorType for accelerator types. Kind string `json:"kind,omitempty"` - // MaximumCardsPerInstance: [Output Only] Maximum accelerator cards - // allowed per instance. + // MaximumCardsPerInstance: [Output Only] Maximum number of accelerator + // cards allowed per instance. MaximumCardsPerInstance int64 `json:"maximumCardsPerInstance,omitempty"` // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // SelfLink: [Output Only] Server-defined, fully qualified URL for this // resource. 
SelfLink string `json:"selfLink,omitempty"` @@ -2318,7 +2342,7 @@ func (s *AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk) } // AllocationSpecificSKUAllocationReservedInstanceProperties: Properties -// of the SKU instances being reserved. +// of the SKU instances being reserved. Next ID: 9 type AllocationSpecificSKUAllocationReservedInstanceProperties struct { // GuestAccelerators: Specifies accelerator type and count. GuestAccelerators []*AcceleratorConfig `json:"guestAccelerators,omitempty"` @@ -2333,6 +2357,17 @@ type AllocationSpecificSKUAllocationReservedInstanceProperties struct { // custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. MachineType string `json:"machineType,omitempty"` + // MaintenanceInterval: Specifies whether this VM may be a stable fleet + // VM. Setting this to "Periodic" designates this VM as a Stable Fleet + // VM. + // + // See go/stable-fleet-ug for more details. + // + // Possible values: + // "AS_NEEDED" + // "PERIODIC" + MaintenanceInterval string `json:"maintenanceInterval,omitempty"` + // MinCpuPlatform: Minimum cpu platform the reservation. 
MinCpuPlatform string `json:"minCpuPlatform,omitempty"` @@ -2698,12 +2733,12 @@ func (s *AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) { // // Example Policy with multiple AuditConfigs: // -// { "audit_configs": [ { "service": "allServices" "audit_log_configs": +// { "audit_configs": [ { "service": "allServices", "audit_log_configs": // [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE", }, { -// "log_type": "ADMIN_READ", } ] }, { "service": -// "sampleservice.googleapis.com" "audit_log_configs": [ { "log_type": -// "DATA_READ", }, { "log_type": "DATA_WRITE", "exempted_members": [ +// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { +// "log_type": "ADMIN_READ" } ] }, { "service": +// "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": +// "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ // "user:aliya@example.com" ] } ] } ] } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and @@ -2750,7 +2785,7 @@ func (s *AuditConfig) MarshalJSON() ([]byte, error) { // // { "audit_log_configs": [ { "log_type": "DATA_READ", // "exempted_members": [ "user:jose@example.com" ] }, { "log_type": -// "DATA_WRITE", } ] } +// "DATA_WRITE" } ] } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting // jose@example.com from DATA_READ logging. @@ -3327,6 +3362,7 @@ type AutoscalerStatusDetails struct { // "MISSING_CUSTOM_METRIC_DATA_POINTS" // "MISSING_LOAD_BALANCING_DATA_POINTS" // "MODE_OFF" + // "MODE_ONLY_SCALE_OUT" // "MODE_ONLY_UP" // "MORE_THAN_ONE_BACKEND_SERVICE" // "NOT_ENOUGH_QUOTA_AVAILABLE" @@ -3539,6 +3575,7 @@ type AutoscalingPolicy struct { // Possible values: // "OFF" // "ON" + // "ONLY_SCALE_OUT" // "ONLY_UP" Mode string `json:"mode,omitempty"` @@ -3783,8 +3820,10 @@ type Backend struct { // capacity (based on UTILIZATION, RATE or CONNECTION). 
Default value is // 1, which means the group will serve up to 100% of its configured // capacity (depending on balancingMode). A setting of 0 means the group - // is completely drained, offering 0% of its available Capacity. Valid - // range is [0.0,1.0]. + // is completely drained, offering 0% of its available capacity. Valid + // range is 0.0 and [0.1,1.0]. You cannot configure a setting larger + // than 0 and smaller than 0.1. You cannot configure a setting of 0 when + // there is only one backend attached to the backend service. // // This cannot be used for internal load balancing. CapacityScaler float64 `json:"capacityScaler,omitempty"` @@ -4226,8 +4265,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // BackendService: Represents a Backend Service resource. // -// A backend service contains configuration values for Google Cloud -// Platform load balancing services. +// A backend service defines how Google Cloud load balancers distribute +// traffic. The backend service configuration contains a set of values, +// such as the protocol used to connect to backends, various +// distribution and session settings, health checks, and timeouts. These +// settings provide fine-grained control over how your load balancer +// behaves. Most of the settings have default values that allow for easy +// configuration if you need to get started quickly. // // Backend services in Google Compute Engine can be either regionally or // globally scoped. @@ -4238,7 +4282,7 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // [Regional](/compute/docs/reference/rest/{$api_version}/regionBackendSe // rvices) // -// For more information, read Backend Services. +// For more information, see Backend Services. // // (== resource_for {$api_version}.backendService ==) type BackendService struct { @@ -4325,6 +4369,7 @@ type BackendService struct { // backends must not have a health check. 
A health check must HealthChecks []string `json:"healthChecks,omitempty"` + // Iap: The configurations for Identity-Aware Proxy on this resource. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -4369,8 +4414,7 @@ type BackendService struct { // - MAGLEV: used as a drop in replacement for the ring hash load // balancer. Maglev is not as stable as ring hash but has faster table // lookup build times and host selection times. For more information - // about Maglev, refer to https://ai.google/research/pubs/pub44824 - // + // about Maglev, see https://ai.google/research/pubs/pub44824 // // This field is applicable to either: // - A regional backend service with the service_protocol set to HTTP, @@ -4499,7 +4543,7 @@ type BackendService struct { SessionAffinity string `json:"sessionAffinity,omitempty"` // TimeoutSec: The backend service timeout has a different meaning - // depending on the type of load balancer. For more information read, + // depending on the type of load balancer. For more information see, // Backend service settings The default is 30 seconds. TimeoutSec int64 `json:"timeoutSec,omitempty"` @@ -4806,6 +4850,10 @@ func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { } type BackendServiceGroupHealth struct { + // Annotations: Metadata defined as annotations on the network endpoint + // group. + Annotations map[string]string `json:"annotations,omitempty"` + // HealthStatus: Health state of the backend instances or endpoints in // requested instance or network endpoint group, determined based on // configured health checks. @@ -4819,7 +4867,7 @@ type BackendServiceGroupHealth struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // ForceSendFields is a list of field names (e.g. "Annotations") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -4827,7 +4875,7 @@ type BackendServiceGroupHealth struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include + // NullFields is a list of field names (e.g. "Annotations") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -4844,10 +4892,18 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { // BackendServiceIAP: Identity-Aware Proxy type BackendServiceIAP struct { + // Enabled: Whether the serving infrastructure will authenticate and + // authorize all incoming requests. If true, the oauth2ClientId and + // oauth2ClientSecret fields must be non-empty. Enabled bool `json:"enabled,omitempty"` + // Oauth2ClientId: OAuth2 client ID to use for the authentication flow. Oauth2ClientId string `json:"oauth2ClientId,omitempty"` + // Oauth2ClientSecret: OAuth2 client secret to use for the + // authentication flow. For security reasons, this value cannot be + // retrieved via the API. Instead, the SHA-256 hash of the value is + // returned in the oauth2ClientSecretSha256 field. Oauth2ClientSecret string `json:"oauth2ClientSecret,omitempty"` // Oauth2ClientSecretSha256: [Output Only] SHA256 hash value for the @@ -6510,10 +6566,10 @@ type Disk struct { Options string `json:"options,omitempty"` // PhysicalBlockSizeBytes: Physical block size of the persistent disk, - // in bytes. If not present in a request, a default value is used. - // Currently supported sizes are 4096 and 16384, other sizes may be - // added in the future. 
If an unsupported value is requested, the error - // message will list the supported values for the caller's project. + // in bytes. If not present in a request, a default value is used. The + // currently supported size is 4096, other sizes may be added in the + // future. If an unsupported value is requested, the error message will + // list the supported values for the caller's project. PhysicalBlockSizeBytes int64 `json:"physicalBlockSizeBytes,omitempty,string"` // Region: [Output Only] URL of the region where the disk resides. Only @@ -6545,6 +6601,23 @@ type Disk struct { // inclusive. SizeGb int64 `json:"sizeGb,omitempty,string"` + // SourceDisk: The source disk used to create this disk. You can provide + // this as a partial or full URL to the resource. For example, the + // following are valid values: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk + // - projects/project/zones/zone/disks/disk + // - zones/zone/disks/disk + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskId: [Output Only] The unique ID of the disk used to create + // this disk. This value identifies the exact disk that was used to + // create this persistent disk. For example, if you created the + // persistent disk from a disk that was later deleted and recreated + // under the same name, the source disk ID would identify the exact + // version of the disk that was used. + SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceImage: The source image used to create this disk. If the source // image is deleted, this field will not be set. // @@ -6625,7 +6698,7 @@ type Disk struct { // Type: URL of the disk type resource describing which disk type to use // to create the disk. Provide this when creating the disk. 
For example: - // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd + // projects/project/zones/zone/diskTypes/pd-standard or pd-ssd Type string `json:"type,omitempty"` // Users: [Output Only] Links to the users of the disk (attached @@ -8626,8 +8699,8 @@ type Firewall struct { Kind string `json:"kind,omitempty"` // LogConfig: This field denotes the logging options for a particular - // firewall rule. If logging is enabled, logs will be exported to - // Stackdriver. + // firewall rule. If logging is enabled, logs will be exported to Cloud + // Logging. LogConfig *FirewallLogConfig `json:"logConfig,omitempty"` // Name: Name of the resource; provided by the client when the resource @@ -10106,6 +10179,7 @@ type GuestOsFeature struct { // "FEATURE_TYPE_UNSPECIFIED" // "MULTI_IP_SUBNET" // "SECURE_BOOT" + // "SEV_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -10408,6 +10482,9 @@ type HealthCheck struct { // Kind: Type of the resource. Kind string `json:"kind,omitempty"` + // LogConfig: Configure logging on this health check. + LogConfig *HealthCheckLogConfig `json:"logConfig,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -10634,6 +10711,36 @@ func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HealthCheckLogConfig: Configuration of logging on a health check. If +// logging is enabled, logs will be exported to Stackdriver. +type HealthCheckLogConfig struct { + // Enable: Indicates whether or not to export logs. This is false by + // default, which means no health check logging will be done. + Enable bool `json:"enable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enable") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enable") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod HealthCheckLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HealthCheckReference: A full or valid partial URL to a health check. // For example, the following are valid URLs: // - @@ -10666,169 +10773,187 @@ func (s *HealthCheckReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type HealthChecksAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// HealthCheckService: Represents a Health-Check as a Service +// resource. +// +// (== resource_for {$api_version}.regionHealthCheckServices ==) +type HealthCheckService struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Items: A list of HealthChecksScopedList resources. - Items map[string]HealthChecksScopedList `json:"items,omitempty"` + // Description: An optional description of this resource. 
Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Kind: Type of resource. + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a HealthCheckService. An + // up-to-date fingerprint must be provided in order to patch/update the + // HealthCheckService; Otherwise, the request will fail with error 412 + // conditionNotMet. To see the latest fingerprint, make a get() request + // to retrieve the HealthCheckService. + Fingerprint string `json:"fingerprint,omitempty"` + + // HealthChecks: List of URLs to the HealthCheck resources. Must have at + // least one HealthCheck, and not more than 10. HealthCheck resources + // must have portSpecification=USE_SERVING_PORT. For regional + // HealthCheckService, the HealthCheck must be regional and in the same + // region. For global HealthCheckService, HealthCheck must be global. + // Mix of regional and global HealthChecks is not supported. Multiple + // regional HealthChecks must belong to the same region. Regional + // HealthChecks`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. 
For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionHealthChecksListCall) MaxResults(maxResults int64) *RegionHealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionHealthChecksListCall) OrderBy(orderBy string) *RegionHealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. 
+func (c *RegionHealthChecksListCall) PageToken(pageToken string) *RegionHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksListCall) Fields(s ...googleapi.Field) *RegionHealthChecksListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionHealthChecksListCall) IfNoneMatch(entityTag string) *RegionHealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthChecksListCall) Context(ctx context.Context) *RegionHealthChecksListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RegionHealthChecksListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HealthCheckList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regionHealthChecks.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/healthChecks", + // "response": { + // "$ref": "HealthCheckList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionHealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionHealthChecks.patch": + +type RegionHealthChecksPatchCall struct { + s *Service + project string + region string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. 
This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *RegionHealthChecksService) Patch(project string, region string, healthCheck string, healthcheck *HealthCheck) *RegionHealthChecksPatchCall { + c := &RegionHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionHealthChecksPatchCall) RequestId(requestId string) *RegionHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthChecksPatchCall) Fields(s ...googleapi.Field) *RegionHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionHealthChecksPatchCall) Context(ctx context.Context) *RegionHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/healthChecks/{healthCheck}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "healthCheck": c.healthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102152,7 +103856,7 @@ func (c *RegionHealthChecksUpdateCall) Header() http.Header { func (c *RegionHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102354,7 +104058,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102520,7 +104224,7 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.H func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102703,7 +104407,7 @@ func (c *RegionInstanceGroupManagersCreateInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersCreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -102886,7 +104590,7 @@ func (c 
*RegionInstanceGroupManagersDeleteCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103078,7 +104782,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103253,7 +104957,7 @@ func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103434,7 +105138,7 @@ func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103664,7 +105368,7 @@ func (c *RegionInstanceGroupManagersListCall) Header() http.Header { func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103820,7 +105524,8 @@ type RegionInstanceGroupManagersListErrorsCall struct { } // ListErrors: Lists all errors thrown by actions on instances for a -// given regional managed instance group. +// given regional managed instance group. The filter and orderBy query +// parameters are not supported. func (r *RegionInstanceGroupManagersService) ListErrors(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListErrorsCall { c := &RegionInstanceGroupManagersListErrorsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -103931,7 +105636,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Header() http.Header { func (c *RegionInstanceGroupManagersListErrorsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -103997,7 +105702,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group.", + // "description": "Lists all errors thrown by actions on instances for a given regional managed instance group. 
The filter and orderBy query parameters are not supported.", // "httpMethod": "GET", // "id": "compute.regionInstanceGroupManagers.listErrors", // "parameterOrder": [ @@ -104098,7 +105803,7 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // ListManagedInstances: Lists the instances in the managed instance // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its -// instances. +// instances. The orderBy query parameter is not supported. func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -104199,7 +105904,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Head func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104262,7 +105967,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. 
The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ @@ -104423,7 +106128,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104621,7 +106326,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -104816,7 +106521,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105003,7 +106708,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Heade func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { 
reqHeaders[k] = v } @@ -105189,7 +106894,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105363,7 +107068,7 @@ func (c *RegionInstanceGroupsGetCall) Header() http.Header { func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105592,7 +107297,7 @@ func (c *RegionInstanceGroupsListCall) Header() http.Header { func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105750,7 +107455,8 @@ type RegionInstanceGroupsListInstancesCall struct { // ListInstances: Lists the instances in the specified instance group // and displays information about the named ports. Depending on the // specified options, this method can list all instances or only the -// instances that are running. +// instances that are running. The orderBy query parameter is not +// supported. 
func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -105852,7 +107558,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -105919,7 +107625,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. 
The orderBy query parameter is not supported.", // "httpMethod": "POST", // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ @@ -106078,7 +107784,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106193,6 +107899,793 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } +// method id "compute.regionNotificationEndpoints.delete": + +type RegionNotificationEndpointsDeleteCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified NotificationEndpoint in the given +// region +func (r *RegionNotificationEndpointsService) Delete(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsDeleteCall { + c := &RegionNotificationEndpointsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationEndpoint = notificationEndpoint + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionNotificationEndpointsDeleteCall) RequestId(requestId string) *RegionNotificationEndpointsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsDeleteCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsDeleteCall) Context(ctx context.Context) *RegionNotificationEndpointsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "notificationEndpoint": c.notificationEndpoint, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified NotificationEndpoint in the given region", + // "httpMethod": "DELETE", + // "id": "compute.regionNotificationEndpoints.delete", + // "parameterOrder": [ + // "project", + // "region", + // "notificationEndpoint" + // ], + // "parameters": { + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to delete.", + // "location": "path", + // 
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionNotificationEndpoints.get": + +type RegionNotificationEndpointsGetCall struct { + s *Service + project string + region string + notificationEndpoint string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified NotificationEndpoint resource in the given +// region. +func (r *RegionNotificationEndpointsService) Get(project string, region string, notificationEndpoint string) *RegionNotificationEndpointsGetCall { + c := &RegionNotificationEndpointsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationEndpoint = notificationEndpoint + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsGetCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RegionNotificationEndpointsGetCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsGetCall) Context(ctx context.Context) *RegionNotificationEndpointsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "notificationEndpoint": c.notificationEndpoint, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.get" call. +// Exactly one of *NotificationEndpoint or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *NotificationEndpoint.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsGetCall) Do(opts ...googleapi.CallOption) (*NotificationEndpoint, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NotificationEndpoint{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified NotificationEndpoint resource in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.get", + // "parameterOrder": [ + // "project", + // "region", + // "notificationEndpoint" + // ], + // "parameters": { + // "notificationEndpoint": { + // "description": "Name of the NotificationEndpoint resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this 
request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/notificationEndpoints/{notificationEndpoint}", + // "response": { + // "$ref": "NotificationEndpoint" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionNotificationEndpoints.insert": + +type RegionNotificationEndpointsInsertCall struct { + s *Service + project string + region string + notificationendpoint *NotificationEndpoint + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Create a NotificationEndpoint in the specified project in the +// given region using the parameters that are included in the request. +func (r *RegionNotificationEndpointsService) Insert(project string, region string, notificationendpoint *NotificationEndpoint) *RegionNotificationEndpointsInsertCall { + c := &RegionNotificationEndpointsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.notificationendpoint = notificationendpoint + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionNotificationEndpointsInsertCall) RequestId(requestId string) *RegionNotificationEndpointsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNotificationEndpointsInsertCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsInsertCall) Context(ctx context.Context) *RegionNotificationEndpointsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationendpoint) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Create a NotificationEndpoint in the specified project in the given region using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionNotificationEndpoints.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/notificationEndpoints", + // "request": { + // "$ref": "NotificationEndpoint" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionNotificationEndpoints.list": + +type RegionNotificationEndpointsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the NotificationEndpoints for a project in the given +// region. 
+func (r *RegionNotificationEndpointsService) List(project string, region string) *RegionNotificationEndpointsListCall { + c := &RegionNotificationEndpointsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either `=`, `!=`, `>`, or +// `<`. +// +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named `example-instance` by specifying `name != +// example-instance`. +// +// You can also filter nested fields. For example, you could specify +// `scheduling.automaticRestart = false` to include instances only if +// they are not scheduled for automatic restarts. You can use filtering +// on nested fields to filter based on resource labels. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example: ``` (scheduling.automaticRestart = +// true) (cpuPlatform = "Intel Skylake") ``` By default, each expression +// is an `AND` expression. However, you can include `AND` and `OR` +// expressions explicitly. For example: ``` (cpuPlatform = "Intel +// Skylake") OR (cpuPlatform = "Intel Broadwell") AND +// (scheduling.automaticRestart = true) ``` +func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionNotificationEndpointsListCall) MaxResults(maxResults int64) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using `orderBy="creationTimestamp desc". This sorts +// results based on the `creationTimestamp` field in reverse +// chronological order (newest result first). Use this to sort resources +// like operations so that the newest operation is returned +// first. +// +// Currently, only sorting by `name` or `creationTimestamp desc` is +// supported. +func (c *RegionNotificationEndpointsListCall) OrderBy(orderBy string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionNotificationEndpointsListCall) PageToken(pageToken string) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionNotificationEndpointsListCall) Fields(s ...googleapi.Field) *RegionNotificationEndpointsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionNotificationEndpointsListCall) IfNoneMatch(entityTag string) *RegionNotificationEndpointsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNotificationEndpointsListCall) Context(ctx context.Context) *RegionNotificationEndpointsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNotificationEndpointsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNotificationEndpointsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/notificationEndpoints") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNotificationEndpoints.list" call. +// Exactly one of *NotificationEndpointList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NotificationEndpointList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) (*NotificationEndpointList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NotificationEndpointList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the NotificationEndpoints for a project in the given region.", + // "httpMethod": "GET", + // "id": "compute.regionNotificationEndpoints.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either `=`, `!=`, `\u003e`, or `\u003c`.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`.\n\nYou can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ```", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. 
This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/notificationEndpoints", + // "response": { + // "$ref": "NotificationEndpointList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionNotificationEndpointsListCall) Pages(ctx context.Context, f func(*NotificationEndpointList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionOperations.delete": type RegionOperationsDeleteCall struct { @@ -106242,7 +108735,7 @@ func (c *RegionOperationsDeleteCall) Header() http.Header { func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106378,7 +108871,7 @@ func (c *RegionOperationsGetCall) Header() http.Header { func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106610,7 +109103,7 @@ func (c *RegionOperationsListCall) Header() http.Header { func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106814,7 +109307,7 @@ func (c *RegionOperationsWaitCall) Header() http.Header { func (c *RegionOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -106986,7 +109479,7 @@ func (c *RegionSslCertificatesDeleteCall) Header() http.Header { func (c *RegionSslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107156,7 +109649,7 @@ func (c *RegionSslCertificatesGetCall) Header() http.Header { func (c *RegionSslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107332,7 +109825,7 @@ func (c *RegionSslCertificatesInsertCall) Header() http.Header { func (c *RegionSslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107563,7 +110056,7 @@ func (c *RegionSslCertificatesListCall) Header() http.Header { func (c *RegionSslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107773,7 +110266,7 @@ 
func (c *RegionTargetHttpProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -107943,7 +110436,7 @@ func (c *RegionTargetHttpProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108119,7 +110612,7 @@ func (c *RegionTargetHttpProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108350,7 +110843,7 @@ func (c *RegionTargetHttpProxiesListCall) Header() http.Header { func (c *RegionTargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108562,7 +111055,7 @@ func (c *RegionTargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108746,7 +111239,7 @@ func (c *RegionTargetHttpsProxiesDeleteCall) Header() http.Header { func (c *RegionTargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -108916,7 +111409,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Header() http.Header { func (c *RegionTargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109092,7 +111585,7 @@ func (c *RegionTargetHttpsProxiesInsertCall) Header() http.Header { func (c *RegionTargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109323,7 +111816,7 @@ func (c *RegionTargetHttpsProxiesListCall) Header() http.Header { func (c *RegionTargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109535,7 +112028,7 @@ func (c 
*RegionTargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109721,7 +112214,7 @@ func (c *RegionTargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *RegionTargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -109893,7 +112386,7 @@ func (c *RegionUrlMapsDeleteCall) Header() http.Header { func (c *RegionUrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110062,7 +112555,7 @@ func (c *RegionUrlMapsGetCall) Header() http.Header { func (c *RegionUrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110226,7 +112719,7 @@ func (c *RegionUrlMapsInsertCall) Header() http.Header { func (c *RegionUrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110457,7 +112950,7 @@ func (c *RegionUrlMapsListCall) Header() http.Header { func (c *RegionUrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110659,7 +113152,7 @@ func (c *RegionUrlMapsPatchCall) Header() http.Header { func (c *RegionUrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -110834,7 +113327,7 @@ func (c *RegionUrlMapsUpdateCall) Header() http.Header { func (c *RegionUrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111003,7 +113496,7 @@ func (c *RegionUrlMapsValidateCall) Header() http.Header { func (c *RegionUrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111174,7 +113667,7 @@ func (c *RegionsGetCall) Header() http.Header { func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111395,7 +113888,7 @@ func (c *RegionsListCall) Header() http.Header { func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111662,7 +114155,7 @@ func (c *ReservationsAggregatedListCall) Header() http.Header { func (c *ReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -111868,7 +114361,7 @@ func (c *ReservationsDeleteCall) Header() http.Header { func (c *ReservationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112036,7 +114529,7 @@ func (c *ReservationsGetCall) Header() http.Header { func (c *ReservationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112204,7 +114697,7 @@ func (c 
*ReservationsGetIamPolicyCall) Header() http.Header { func (c *ReservationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112380,7 +114873,7 @@ func (c *ReservationsInsertCall) Header() http.Header { func (c *ReservationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112611,7 +115104,7 @@ func (c *ReservationsListCall) Header() http.Header { func (c *ReservationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112825,7 +115318,7 @@ func (c *ReservationsResizeCall) Header() http.Header { func (c *ReservationsResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -112993,7 +115486,7 @@ func (c *ReservationsSetIamPolicyCall) Header() http.Header { func (c *ReservationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113156,7 +115649,7 @@ func (c *ReservationsTestIamPermissionsCall) Header() http.Header { func (c *ReservationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113402,7 +115895,7 @@ func (c *ResourcePoliciesAggregatedListCall) Header() http.Header { func (c *ResourcePoliciesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113608,7 +116101,7 @@ func (c *ResourcePoliciesDeleteCall) Header() http.Header { func (c *ResourcePoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113776,7 +116269,7 @@ func (c *ResourcePoliciesGetCall) Header() http.Header { func (c *ResourcePoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -113944,7 +116437,7 @@ func (c *ResourcePoliciesGetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesGetIamPolicyCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114119,7 +116612,7 @@ func (c *ResourcePoliciesInsertCall) Header() http.Header { func (c *ResourcePoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114350,7 +116843,7 @@ func (c *ResourcePoliciesListCall) Header() http.Header { func (c *ResourcePoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114544,7 +117037,7 @@ func (c *ResourcePoliciesSetIamPolicyCall) Header() http.Header { func (c *ResourcePoliciesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -114707,7 +117200,7 @@ func (c *ResourcePoliciesTestIamPermissionsCall) Header() http.Header { func (c *ResourcePoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range 
c.header_ { reqHeaders[k] = v } @@ -114953,7 +117446,7 @@ func (c *RoutersAggregatedListCall) Header() http.Header { func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115159,7 +117652,7 @@ func (c *RoutersDeleteCall) Header() http.Header { func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115328,7 +117821,7 @@ func (c *RoutersGetCall) Header() http.Header { func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115561,7 +118054,7 @@ func (c *RoutersGetNatMappingInfoCall) Header() http.Header { func (c *RoutersGetNatMappingInfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115773,7 +118266,7 @@ func (c *RoutersGetRouterStatusCall) Header() http.Header { func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") 
+ reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -115949,7 +118442,7 @@ func (c *RoutersInsertCall) Header() http.Header { func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116180,7 +118673,7 @@ func (c *RoutersListCall) Header() http.Header { func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116394,7 +118887,7 @@ func (c *RoutersPatchCall) Header() http.Header { func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116563,7 +119056,7 @@ func (c *RoutersPreviewCall) Header() http.Header { func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116749,7 +119242,7 @@ func (c *RoutersUpdateCall) Header() http.Header { func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -116932,7 +119425,7 @@ func (c *RoutesDeleteCall) Header() http.Header { func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117091,7 +119584,7 @@ func (c *RoutesGetCall) Header() http.Header { func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117257,7 +119750,7 @@ func (c *RoutesInsertCall) Header() http.Header { func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117478,7 +119971,7 @@ func (c *RoutesListCall) Header() http.Header { func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117660,7 +120153,7 @@ func (c *SecurityPoliciesAddRuleCall) Header() http.Header { func (c *SecurityPoliciesAddRuleCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117828,7 +120321,7 @@ func (c *SecurityPoliciesDeleteCall) Header() http.Header { func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -117986,7 +120479,7 @@ func (c *SecurityPoliciesGetCall) Header() http.Header { func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118149,7 +120642,7 @@ func (c *SecurityPoliciesGetRuleCall) Header() http.Header { func (c *SecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118320,7 +120813,7 @@ func (c *SecurityPoliciesInsertCall) Header() http.Header { func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ 
-118540,7 +121033,7 @@ func (c *SecurityPoliciesListCall) Header() http.Header { func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118795,7 +121288,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Header() http.Head func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -118978,7 +121471,7 @@ func (c *SecurityPoliciesPatchCall) Header() http.Header { func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119141,7 +121634,7 @@ func (c *SecurityPoliciesPatchRuleCall) Header() http.Header { func (c *SecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119303,7 +121796,7 @@ func (c *SecurityPoliciesRemoveRuleCall) Header() http.Header { func (c *SecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119476,7 +121969,7 @@ func (c *SnapshotsDeleteCall) Header() http.Header { func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119635,7 +122128,7 @@ func (c *SnapshotsGetCall) Header() http.Header { func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -119792,7 +122285,7 @@ func (c *SnapshotsGetIamPolicyCall) Header() http.Header { func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120013,7 +122506,7 @@ func (c *SnapshotsListCall) Header() http.Header { func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120196,7 +122689,7 @@ func (c *SnapshotsSetIamPolicyCall) Header() http.Header { func (c 
*SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120348,7 +122841,7 @@ func (c *SnapshotsSetLabelsCall) Header() http.Header { func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120500,7 +122993,7 @@ func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120738,7 +123231,7 @@ func (c *SslCertificatesAggregatedListCall) Header() http.Header { func (c *SslCertificatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -120942,7 +123435,7 @@ func (c *SslCertificatesDeleteCall) Header() http.Header { func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121100,7 +123593,7 @@ func (c *SslCertificatesGetCall) Header() http.Header { func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121265,7 +123758,7 @@ func (c *SslCertificatesInsertCall) Header() http.Header { func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121485,7 +123978,7 @@ func (c *SslCertificatesListCall) Header() http.Header { func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121686,7 +124179,7 @@ func (c *SslPoliciesDeleteCall) Header() http.Header { func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -121843,7 +124336,7 @@ func (c *SslPoliciesGetCall) Header() http.Header { func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122007,7 +124500,7 @@ func (c *SslPoliciesInsertCall) Header() http.Header { func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122227,7 +124720,7 @@ func (c *SslPoliciesListCall) Header() http.Header { func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122482,7 +124975,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122665,7 +125158,7 @@ func (c *SslPoliciesPatchCall) Header() http.Header { func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -122905,7 +125398,7 @@ func (c *SubnetworksAggregatedListCall) Header() http.Header { func (c 
*SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123111,7 +125604,7 @@ func (c *SubnetworksDeleteCall) Header() http.Header { func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123290,7 +125783,7 @@ func (c *SubnetworksExpandIpCidrRangeCall) Header() http.Header { func (c *SubnetworksExpandIpCidrRangeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123467,7 +125960,7 @@ func (c *SubnetworksGetCall) Header() http.Header { func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -123635,7 +126128,7 @@ func (c *SubnetworksGetIamPolicyCall) Header() http.Header { func (c *SubnetworksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, 
v := range c.header_ { reqHeaders[k] = v } @@ -123811,7 +126304,7 @@ func (c *SubnetworksInsertCall) Header() http.Header { func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124042,7 +126535,7 @@ func (c *SubnetworksListCall) Header() http.Header { func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124197,8 +126690,7 @@ type SubnetworksListUsableCall struct { } // ListUsable: Retrieves an aggregated list of all usable subnetworks in -// the project. The list contains all of the subnetworks in the project -// and the subnetworks that were shared by a Shared VPC host project. +// the project. 
func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -124307,7 +126799,7 @@ func (c *SubnetworksListUsableCall) Header() http.Header { func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124369,7 +126861,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub } return ret, nil // { - // "description": "Retrieves an aggregated list of all usable subnetworks in the project. The list contains all of the subnetworks in the project and the subnetworks that were shared by a Shared VPC host project.", + // "description": "Retrieves an aggregated list of all usable subnetworks in the project.", // "httpMethod": "GET", // "id": "compute.subnetworks.listUsable", // "parameterOrder": [ @@ -124528,7 +127020,7 @@ func (c *SubnetworksPatchCall) Header() http.Header { func (c *SubnetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -124702,7 +127194,7 @@ func (c *SubnetworksSetIamPolicyCall) Header() http.Header { func (c *SubnetworksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ 
-124885,7 +127377,7 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Header() http.Header { func (c *SubnetworksSetPrivateIpGoogleAccessCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125053,7 +127545,7 @@ func (c *SubnetworksTestIamPermissionsCall) Header() http.Header { func (c *SubnetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125300,7 +127792,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125505,7 +127997,7 @@ func (c *TargetHttpProxiesDeleteCall) Header() http.Header { func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125664,7 +128156,7 @@ func (c *TargetHttpProxiesGetCall) Header() http.Header { func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -125830,7 +128322,7 @@ func (c *TargetHttpProxiesInsertCall) Header() http.Header { func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126051,7 +128543,7 @@ func (c *TargetHttpProxiesListCall) Header() http.Header { func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126253,7 +128745,7 @@ func (c *TargetHttpProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126495,7 +128987,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Header() http.Header { func (c *TargetHttpsProxiesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126699,7 +129191,7 @@ func 
(c *TargetHttpsProxiesDeleteCall) Header() http.Header { func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -126857,7 +129349,7 @@ func (c *TargetHttpsProxiesGetCall) Header() http.Header { func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127022,7 +129514,7 @@ func (c *TargetHttpsProxiesInsertCall) Header() http.Header { func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127242,7 +129734,7 @@ func (c *TargetHttpsProxiesListCall) Header() http.Header { func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127443,7 +129935,7 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Header() http.Header { func (c *TargetHttpsProxiesSetQuicOverrideCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127617,7 +130109,7 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127796,7 +130288,7 @@ func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -127970,7 +130462,7 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Header() http.Header { func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128212,7 +130704,7 @@ func (c *TargetInstancesAggregatedListCall) Header() http.Header { func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128419,7 +130911,7 @@ func (c *TargetInstancesDeleteCall) 
Header() http.Header { func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128589,7 +131081,7 @@ func (c *TargetInstancesGetCall) Header() http.Header { func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128766,7 +131258,7 @@ func (c *TargetInstancesInsertCall) Header() http.Header { func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -128998,7 +131490,7 @@ func (c *TargetInstancesListCall) Header() http.Header { func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129211,7 +131703,7 @@ func (c *TargetPoolsAddHealthCheckCall) Header() http.Header { func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129398,7 +131890,7 @@ func (c *TargetPoolsAddInstanceCall) Header() http.Header { func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129649,7 +132141,7 @@ func (c *TargetPoolsAggregatedListCall) Header() http.Header { func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -129856,7 +132348,7 @@ func (c *TargetPoolsDeleteCall) Header() http.Header { func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130026,7 +132518,7 @@ func (c *TargetPoolsGetCall) Header() http.Header { func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130186,7 +132678,7 @@ func (c *TargetPoolsGetHealthCall) Header() http.Header { func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130368,7 +132860,7 @@ func (c *TargetPoolsInsertCall) Header() http.Header { func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130600,7 +133092,7 @@ func (c *TargetPoolsListCall) Header() http.Header { func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -130813,7 +133305,7 @@ func (c *TargetPoolsRemoveHealthCheckCall) Header() http.Header { func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131000,7 +133492,7 @@ func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131194,7 +133686,7 @@ func (c *TargetPoolsSetBackupCall) 
Header() http.Header { func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131382,7 +133874,7 @@ func (c *TargetSslProxiesDeleteCall) Header() http.Header { func (c *TargetSslProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131540,7 +134032,7 @@ func (c *TargetSslProxiesGetCall) Header() http.Header { func (c *TargetSslProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131705,7 +134197,7 @@ func (c *TargetSslProxiesInsertCall) Header() http.Header { func (c *TargetSslProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -131925,7 +134417,7 @@ func (c *TargetSslProxiesListCall) Header() http.Header { func (c *TargetSslProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132126,7 +134618,7 @@ func (c *TargetSslProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetSslProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132301,7 +134793,7 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetSslProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132476,7 +134968,7 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Header() http.Header { func (c *TargetSslProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132654,7 +135146,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132826,7 +135318,7 @@ func (c *TargetTcpProxiesDeleteCall) Header() http.Header { func (c 
*TargetTcpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -132984,7 +135476,7 @@ func (c *TargetTcpProxiesGetCall) Header() http.Header { func (c *TargetTcpProxiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133149,7 +135641,7 @@ func (c *TargetTcpProxiesInsertCall) Header() http.Header { func (c *TargetTcpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133369,7 +135861,7 @@ func (c *TargetTcpProxiesListCall) Header() http.Header { func (c *TargetTcpProxiesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133570,7 +136062,7 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Header() http.Header { func (c *TargetTcpProxiesSetBackendServiceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133745,7 +136237,7 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Header() http.Header { func (c *TargetTcpProxiesSetProxyHeaderCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -133986,7 +136478,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Header() http.Header { func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134192,7 +136684,7 @@ func (c *TargetVpnGatewaysDeleteCall) Header() http.Header { func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134361,7 +136853,7 @@ func (c *TargetVpnGatewaysGetCall) Header() http.Header { func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134537,7 +137029,7 @@ func (c *TargetVpnGatewaysInsertCall) Header() http.Header { func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders 
:= make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -134768,7 +137260,7 @@ func (c *TargetVpnGatewaysListCall) Header() http.Header { func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135045,7 +137537,7 @@ func (c *UrlMapsAggregatedListCall) Header() http.Header { func (c *UrlMapsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135250,7 +137742,7 @@ func (c *UrlMapsDeleteCall) Header() http.Header { func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135409,7 +137901,7 @@ func (c *UrlMapsGetCall) Header() http.Header { func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135575,7 +138067,7 @@ func (c *UrlMapsInsertCall) Header() http.Header { func 
(c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135742,7 +138234,7 @@ func (c *UrlMapsInvalidateCacheCall) Header() http.Header { func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -135972,7 +138464,7 @@ func (c *UrlMapsListCall) Header() http.Header { func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136176,7 +138668,7 @@ func (c *UrlMapsPatchCall) Header() http.Header { func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136353,7 +138845,7 @@ func (c *UrlMapsUpdateCall) Header() http.Header { func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136512,7 
+139004,7 @@ func (c *UrlMapsValidateCall) Header() http.Header { func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136748,7 +139240,7 @@ func (c *VpnGatewaysAggregatedListCall) Header() http.Header { func (c *VpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -136954,7 +139446,7 @@ func (c *VpnGatewaysDeleteCall) Header() http.Header { func (c *VpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137123,7 +139615,7 @@ func (c *VpnGatewaysGetCall) Header() http.Header { func (c *VpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137290,7 +139782,7 @@ func (c *VpnGatewaysGetStatusCall) Header() http.Header { func (c *VpnGatewaysGetStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137466,7 +139958,7 @@ func (c *VpnGatewaysInsertCall) Header() http.Header { func (c *VpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137697,7 +140189,7 @@ func (c *VpnGatewaysListCall) Header() http.Header { func (c *VpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -137910,7 +140402,7 @@ func (c *VpnGatewaysSetLabelsCall) Header() http.Header { func (c *VpnGatewaysSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138078,7 +140570,7 @@ func (c *VpnGatewaysTestIamPermissionsCall) Header() http.Header { func (c *VpnGatewaysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138324,7 +140816,7 @@ func (c *VpnTunnelsAggregatedListCall) Header() http.Header { func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138530,7 +141022,7 @@ func (c *VpnTunnelsDeleteCall) Header() http.Header { func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138699,7 +141191,7 @@ func (c *VpnTunnelsGetCall) Header() http.Header { func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -138875,7 +141367,7 @@ func (c *VpnTunnelsInsertCall) Header() http.Header { func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139106,7 +141598,7 @@ func (c *VpnTunnelsListCall) Header() http.Header { func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139298,7 +141790,7 @@ func (c *ZoneOperationsDeleteCall) Header() http.Header { func (c 
*ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139434,7 +141926,7 @@ func (c *ZoneOperationsGetCall) Header() http.Header { func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139666,7 +142158,7 @@ func (c *ZoneOperationsListCall) Header() http.Header { func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -139870,7 +142362,7 @@ func (c *ZoneOperationsWaitCall) Header() http.Header { func (c *ZoneOperationsWaitCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -140034,7 +142526,7 @@ func (c *ZonesGetCall) Header() http.Header { func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ 
-140255,7 +142747,7 @@ func (c *ZonesListCall) Header() http.Header { func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index e67ccd9a614fa..edc87ec24f634 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -28,6 +28,8 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } + // isRetryable is a platform-specific hook, specified in retryable_linux.go + syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -160,21 +162,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var shouldRetry = func(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - return err.Temporary() - } - return false - } // There are a couple of cases where it's possible for err and resp to both // be non-nil. 
However, we expose a simpler contract to our callers: exactly @@ -239,3 +226,33 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err return prepareReturn(resp, err) } } + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, following guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. + if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped + // errors. + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go new file mode 100644 index 0000000000000..fed998b5d0720 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package gensupport + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. 
+ syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 5799393093d45..3338c8d193a75 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "net/http" + "time" ) // Hook is the type of a function that is called once before each HTTP request @@ -77,6 +78,90 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re return resp, err } +// SendRequestWithRetry sends a single HTTP request using the given client, +// with retries if a retryable error is returned. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request with retry. + resp, err := sendAndRetry(ctx, client, req) + + // Call returned funcs in reverse order. 
+ for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + var resp *http.Response + var err error + + // Loop to retry the request, up to the context deadline. + var pause time.Duration + bo := backoff() + + for { + select { + case <-ctx.Done(): + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err == nil { + err = ctx.Err() + } + return resp, err + case <-time.After(pause): + } + + resp, err = client.Do(req.WithContext(ctx)) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we can retry the request. A retry can only be done if the error + // is retryable and the request body can be re-created using GetBody (this + // will not be possible if the body was unbuffered). + if req.GetBody == nil || !shouldRetry(status, err) { + break + } + var errBody error + req.Body, errBody = req.GetBody() + if errBody != nil { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + return resp, err +} + // DecodeResponse decodes the body of res into target. If there is no body, // target is unchanged. func DecodeResponse(target interface{}, res *http.Response) error { diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 0d8210baa7bcb..f435519decfb3 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -18,25 +18,26 @@ import ( // DialSettings holds information needed to establish a connection with a // Google API service. 
type DialSettings struct { - Endpoint string - DefaultEndpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. - CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - GRPCConnPool ConnPool - GRPCConnPoolSize int - NoAuth bool - TelemetryDisabled bool - ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) - CustomClaims map[string]interface{} + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} // Google API system parameters. 
For more information please read: // https://cloud.google.com/apis/docs/system-parameters diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 48121e42f9021..ff5b530cfe080 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -24,3 +24,17 @@ func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { func WithDefaultEndpoint(url string) option.ClientOption { return defaultEndpointOption(url) } + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +// +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 8bb2fa6db673f..e78776b2be886 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/SMGMLKKvE-TZrla7d9TA_SDVTI0\"", + "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/5Ir-e9ddNPcr5skzvRsSnJlvTYg\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -1781,7 +1781,7 @@ "type": "string" }, "kmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form 
projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", "location": "query", "type": "string" }, @@ -3229,7 +3229,7 @@ } } }, - "revision": "20200430", + "revision": "20200611", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 5206ec320be53..dc51a22ede627 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2442,7 +2442,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2590,7 +2590,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2757,7 +2757,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2930,7 +2930,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3091,7 +3091,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3265,7 +3265,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3451,7 +3451,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3630,7 +3630,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3836,7 +3836,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4053,7 +4053,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4310,7 +4310,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4520,7 +4520,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4755,7 +4755,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt 
string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4984,7 +4984,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5159,7 +5159,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5399,7 +5399,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5611,7 +5611,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5728,7 +5728,7 @@ func (c 
*DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5876,7 +5876,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6044,7 +6044,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6234,7 +6234,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6407,7 +6407,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6581,7 +6581,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6753,7 +6753,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6901,7 +6901,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7071,7 +7071,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7246,7 +7246,7 @@ func (c *NotificationsListCall) Header() http.Header { 
func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7419,7 +7419,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7591,7 +7591,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7782,7 +7782,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7979,7 +7979,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8164,7 +8164,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8362,7 +8362,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8547,11 +8547,9 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object -// metadata's kms_key_name value, if any. +// KmsKeyName sets the optional parameter "kmsKeyName": Not currently +// supported. Specifying the parameter causes the request to fail with +// status code 400 - Bad Request. 
func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c @@ -8599,7 +8597,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8718,7 +8716,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", // "location": "query", // "type": "string" // }, @@ -8942,7 +8940,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9270,7 +9268,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9503,7 +9501,7 @@ func (c *ObjectsGetCall) Header() http.Header { 
func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9753,7 +9751,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10070,7 +10068,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10104,7 +10102,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. 
@@ -10443,7 +10441,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10760,7 +10758,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11152,7 +11150,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11455,7 +11453,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11655,7 +11653,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11916,7 +11914,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12234,7 +12232,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12450,7 +12448,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12600,7 +12598,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12735,7 +12733,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12935,7 +12933,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13130,7 +13128,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13307,7 +13305,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200518") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 0131a2cfaf0fa..77ee798e0f7d0 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -187,6 +187,29 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) 
} +// generateDefaultMtlsEndpoint attempts to derive the mTLS version of the +// defaultEndpoint via regex, and returns defaultEndpoint if unsuccessful. +// +// We need to applying the following 2 transformations: +// 1. pubsub.googleapis.com to pubsub.mtls.googleapis.com +// 2. pubsub.sandbox.googleapis.com to pubsub.mtls.sandbox.googleapis.com +// +// TODO(cbro): In the future, the mTLS endpoint will be read from Service Config +// and passed in as defaultMtlsEndpoint instead of generated from defaultEndpoint, +// and this function will be removed. +func generateDefaultMtlsEndpoint(defaultEndpoint string) string { + var domains = []string{ + ".sandbox.googleapis.com", // must come first because .googleapis.com is a substring + ".googleapis.com", + } + for _, domain := range domains { + if strings.Contains(defaultEndpoint, domain) { + return strings.Replace(defaultEndpoint, domain, ".mtls"+domain, -1) + } + } + return defaultEndpoint +} + func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { if settings.TelemetryDisabled { return opts @@ -271,6 +294,13 @@ func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, if err := o.Validate(); err != nil { return nil, err } + + // NOTE(cbro): this is used only by the nightly mtls_smoketest and should + // not otherwise be used. It will be removed or renamed at some point. 
+ if os.Getenv("GOOGLE_API_USE_MTLS") == "always" { + o.Endpoint = generateDefaultMtlsEndpoint(o.Endpoint) + } + return &o, nil } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 96cf2e1689cbb..44503014172c9 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,7 +16,6 @@ import ( "net/url" "os" "strings" - "sync" "time" "go.opencensus.io/plugin/ochttp" @@ -191,31 +190,23 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt return trans } -var fallback struct { - *http.Transport - sync.Once -} - // fallbackBaseTransport is used in 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) } } return true @@ -725,16 +728,9 @@ func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_e // Expect a block item node. func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { if first { - // [Go] The original logic here would not indent the sequence when - // inside a mapping. In Go we always indent it. 
- indentless := false - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { + if !yaml_emitter_increase_indent(emitter, false, false) { return false } - if emitter.indent > original+2 { - emitter.indent -= 2 - } } if event.typ == yaml_SEQUENCE_END_EVENT { emitter.indent = emitter.indents[len(emitter.indents)-1] @@ -785,6 +781,13 @@ func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_ev if !yaml_emitter_write_indent(emitter) { return false } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } if yaml_emitter_check_simple_key(emitter) { emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) return yaml_emitter_emit_node(emitter, event, false, false, true, true) @@ -810,6 +813,29 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return false } } + if len(emitter.key_line_comment) > 0 { + // [Go] A line comment was previously provided for the key. Handle it before + // the value so the inline comments are placed correctly. + if yaml_emitter_silent_nil_event(emitter, event) && len(emitter.line_comment) == 0 { + // Nothing other than the line comment will be written on the line. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } else { + // An actual value is coming, so emit the comment line. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + // Indent in unless it's a block that will reindent anyway. 
+ if event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || (event.typ != yaml_MAPPING_START_EVENT && event.typ != yaml_SEQUENCE_START_EVENT) { + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + } emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { return false @@ -823,6 +849,10 @@ func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_ return true } +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + // Expect a node. func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, root bool, sequence bool, mapping bool, simple_key bool) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index aea9050b833ae..ac66fccc059e3 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -648,6 +648,10 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i implicit: implicit, style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } return true } if len(anchor) > 0 || len(tag) > 0 { @@ -694,25 +698,13 @@ func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_e if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark - prior_head := len(parser.head_comment) + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a 
sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) return yaml_parser_parse_node(parser, event, true, false) @@ -754,7 +746,9 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y if token.typ == yaml_BLOCK_ENTRY_TOKEN { mark := token.end_mark + prior_head_len := len(parser.head_comment) skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) token = peek_token(parser) if token == nil { return false @@ -780,6 +774,32 @@ func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *y return true } +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. 
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + // Parse the productions: // block_mapping ::= BLOCK-MAPPING_START // ******************* diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go index 2e9cbf97fa178..d9a539c39aecd 100644 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -749,6 +749,11 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { if !ok { return } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } if !yaml_parser_scan_line_comment(parser, comment_mark) { ok = false return diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go index 6ec4346231cf5..56e8a849031f2 100644 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -346,6 +346,12 @@ const ( // and maps, Node is an intermediate representation that allows detailed // control over the content being decoded or encoded. // +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. 
An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// // Values that make use of the Node type interact with the yaml package in the // same way any other type would do, by encoding and decoding yaml data // directly or indirectly into them. diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go index 2719cfbb0b756..7c6d007706193 100644 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -787,6 +787,8 @@ type yaml_emitter_t struct { foot_comment []byte tail_comment []byte + key_line_comment []byte + // Dumper stuff opened bool // If the stream was already opened? diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index bf125b62a73f1..e7aaead8c32a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -873,6 +873,9 @@ const ( // FieldManagerConflict is used to report when another client claims to manage this field, // It should only be returned for a request using server-side apply. CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" + // CauseTypeResourceVersionTooLarge is used to report that the requested resource version + // is newer than the data observed by the API server, so the request cannot be served. + CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 7449cbb0a01ab..7b64e68157e06 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -446,7 +446,7 @@ redirectLoop: // Only follow redirects to the same host. Otherwise, propagate the redirect response back. 
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - break redirectLoop + return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) } // Reset the connection. diff --git a/vendor/modules.txt b/vendor/modules.txt index 5d05b0da6dd77..77acf5edd5ffe 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -16,7 +16,7 @@ cloud.google.com/go/bigtable/internal/option cloud.google.com/go/storage # github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-pipeline-go/pipeline -# github.com/Azure/azure-sdk-for-go v43.0.0+incompatible => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible +# github.com/Azure/azure-sdk-for-go v44.2.0+incompatible => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network github.com/Azure/azure-sdk-for-go/version @@ -25,20 +25,22 @@ github.com/Azure/azure-storage-blob-go/azblob # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/Azure/go-autorest/autorest v0.10.2 +# github.com/Azure/go-autorest v14.2.0+incompatible +github.com/Azure/go-autorest +# github.com/Azure/go-autorest/autorest v0.11.2 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.8.3 +# github.com/Azure/go-autorest/autorest/adal v0.9.0 github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/date v0.2.0 +# github.com/Azure/go-autorest/autorest/date v0.3.0 github.com/Azure/go-autorest/autorest/date # github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 github.com/Azure/go-autorest/autorest/to # 
github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503 github.com/Azure/go-autorest/autorest/validation -# github.com/Azure/go-autorest/logger v0.1.0 +# github.com/Azure/go-autorest/logger v0.2.0 github.com/Azure/go-autorest/logger -# github.com/Azure/go-autorest/tracing v0.5.0 +# github.com/Azure/go-autorest/tracing v0.6.0 github.com/Azure/go-autorest/tracing # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml @@ -63,7 +65,7 @@ github.com/armon/go-metrics github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.31.9 +# github.com/aws/aws-sdk-go v1.33.12 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -127,8 +129,8 @@ github.com/bradfitz/gomemcache/memcache # github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee ## explicit github.com/c2h5oh/datasize -# github.com/cenkalti/backoff v2.2.1+incompatible -github.com/cenkalti/backoff +# github.com/cenkalti/backoff/v4 v4.0.2 +github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash # github.com/cespare/xxhash/v2 v2.1.1 @@ -149,7 +151,7 @@ github.com/coreos/go-systemd/journal github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog -# github.com/cortexproject/cortex v1.2.1-0.20200709155522-19502213923d +# github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 ## explicit github.com/cortexproject/cortex/pkg/alertmanager github.com/cortexproject/cortex/pkg/alertmanager/alerts @@ -205,8 +207,8 @@ github.com/cortexproject/cortex/pkg/ring/kv/consul github.com/cortexproject/cortex/pkg/ring/kv/etcd github.com/cortexproject/cortex/pkg/ring/kv/memberlist 
github.com/cortexproject/cortex/pkg/ruler -github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt github.com/cortexproject/cortex/pkg/ruler/rules +github.com/cortexproject/cortex/pkg/ruler/rules/local github.com/cortexproject/cortex/pkg/ruler/rules/objectclient github.com/cortexproject/cortex/pkg/storage/backend/azure github.com/cortexproject/cortex/pkg/storage/backend/filesystem @@ -218,6 +220,7 @@ github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb github.com/cortexproject/cortex/pkg/util github.com/cortexproject/cortex/pkg/util/chunkcompat github.com/cortexproject/cortex/pkg/util/extract +github.com/cortexproject/cortex/pkg/util/fakeauth github.com/cortexproject/cortex/pkg/util/flagext github.com/cortexproject/cortex/pkg/util/grpc github.com/cortexproject/cortex/pkg/util/grpc/healthcheck @@ -238,7 +241,7 @@ github.com/cortexproject/cortex/tools/querytee github.com/davecgh/go-spew/spew # github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go -# github.com/digitalocean/godo v1.37.0 +# github.com/digitalocean/godo v1.42.0 github.com/digitalocean/godo # github.com/docker/distribution v2.7.1+incompatible github.com/docker/distribution/digestset @@ -347,7 +350,7 @@ github.com/go-openapi/runtime/middleware/denco github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security -# github.com/go-openapi/spec v0.19.7 +# github.com/go-openapi/spec v0.19.8 github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.19.5 github.com/go-openapi/strfmt @@ -427,7 +430,7 @@ github.com/googleapis/gax-go/v2 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions -# github.com/gophercloud/gophercloud v0.11.0 +# github.com/gophercloud/gophercloud v0.12.0 github.com/gophercloud/gophercloud 
github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips @@ -469,7 +472,7 @@ github.com/grpc-ecosystem/grpc-gateway/utilities github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool -# github.com/hashicorp/consul/api v1.4.0 +# github.com/hashicorp/consul/api v1.5.0 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap @@ -491,7 +494,7 @@ github.com/hashicorp/go-sockaddr ## explicit github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/memberlist v0.2.0 +# github.com/hashicorp/memberlist v0.2.2 github.com/hashicorp/memberlist # github.com/hashicorp/serf v0.9.0 github.com/hashicorp/serf/coordinate @@ -562,7 +565,7 @@ github.com/mattn/go-ieproxy github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.29 +# github.com/miekg/dns v1.1.30 github.com/miekg/dns # github.com/minio/minio-go/v6 v6.0.56 github.com/minio/minio-go/v6 @@ -601,9 +604,9 @@ github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 # github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 github.com/opentracing-contrib/go-grpc -# github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 +# github.com/opentracing-contrib/go-stdlib v1.0.0 github.com/opentracing-contrib/go-stdlib/nethttp -# github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 +# github.com/opentracing/opentracing-go v1.2.0 ## explicit github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext @@ -617,7 +620,7 @@ 
github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/prometheus/alertmanager v0.20.0 +# github.com/prometheus/alertmanager v0.21.0 github.com/prometheus/alertmanager/api github.com/prometheus/alertmanager/api/metrics github.com/prometheus/alertmanager/api/v1 @@ -640,7 +643,6 @@ github.com/prometheus/alertmanager/nflog github.com/prometheus/alertmanager/nflog/nflogpb github.com/prometheus/alertmanager/notify github.com/prometheus/alertmanager/notify/email -github.com/prometheus/alertmanager/notify/hipchat github.com/prometheus/alertmanager/notify/opsgenie github.com/prometheus/alertmanager/notify/pagerduty github.com/prometheus/alertmanager/notify/pushover @@ -685,7 +687,7 @@ github.com/prometheus/node_exporter/https github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c +# github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 ## explicit github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -744,9 +746,9 @@ github.com/prometheus/prometheus/util/teststorage github.com/prometheus/prometheus/util/testutil github.com/prometheus/prometheus/util/treecache github.com/prometheus/prometheus/web/api/v1 -# github.com/rs/cors v1.6.0 +# github.com/rs/cors v1.7.0 github.com/rs/cors -# github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da +# github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e github.com/samuel/go-zookeeper/zk # github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b => github.com/satori/go.uuid v1.2.0 github.com/satori/go.uuid @@ -762,7 +764,7 @@ github.com/sercand/kuberesolver github.com/shurcooL/httpfs/filter 
github.com/shurcooL/httpfs/union github.com/shurcooL/httpfs/vfsutil -# github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd +# github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c ## explicit github.com/shurcooL/vfsgen # github.com/sirupsen/logrus v1.5.0 @@ -821,7 +823,7 @@ github.com/tmc/grpc-websocket-proxy/wsproxy # github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 ## explicit github.com/tonistiigi/fifo -# github.com/uber/jaeger-client-go v2.23.1+incompatible +# github.com/uber/jaeger-client-go v2.25.0+incompatible ## explicit github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config @@ -848,7 +850,7 @@ github.com/uber/jaeger-lib/metrics/prometheus ## explicit # github.com/ugorji/go/codec v1.1.7 github.com/ugorji/go/codec -# github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5 +# github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9 ## explicit github.com/weaveworks/common/aws github.com/weaveworks/common/errors @@ -976,6 +978,9 @@ go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate # go.uber.org/atomic v1.6.0 go.uber.org/atomic +# go.uber.org/goleak v1.0.0 +go.uber.org/goleak +go.uber.org/goleak/internal/stack # go.uber.org/multierr v1.5.0 go.uber.org/multierr # go.uber.org/zap v1.14.1 @@ -985,7 +990,7 @@ go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore -# golang.org/x/crypto v0.0.0-20200422194213-44a606286825 +# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b @@ -996,10 +1001,10 @@ golang.org/x/crypto/ssh/terminal # golang.org/x/lint v0.0.0-20200302205851-738671d3881b golang.org/x/lint golang.org/x/lint/golint -# golang.org/x/mod v0.2.0 +# golang.org/x/mod v0.3.0 golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net 
v0.0.0-20200602114024-627f9648deb9 +# golang.org/x/net v0.0.0-20200707034311-ab3426394381 ## explicit golang.org/x/net/bpf golang.org/x/net/context @@ -1025,11 +1030,10 @@ golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a +# golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae -## explicit +# golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix @@ -1041,9 +1045,9 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 +# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20200603131246-cc40288be839 +# golang.org/x/tools v0.0.0-20200725200936-102e7d357031 golang.org/x/tools/cmd/goimports golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/inspect @@ -1068,10 +1072,11 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal +golang.org/x/tools/internal/typesinternal # golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.26.0 +# google.golang.org/api v0.29.0 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 google.golang.org/api/googleapi @@ -1100,7 +1105,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20200603110839-e855014d5736 +# google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 
google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/bigtable/admin/v2 @@ -1228,7 +1233,7 @@ gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.3.0 ## explicit gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8 +# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 gopkg.in/yaml.v3 # honnef.co/go/tools v0.0.1-2020.1.3 honnef.co/go/tools/arg @@ -1260,7 +1265,7 @@ honnef.co/go/tools/staticcheck honnef.co/go/tools/stylecheck honnef.co/go/tools/unused honnef.co/go/tools/version -# k8s.io/api v0.18.3 +# k8s.io/api v0.18.6 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 @@ -1301,7 +1306,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.18.3 +# k8s.io/apimachinery v0.18.6 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource @@ -1421,7 +1426,6 @@ sigs.k8s.io/structured-merge-diff/v3/value sigs.k8s.io/yaml # github.com/hpcloud/tail => github.com/grafana/tail v0.0.0-20191024143944-0b54ddf21fe7 # github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.2.0+incompatible -# github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.0+incompatible # k8s.io/client-go => k8s.io/client-go v0.18.3 # github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85