From fe679fbca68e1b3f8e980538ec6138efa1221af3 Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Wed, 30 Nov 2016 14:40:55 +0100 Subject: [PATCH 1/3] switch from godep to glide --- Godeps/Godeps.json | 2250 ----------- Godeps/Readme | 5 - glide.lock | 767 ++++ glide.yaml | 59 + script/godep-restore.sh | 44 - vendor/bitbucket.org/ww/goautoneg/Makefile | 13 - vendor/bitbucket.org/ww/goautoneg/README.txt | 67 - vendor/bitbucket.org/ww/goautoneg/autoneg.go | 162 - .../cloud => cloud.google.com/go}/LICENSE | 0 .../go}/compute/metadata/metadata.go | 94 +- vendor/cloud.google.com/go/internal/cloud.go | 64 + vendor/github.com/Azure/go-ansiterm/LICENSE | 21 - vendor/github.com/Azure/go-ansiterm/README.md | 12 - .../github.com/Azure/go-ansiterm/constants.go | 188 - .../github.com/Azure/go-ansiterm/context.go | 7 - .../Azure/go-ansiterm/csi_entry_state.go | 49 - .../Azure/go-ansiterm/csi_param_state.go | 38 - .../go-ansiterm/escape_intermediate_state.go | 36 - .../Azure/go-ansiterm/escape_state.go | 47 - .../Azure/go-ansiterm/event_handler.go | 90 - .../Azure/go-ansiterm/ground_state.go | 24 - .../Azure/go-ansiterm/osc_string_state.go | 31 - vendor/github.com/Azure/go-ansiterm/parser.go | 136 - .../go-ansiterm/parser_action_helpers.go | 103 - .../Azure/go-ansiterm/parser_actions.go | 122 - vendor/github.com/Azure/go-ansiterm/states.go | 71 - .../github.com/Azure/go-ansiterm/utilities.go | 21 - .../Azure/go-ansiterm/winterm/ansi.go | 182 - .../Azure/go-ansiterm/winterm/api.go | 322 -- .../go-ansiterm/winterm/attr_translation.go | 100 - .../go-ansiterm/winterm/cursor_helpers.go | 101 - .../go-ansiterm/winterm/erase_helpers.go | 84 - .../go-ansiterm/winterm/scroll_helper.go | 118 - .../Azure/go-ansiterm/winterm/utilities.go | 9 - .../go-ansiterm/winterm/win_event_handler.go | 726 ---- vendor/github.com/MakeNowJust/heredoc/LICENSE | 21 - .../github.com/MakeNowJust/heredoc/README.md | 53 - .../github.com/MakeNowJust/heredoc/heredoc.go | 89 - 
vendor/github.com/Sirupsen/logrus/.gitignore | 1 - vendor/github.com/Sirupsen/logrus/.travis.yml | 9 - .../github.com/Sirupsen/logrus/CHANGELOG.md | 66 - vendor/github.com/Sirupsen/logrus/README.md | 388 -- vendor/github.com/Sirupsen/logrus/doc.go | 26 - vendor/github.com/Sirupsen/logrus/entry.go | 20 +- vendor/github.com/Sirupsen/logrus/exported.go | 5 - vendor/github.com/Sirupsen/logrus/hooks.go | 6 +- .../Sirupsen/logrus/json_formatter.go | 7 +- vendor/github.com/Sirupsen/logrus/logger.go | 19 +- vendor/github.com/Sirupsen/logrus/logrus.go | 53 +- .../Sirupsen/logrus/terminal_darwin.go | 12 + .../Sirupsen/logrus/terminal_freebsd.go | 20 + .../Sirupsen/logrus/terminal_notwindows.go | 6 +- .../{terminal_bsd.go => terminal_openbsd.go} | 2 - .../Sirupsen/logrus/terminal_solaris.go | 15 - .../Sirupsen/logrus/terminal_windows.go | 4 +- .../Sirupsen/logrus/text_formatter.go | 48 +- .../beorn7/perks/quantile/exampledata.txt | 2388 ------------ vendor/github.com/blang/semver/README.md | 142 - .../coreos/etcd/auth/authpb/auth.pb.go | 4 +- .../coreos/etcd/auth/authpb/auth.proto | 37 - .../github.com/coreos/etcd/client/README.md | 117 - .../github.com/coreos/etcd/client/client.go | 4 +- .../coreos/etcd/client/cluster_error.go | 6 +- .../coreos/etcd/client/keys.generated.go | 3 +- vendor/github.com/coreos/etcd/client/keys.go | 27 +- vendor/github.com/coreos/etcd/client/util.go | 30 + .../github.com/coreos/etcd/clientv3/README.md | 77 - .../github.com/coreos/etcd/clientv3/auth.go | 1 + .../coreos/etcd/clientv3/balancer.go | 73 +- .../github.com/coreos/etcd/clientv3/client.go | 119 +- .../github.com/coreos/etcd/clientv3/config.go | 13 +- vendor/github.com/coreos/etcd/clientv3/kv.go | 24 +- .../github.com/coreos/etcd/clientv3/lease.go | 46 +- .../github.com/coreos/etcd/clientv3/logger.go | 40 +- vendor/github.com/coreos/etcd/clientv3/op.go | 160 +- .../github.com/coreos/etcd/clientv3/retry.go | 14 +- 
vendor/github.com/coreos/etcd/clientv3/txn.go | 8 +- .../github.com/coreos/etcd/clientv3/watch.go | 568 ++- .../etcdserver/api/v3rpc/rpctypes/error.go | 43 +- .../etcdserver/etcdserverpb/etcdserver.pb.go | 6 +- .../etcdserver/etcdserverpb/etcdserver.proto | 34 - .../etcdserverpb/raft_internal.pb.go | 140 +- .../etcdserverpb/raft_internal.proto | 72 - .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 1599 ++++++-- .../etcd/etcdserver/etcdserverpb/rpc.pb.gw.go | 45 + .../etcd/etcdserver/etcdserverpb/rpc.proto | 894 ----- .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 92 +- .../coreos/etcd/mvcc/mvccpb/kv.proto | 46 - .../coreos/etcd/pkg/fileutil/dir_unix.go | 22 + .../coreos/etcd/pkg/fileutil/dir_windows.go | 46 + .../coreos/etcd/pkg/fileutil/fileutil.go | 25 +- .../coreos/etcd/pkg/transport/listener.go | 36 +- .../etcd/pkg/transport/timeout_listener.go | 10 +- .../etcd/pkg/transport/timeout_transport.go | 2 +- .../coreos/etcd/pkg/transport/transport.go | 5 +- .../github.com/coreos/pkg/capnslog/README.md | 39 - vendor/github.com/coreos/pkg/health/README.md | 11 - .../github.com/coreos/pkg/httputil/README.md | 13 - vendor/github.com/dgrijalva/jwt-go/.gitignore | 4 - .../github.com/dgrijalva/jwt-go/.travis.yml | 8 - .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 96 - vendor/github.com/dgrijalva/jwt-go/README.md | 85 - .../dgrijalva/jwt-go/VERSION_HISTORY.md | 105 - .../github.com/docker/distribution/.gitignore | 37 - .../github.com/docker/distribution/.mailmap | 18 - vendor/github.com/docker/distribution/AUTHORS | 147 - .../docker/distribution/BUILDING.md | 119 - .../docker/distribution/CHANGELOG.md | 35 - .../docker/distribution/CONTRIBUTING.md | 140 - .../github.com/docker/distribution/Dockerfile | 18 - .../docker/distribution/Jenkinsfile | 8 - .../docker/distribution/MAINTAINERS | 58 - .../github.com/docker/distribution/Makefile | 106 - .../github.com/docker/distribution/README.md | 131 - .../github.com/docker/distribution/ROADMAP.md | 267 
-- .../github.com/docker/distribution/blobs.go | 12 - .../github.com/docker/distribution/circle.yml | 93 - .../docker/distribution/context/http.go | 16 +- .../docker/distribution/coverpkg.sh | 7 - .../distribution/manifest/schema2/builder.go | 3 - .../distribution/manifest/schema2/manifest.go | 3 - .../docker/distribution/manifest/versioned.go | 6 +- .../distribution/reference/reference.go | 8 +- .../docker/api/types/versions/README.md | 14 - .../cli/command/bundlefile/bundlefile.go | 71 - .../docker/docker/pkg/signal/README.md | 1 - .../docker/docker/pkg/system/chtimes.go | 52 - .../docker/docker/pkg/system/chtimes_unix.go | 14 - .../docker/pkg/system/chtimes_windows.go | 27 - .../docker/docker/pkg/system/errors.go | 10 - .../docker/pkg/system/events_windows.go | 83 - .../docker/docker/pkg/system/filesys.go | 19 - .../docker/pkg/system/filesys_windows.go | 82 - .../docker/docker/pkg/system/lstat.go | 19 - .../docker/docker/pkg/system/lstat_windows.go | 25 - .../docker/docker/pkg/system/meminfo.go | 17 - .../docker/docker/pkg/system/meminfo_linux.go | 65 - .../docker/pkg/system/meminfo_solaris.go | 128 - .../docker/pkg/system/meminfo_unsupported.go | 8 - .../docker/pkg/system/meminfo_windows.go | 44 - .../docker/docker/pkg/system/mknod.go | 22 - .../docker/docker/pkg/system/mknod_windows.go | 13 - .../docker/docker/pkg/system/path_unix.go | 14 - .../docker/docker/pkg/system/path_windows.go | 37 - .../docker/docker/pkg/system/stat.go | 53 - .../docker/docker/pkg/system/stat_darwin.go | 32 - .../docker/docker/pkg/system/stat_freebsd.go | 27 - .../docker/docker/pkg/system/stat_linux.go | 33 - .../docker/docker/pkg/system/stat_openbsd.go | 15 - .../docker/docker/pkg/system/stat_solaris.go | 34 - .../docker/pkg/system/stat_unsupported.go | 17 - .../docker/docker/pkg/system/stat_windows.go | 43 - .../docker/docker/pkg/system/syscall_unix.go | 17 - .../docker/pkg/system/syscall_windows.go | 103 - .../docker/docker/pkg/system/umask.go | 13 - 
.../docker/docker/pkg/system/umask_windows.go | 9 - .../docker/pkg/system/utimes_freebsd.go | 22 - .../docker/docker/pkg/system/utimes_linux.go | 26 - .../docker/pkg/system/utimes_unsupported.go | 10 - .../docker/docker/pkg/system/xattrs_linux.go | 63 - .../docker/pkg/system/xattrs_unsupported.go | 13 - .../docker/docker/pkg/term/ascii.go | 66 - .../docker/docker/pkg/term/tc_linux_cgo.go | 50 - .../docker/docker/pkg/term/tc_other.go | 20 - .../docker/docker/pkg/term/tc_solaris_cgo.go | 63 - .../github.com/docker/docker/pkg/term/term.go | 123 - .../docker/docker/pkg/term/term_solaris.go | 41 - .../docker/docker/pkg/term/term_unix.go | 29 - .../docker/docker/pkg/term/term_windows.go | 233 -- .../docker/docker/pkg/term/termios_darwin.go | 69 - .../docker/docker/pkg/term/termios_freebsd.go | 69 - .../docker/docker/pkg/term/termios_linux.go | 47 - .../docker/docker/pkg/term/termios_openbsd.go | 69 - .../docker/pkg/term/windows/ansi_reader.go | 261 -- .../docker/pkg/term/windows/ansi_writer.go | 64 - .../docker/docker/pkg/term/windows/console.go | 35 - .../docker/docker/pkg/term/windows/windows.go | 33 - .../docker/engine-api/types/client.go | 61 +- .../engine-api/types/container/config.go | 25 - .../engine-api/types/container/host_config.go | 14 +- .../docker/engine-api/types/errors.go | 6 - .../docker/engine-api/types/filters/parse.go | 16 +- .../engine-api/types/network/network.go | 5 +- .../docker/engine-api/types/plugin.go | 169 - .../docker/engine-api/types/seccomp.go | 5 - .../docker/engine-api/types/swarm/common.go | 21 - .../engine-api/types/swarm/container.go | 67 - .../docker/engine-api/types/swarm/network.go | 99 - .../docker/engine-api/types/swarm/node.go | 118 - .../docker/engine-api/types/swarm/service.go | 44 - .../docker/engine-api/types/swarm/swarm.go | 129 - .../docker/engine-api/types/swarm/task.go | 115 - .../docker/engine-api/types/types.go | 94 +- .../engine-api/types/versions/README.md | 14 - .../docker/go-units/CONTRIBUTING.md | 67 - 
vendor/github.com/docker/go-units/MAINTAINERS | 27 - vendor/github.com/docker/go-units/README.md | 18 - vendor/github.com/docker/go-units/circle.yml | 11 - .../docker/libtrust/CONTRIBUTING.md | 13 - vendor/github.com/docker/libtrust/MAINTAINERS | 3 - vendor/github.com/docker/libtrust/README.md | 18 - .../github.com/emicklei/go-restful/.gitignore | 70 - .../github.com/emicklei/go-restful/CHANGES.md | 163 - .../github.com/emicklei/go-restful/README.md | 74 - vendor/github.com/emicklei/go-restful/Srcfile | 1 - .../emicklei/go-restful/bench_test.sh | 10 - .../emicklei/go-restful/coverage.sh | 2 - .../github.com/emicklei/go-restful/install.sh | 10 - .../emicklei/go-restful/swagger/CHANGES.md | 43 - .../emicklei/go-restful/swagger/README.md | 76 - .../github.com/evanphx/json-patch/.travis.yml | 14 - .../github.com/evanphx/json-patch/README.md | 29 - vendor/github.com/fatih/structs/.gitignore | 23 - vendor/github.com/fatih/structs/.travis.yml | 11 - vendor/github.com/fatih/structs/README.md | 163 - vendor/github.com/fatih/structs/structs.go | 7 +- vendor/github.com/flynn/go-shlex/Makefile | 21 - vendor/github.com/flynn/go-shlex/README.md | 2 - .../fsouza/go-dockerclient/.gitignore | 2 - .../fsouza/go-dockerclient/.travis.yml | 27 - .../github.com/fsouza/go-dockerclient/AUTHORS | 132 - .../fsouza/go-dockerclient/DOCKER-LICENSE | 6 - .../fsouza/go-dockerclient/Makefile | 57 - .../fsouza/go-dockerclient/README.markdown | 105 - .../github.com/Sirupsen/logrus/CHANGELOG.md | 55 - .../github.com/Sirupsen/logrus/README.md | 365 -- .../docker/docker/pkg/archive/README.md | 1 - .../docker/go-units/CONTRIBUTING.md | 67 - .../github.com/docker/go-units/MAINTAINERS | 27 - .../github.com/docker/go-units/README.md | 18 - .../github.com/docker/go-units/circle.yml | 11 - .../hashicorp/go-cleanhttp/README.md | 30 - .../runc/libcontainer/user/MAINTAINERS | 2 - .../golang.org/x/net/context/context.go | 2 +- 
.../external/golang.org/x/sys/unix/mkall.sh | 274 -- .../golang.org/x/sys/unix/mkerrors.sh | 476 --- .../golang.org/x/sys/unix/mksyscall.pl | 323 -- .../x/sys/unix/mksyscall_solaris.pl | 294 -- .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 264 -- .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 - .../x/sys/unix/mksysnum_dragonfly.pl | 50 - .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 63 - .../golang.org/x/sys/unix/mksysnum_linux.pl | 58 - .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 - .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 - .../external/golang.org/x/sys/unix/syscall.go | 2 +- vendor/github.com/ghodss/yaml/.gitignore | 20 - vendor/github.com/ghodss/yaml/.travis.yml | 7 - vendor/github.com/ghodss/yaml/README.md | 116 - .../github.com/gogo/protobuf/proto/Makefile | 43 - vendor/github.com/golang/glog/README | 44 - .../github.com/golang/protobuf/proto/Makefile | 43 - vendor/github.com/gonum/blas/.travis.yml | 39 - vendor/github.com/gonum/blas/README.md | 96 - vendor/github.com/gonum/blas/blas.go | 388 -- vendor/github.com/gonum/blas/blas64/blas64.go | 434 --- vendor/github.com/gonum/blas/native/dgemm.go | 391 -- vendor/github.com/gonum/blas/native/doc.go | 88 - .../gonum/blas/native/general_double.go | 155 - .../gonum/blas/native/general_single.go | 157 - .../gonum/blas/native/internal/math32/math.go | 113 - .../gonum/blas/native/internal/math32/sqrt.go | 25 - .../blas/native/internal/math32/sqrt_amd64.go | 20 - .../blas/native/internal/math32/sqrt_amd64.s | 20 - .../gonum/blas/native/level1double.go | 599 --- .../gonum/blas/native/level1double_ddot.go | 46 - .../gonum/blas/native/level1single.go | 623 ---- .../gonum/blas/native/level1single_dsdot.go | 50 - .../gonum/blas/native/level1single_sdot.go | 50 - .../gonum/blas/native/level1single_sdsdot.go | 50 - .../gonum/blas/native/level2double.go | 2258 ------------ .../gonum/blas/native/level2single.go | 2292 ------------ .../gonum/blas/native/level3double.go | 
831 ----- .../gonum/blas/native/level3single.go | 843 ----- vendor/github.com/gonum/blas/native/native.go | 66 - vendor/github.com/gonum/blas/native/sgemm.go | 395 -- .../gonum/blas/native/single_precision | 143 - vendor/github.com/gonum/graph/.gitignore | 1 - vendor/github.com/gonum/graph/.travis.yml | 29 - vendor/github.com/gonum/graph/README.md | 15 - .../gonum/graph/concrete/concrete.go | 8 - .../graph/concrete/dense_directed_matrix.go | 136 - .../graph/concrete/dense_undirected_matrix.go | 131 - .../gonum/graph/concrete/directed.go | 269 -- .../gonum/graph/concrete/undirected.go | 250 -- .../github.com/gonum/graph/concrete/util.go | 44 - vendor/github.com/gonum/graph/doc.go | 38 - .../gonum/graph/encoding/dot/dot.go | 383 -- vendor/github.com/gonum/graph/graph.go | 173 - .../github.com/gonum/graph/internal/linear.go | 73 - vendor/github.com/gonum/graph/internal/set.go | 211 -- .../github.com/gonum/graph/internal/sort.go | 28 - vendor/github.com/gonum/graph/path/a_star.go | 157 - .../gonum/graph/path/bellman_ford_moore.go | 59 - .../gonum/graph/path/control_flow.go | 118 - .../github.com/gonum/graph/path/dijkstra.go | 136 - .../github.com/gonum/graph/path/disjoint.go | 87 - .../gonum/graph/path/floydwarshall.go | 55 - .../gonum/graph/path/johnson_apsp.go | 137 - .../github.com/gonum/graph/path/shortest.go | 319 -- .../gonum/graph/path/spanning_tree.go | 108 - .../gonum/graph/topo/bron_kerbosch.go | 225 -- .../gonum/graph/topo/johnson_cycles.go | 285 -- .../gonum/graph/topo/non_tomita_choice.go | 9 - vendor/github.com/gonum/graph/topo/tarjan.go | 161 - .../gonum/graph/topo/tomita_choice.go | 9 - vendor/github.com/gonum/graph/topo/topo.go | 58 - .../gonum/graph/traverse/traverse.go | 182 - vendor/github.com/gonum/internal/asm/caxpy.go | 22 - vendor/github.com/gonum/internal/asm/cdotc.go | 23 - vendor/github.com/gonum/internal/asm/cdotu.go | 23 - vendor/github.com/gonum/internal/asm/complex | 58 - 
vendor/github.com/gonum/internal/asm/conj.go | 7 - vendor/github.com/gonum/internal/asm/daxpy.go | 22 - .../gonum/internal/asm/daxpy_amd64.go | 12 - .../gonum/internal/asm/daxpy_amd64.s | 140 - vendor/github.com/gonum/internal/asm/ddot.go | 23 - .../gonum/internal/asm/ddot_amd64.go | 10 - .../gonum/internal/asm/ddot_amd64.s | 140 - vendor/github.com/gonum/internal/asm/dsdot.go | 23 - .../github.com/gonum/internal/asm/generate.go | 8 - vendor/github.com/gonum/internal/asm/saxpy.go | 22 - vendor/github.com/gonum/internal/asm/sdot.go | 23 - .../gonum/internal/asm/single_precision | 30 - vendor/github.com/gonum/internal/asm/zaxpy.go | 22 - vendor/github.com/gonum/internal/asm/zdotc.go | 25 - vendor/github.com/gonum/internal/asm/zdotu.go | 23 - vendor/github.com/gonum/lapack/.gitignore | 1 - vendor/github.com/gonum/lapack/.travis.yml | 38 - vendor/github.com/gonum/lapack/README.md | 58 - vendor/github.com/gonum/lapack/lapack.go | 60 - .../gonum/lapack/lapack64/lapack64.go | 49 - .../github.com/gonum/lapack/native/dgelq2.go | 44 - .../github.com/gonum/lapack/native/dgelqf.go | 84 - .../github.com/gonum/lapack/native/dgels.go | 200 - .../github.com/gonum/lapack/native/dgeqr2.go | 57 - .../github.com/gonum/lapack/native/dgeqrf.go | 98 - .../github.com/gonum/lapack/native/dlange.go | 76 - .../github.com/gonum/lapack/native/dlapy2.go | 12 - .../github.com/gonum/lapack/native/dlarf.go | 82 - .../github.com/gonum/lapack/native/dlarfb.go | 424 --- .../github.com/gonum/lapack/native/dlarfg.go | 60 - .../github.com/gonum/lapack/native/dlarft.go | 148 - .../github.com/gonum/lapack/native/dlascl.go | 72 - .../github.com/gonum/lapack/native/dlaset.go | 37 - .../github.com/gonum/lapack/native/dlassq.go | 29 - vendor/github.com/gonum/lapack/native/doc.go | 28 - .../github.com/gonum/lapack/native/dorm2r.go | 86 - .../github.com/gonum/lapack/native/dorml2.go | 83 - 
.../github.com/gonum/lapack/native/dormlq.go | 155 - .../github.com/gonum/lapack/native/dormqr.go | 139 - .../github.com/gonum/lapack/native/dpotf2.go | 73 - .../github.com/gonum/lapack/native/dpotrf.go | 75 - .../github.com/gonum/lapack/native/dtrtrs.go | 31 - .../github.com/gonum/lapack/native/general.go | 92 - .../github.com/gonum/lapack/native/iladlc.go | 31 - .../github.com/gonum/lapack/native/iladlr.go | 28 - .../github.com/gonum/lapack/native/ilaenv.go | 375 -- .../github.com/gonum/matrix/mat64/cholesky.go | 140 - vendor/github.com/gonum/matrix/mat64/dense.go | 646 ---- .../gonum/matrix/mat64/dense_arithmetic.go | 975 ----- vendor/github.com/gonum/matrix/mat64/eigen.go | 819 ----- .../github.com/gonum/matrix/mat64/format.go | 153 - .../gonum/matrix/mat64/index_bound_checks.go | 151 - .../matrix/mat64/index_no_bound_checks.go | 151 - vendor/github.com/gonum/matrix/mat64/inner.go | 102 - vendor/github.com/gonum/matrix/mat64/io.go | 18 - vendor/github.com/gonum/matrix/mat64/lq.go | 193 - vendor/github.com/gonum/matrix/mat64/lu.go | 212 -- .../github.com/gonum/matrix/mat64/matrix.go | 472 --- vendor/github.com/gonum/matrix/mat64/pool.go | 80 - vendor/github.com/gonum/matrix/mat64/qr.go | 185 - vendor/github.com/gonum/matrix/mat64/svd.go | 479 --- .../gonum/matrix/mat64/symmetric.go | 198 - .../gonum/matrix/mat64/triangular.go | 177 - .../github.com/gonum/matrix/mat64/vector.go | 349 -- vendor/github.com/google/gofuzz/.travis.yml | 13 - .../github.com/google/gofuzz/CONTRIBUTING.md | 67 - vendor/github.com/google/gofuzz/README.md | 71 - vendor/github.com/gorilla/context/.travis.yml | 9 - vendor/github.com/gorilla/context/README.md | 7 - vendor/github.com/gorilla/mux/.travis.yml | 7 - vendor/github.com/gorilla/mux/README.md | 7 - .../runtime/internal/stream_chunk.proto | 12 - vendor/github.com/imdario/mergo/.travis.yml | 2 - vendor/github.com/imdario/mergo/README.md | 68 - 
.../inconshreveable/mousetrap/README.md | 23 - .../github.com/jonboulle/clockwork/.gitignore | 25 - .../jonboulle/clockwork/.travis.yml | 3 - .../github.com/jonboulle/clockwork/README.md | 61 - vendor/github.com/juju/ratelimit/README.md | 117 - .../openshift/origin/pkg/api/graph/graph.go | 693 ---- .../pkg/api/graph/graphview/dc_pipeline.go | 84 - .../pkg/api/graph/graphview/image_pipeline.go | 240 -- .../origin/pkg/api/graph/graphview/intset.go | 46 - .../origin/pkg/api/graph/graphview/petset.go | 51 - .../origin/pkg/api/graph/graphview/pod.go | 39 - .../origin/pkg/api/graph/graphview/rc.go | 100 - .../pkg/api/graph/graphview/service_group.go | 134 - .../origin/pkg/api/graph/interfaces.go | 134 - .../openshift/origin/pkg/api/graph/types.go | 69 - .../origin/pkg/api/kubegraph/analysis/hpa.go | 180 - .../origin/pkg/api/kubegraph/analysis/pod.go | 135 - .../pkg/api/kubegraph/analysis/podspec.go | 124 - .../origin/pkg/api/kubegraph/analysis/rc.go | 56 - .../origin/pkg/api/kubegraph/edges.go | 248 -- .../origin/pkg/api/kubegraph/nodes/nodes.go | 187 - .../origin/pkg/api/kubegraph/nodes/types.go | 325 -- .../origin/pkg/api/restmapper/discovery.go | 176 - .../openshift/origin/pkg/auth/api/types.go | 2 +- .../authenticator/request/x509request/x509.go | 61 +- .../origin/pkg/authorization/api/register.go | 1 + .../origin/pkg/authorization/api/synthetic.go | 1 + .../origin/pkg/authorization/api/types.go | 22 + .../api/zz_generated.deepcopy.go | 40 + .../pkg/authorization/reaper/cluster_role.go | 60 - .../origin/pkg/authorization/reaper/role.go | 47 - .../openshift/origin/pkg/build/api/types.go | 66 +- .../pkg/build/api/zz_generated.deepcopy.go | 74 +- .../origin/pkg/build/client/clients.go | 109 - .../openshift/origin/pkg/build/cmd/doc.go | 2 - .../openshift/origin/pkg/build/cmd/reaper.go | 150 - .../origin/pkg/build/graph/analysis/bc.go | 351 -- .../openshift/origin/pkg/build/graph/edges.go | 133 - .../origin/pkg/build/graph/helpers.go | 111 - 
.../origin/pkg/build/graph/nodes/nodes.go | 47 - .../origin/pkg/build/graph/nodes/types.go | 90 - .../openshift/origin/pkg/build/util/doc.go | 3 - .../openshift/origin/pkg/build/util/util.go | 169 - .../openshift/origin/pkg/client/client.go | 5 + .../origin/pkg/client/deploymentconfigs.go | 13 + .../origin/pkg/client/imagestreams.go | 7 - .../origin/pkg/client/oauthclient.go | 7 + .../origin/pkg/client/subjectrulesreviews.go | 32 + .../origin/pkg/cmd/cli/config/smart_merge.go | 36 +- .../pkg/cmd/cli/describe/chaindescriber.go | 319 -- .../pkg/cmd/cli/describe/deployments.go | 417 --- .../origin/pkg/cmd/cli/describe/describer.go | 1606 -------- .../origin/pkg/cmd/cli/describe/helpers.go | 427 --- .../origin/pkg/cmd/cli/describe/printer.go | 1057 ------ .../pkg/cmd/cli/describe/projectstatus.go | 1458 -------- .../origin/pkg/cmd/flagtypes/addr.go | 176 - .../openshift/origin/pkg/cmd/flagtypes/doc.go | 3 - .../origin/pkg/cmd/flagtypes/glog.go | 30 - .../openshift/origin/pkg/cmd/flagtypes/net.go | 59 - .../cmd/util/clientcmd/cached_discovery.go | 136 - .../pkg/cmd/util/clientcmd/clientcmd.go | 247 -- .../pkg/cmd/util/clientcmd/clientconfig.go | 29 - .../origin/pkg/cmd/util/clientcmd/errors.go | 108 - .../origin/pkg/cmd/util/clientcmd/factory.go | 1071 ------ .../pkg/cmd/util/clientcmd/negotiate.go | 116 - .../cmd/util/clientcmd/shortcut_restmapper.go | 141 - .../openshift/origin/pkg/cmd/util/cmd.go | 26 +- .../openshift/origin/pkg/cmd/util/terminal.go | 122 - .../origin/pkg/deploy/api/helpers.go | 7 + .../origin/pkg/deploy/api/register.go | 2 + .../openshift/origin/pkg/deploy/api/types.go | 459 +-- .../origin/pkg/deploy/api/v1/conversion.go | 35 +- .../origin/pkg/deploy/api/v1/defaults.go | 29 +- .../origin/pkg/deploy/api/v1/generated.pb.go | 1100 ++++-- .../origin/pkg/deploy/api/v1/generated.proto | 410 --- .../origin/pkg/deploy/api/v1/register.go | 2 + .../origin/pkg/deploy/api/v1/swagger_doc.go | 36 +- .../origin/pkg/deploy/api/v1/types.go | 376 +- 
.../deploy/api/v1/zz_generated.conversion.go | 102 +- .../deploy/api/v1/zz_generated.deepcopy.go | 45 +- .../pkg/deploy/api/zz_generated.deepcopy.go | 63 +- .../openshift/origin/pkg/deploy/cmd/delete.go | 136 - .../openshift/origin/pkg/deploy/cmd/doc.go | 3 - .../origin/pkg/deploy/cmd/generate.go | 41 - .../origin/pkg/deploy/cmd/history.go | 99 - .../origin/pkg/deploy/cmd/rollback.go | 56 - .../openshift/origin/pkg/deploy/cmd/scale.go | 98 - .../origin/pkg/deploy/graph/analysis/dc.go | 126 - .../origin/pkg/deploy/graph/analysis/doc.go | 3 - .../origin/pkg/deploy/graph/edges.go | 85 - .../origin/pkg/deploy/graph/helpers.go | 49 - .../origin/pkg/deploy/graph/nodes/nodes.go | 38 - .../origin/pkg/deploy/graph/nodes/types.go | 39 - .../openshift/origin/pkg/deploy/util/util.go | 496 --- .../openshift/origin/pkg/image/api/helper.go | 90 +- .../origin/pkg/image/api/v1/generated.proto | 435 --- .../origin/pkg/image/api/v1/swagger_doc.go | 2 +- .../origin/pkg/image/api/v1/types.go | 9 +- .../openshift/origin/pkg/image/graph/edges.go | 56 - .../origin/pkg/image/graph/nodes/nodes.go | 172 - .../origin/pkg/image/graph/nodes/types.go | 207 -- .../origin/pkg/image/reference/reference.go | 51 + .../origin/pkg/oauth/api/register.go | 2 + .../openshift/origin/pkg/oauth/api/types.go | 18 + .../pkg/oauth/api/zz_generated.deepcopy.go | 28 + .../openshift/origin/pkg/project/api/types.go | 1 + .../openshift/origin/pkg/quota/api/types.go | 39 +- .../pkg/quota/api/zz_generated.deepcopy.go | 4 +- .../openshift/origin/pkg/route/api/helper.go | 21 +- .../openshift/origin/pkg/route/api/types.go | 20 + .../pkg/route/api/zz_generated.deepcopy.go | 2 + .../origin/pkg/route/generator/doc.go | 2 - .../origin/pkg/route/generator/generate.go | 93 - .../pkg/route/graph/analysis/analysis.go | 228 -- .../origin/pkg/route/graph/analysis/doc.go | 3 - .../openshift/origin/pkg/route/graph/doc.go | 2 - .../openshift/origin/pkg/route/graph/edges.go | 45 - .../origin/pkg/route/graph/nodes/doc.go | 2 - 
.../origin/pkg/route/graph/nodes/nodes.go | 22 - .../origin/pkg/route/graph/nodes/types.go | 33 - .../openshift/origin/pkg/sdn/api/plugin.go | 31 + .../origin/pkg/user/reaper/bindings.go | 57 - .../openshift/origin/pkg/user/reaper/group.go | 76 - .../openshift/origin/pkg/user/reaper/user.go | 104 - .../openshift/origin/pkg/util/doc.go | 4 - .../openshift/origin/pkg/util/dot/dot.go | 14 - .../openshift/origin/pkg/util/errors/doc.go | 2 - .../origin/pkg/util/errors/errors.go | 39 - .../openshift/origin/pkg/util/etcd.go | 21 - .../openshift/origin/pkg/util/labels.go | 278 -- .../origin/pkg/util/parallel/parallel.go | 27 - .../openshift/origin/pkg/util/strings.go | 21 - .../openshift/origin/pkg/version/version.go | 26 +- vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 - .../client_golang/prometheus/.gitignore | 1 - .../client_golang/prometheus/README.md | 53 - .../bitbucket.org/ww/goautoneg/README.txt | 67 - .../github.com/prometheus/procfs/.travis.yml | 7 - .../github.com/prometheus/procfs/AUTHORS.md | 20 - .../prometheus/procfs/CONTRIBUTING.md | 18 - vendor/github.com/prometheus/procfs/Makefile | 6 - vendor/github.com/prometheus/procfs/README.md | 10 - vendor/github.com/spf13/cobra/.gitignore | 36 - vendor/github.com/spf13/cobra/.mailmap | 3 - vendor/github.com/spf13/cobra/.travis.yml | 18 - vendor/github.com/spf13/cobra/README.md | 898 ----- .../spf13/cobra/bash_completions.md | 206 -- vendor/github.com/spf13/pflag/.travis.yml | 17 - vendor/github.com/spf13/pflag/README.md | 275 -- vendor/github.com/spf13/pflag/bool.go | 7 +- vendor/github.com/spf13/pflag/count.go | 7 +- vendor/github.com/spf13/pflag/flag.go | 73 +- vendor/github.com/spf13/pflag/float32.go | 7 +- vendor/github.com/spf13/pflag/float64.go | 7 +- vendor/github.com/spf13/pflag/int.go | 7 +- vendor/github.com/spf13/pflag/int32.go | 7 +- vendor/github.com/spf13/pflag/int64.go | 7 +- vendor/github.com/spf13/pflag/int8.go | 7 +- 
vendor/github.com/spf13/pflag/string.go | 4 +- vendor/github.com/spf13/pflag/string_array.go | 109 + vendor/github.com/spf13/pflag/string_slice.go | 33 +- vendor/github.com/spf13/pflag/uint.go | 7 +- vendor/github.com/spf13/pflag/uint16.go | 9 +- vendor/github.com/spf13/pflag/uint32.go | 11 +- vendor/github.com/spf13/pflag/uint64.go | 7 +- vendor/github.com/spf13/pflag/uint8.go | 7 +- vendor/github.com/ugorji/go/codec/README.md | 148 - .../ugorji/go/codec/fast-path.go.tmpl | 540 --- .../ugorji/go/codec/gen-dec-array.go.tmpl | 104 - .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 - .../ugorji/go/codec/gen-helper.go.tmpl | 364 -- vendor/github.com/ugorji/go/codec/prebuild.sh | 199 - .../ugorji/go/codec/test-cbor-goldens.json | 639 ---- vendor/github.com/ugorji/go/codec/test.py | 126 - vendor/github.com/ugorji/go/codec/tests.sh | 80 - vendor/github.com/urfave/cli/.travis.yml | 18 - vendor/github.com/urfave/cli/LICENSE | 28 +- vendor/github.com/urfave/cli/README.md | 434 --- vendor/github.com/urfave/cli/app.go | 241 +- vendor/github.com/urfave/cli/appveyor.yml | 16 - vendor/github.com/urfave/cli/category.go | 14 + vendor/github.com/urfave/cli/cli.go | 23 +- vendor/github.com/urfave/cli/command.go | 98 +- vendor/github.com/urfave/cli/context.go | 345 +- vendor/github.com/urfave/cli/errors.go | 110 + vendor/github.com/urfave/cli/flag.go | 663 ++-- .../github.com/urfave/cli/flag_generated.go | 627 ++++ vendor/github.com/urfave/cli/funcs.go | 28 + vendor/github.com/urfave/cli/help.go | 171 +- .../xeipuuv/gojsonpointer/README.md | 8 - .../xeipuuv/gojsonreference/README.md | 10 - .../xeipuuv/gojsonschema/.gitignore | 1 - .../xeipuuv/gojsonschema/.travis.yml | 7 - .../github.com/xeipuuv/gojsonschema/README.md | 236 -- .../github.com/xeipuuv/gojsonschema/errors.go | 37 +- .../xeipuuv/gojsonschema/format_checkers.go | 25 +- .../xeipuuv/gojsonschema/jsonLoader.go | 200 +- 
.../xeipuuv/gojsonschema/locales.go | 83 +- .../github.com/xeipuuv/gojsonschema/result.go | 3 +- .../github.com/xeipuuv/gojsonschema/schema.go | 86 +- .../xeipuuv/gojsonschema/schemaPool.go | 8 +- .../xeipuuv/gojsonschema/subSchema.go | 2 +- .../github.com/xeipuuv/gojsonschema/utils.go | 14 +- .../xeipuuv/gojsonschema/validation.go | 43 +- vendor/golang.org/x/net/context/context.go | 2 +- .../x/net/context/ctxhttp/ctxhttp.go | 2 +- .../x/net/context/ctxhttp/ctxhttp_pre17.go | 2 +- vendor/golang.org/x/net/http2/.gitignore | 2 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/README | 20 - .../x/net/internal/timeseries/timeseries.go | 2 +- vendor/golang.org/x/net/trace/trace.go | 2 +- vendor/golang.org/x/oauth2/.travis.yml | 14 - vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - vendor/golang.org/x/oauth2/README.md | 64 - .../golang.org/x/oauth2/client_appengine.go | 4 +- .../golang.org/x/oauth2/google/appengine.go | 5 +- .../x/oauth2/google/appengine_hook.go | 4 +- .../x/oauth2/google/appenginevm_hook.go | 14 + vendor/golang.org/x/oauth2/google/default.go | 9 +- vendor/golang.org/x/oauth2/google/google.go | 40 +- vendor/golang.org/x/oauth2/google/jwt.go | 74 + vendor/golang.org/x/oauth2/google/sdk.go | 2 +- vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +- vendor/golang.org/x/oauth2/internal/token.go | 48 +- .../golang.org/x/oauth2/internal/transport.go | 10 +- vendor/golang.org/x/oauth2/jws/jws.go | 108 +- vendor/golang.org/x/oauth2/jwt/jwt.go | 12 +- vendor/golang.org/x/oauth2/oauth2.go | 22 +- vendor/golang.org/x/oauth2/token.go | 27 +- vendor/golang.org/x/oauth2/transport.go | 2 +- vendor/golang.org/x/sys/unix/.gitignore | 1 - vendor/golang.org/x/sys/unix/mkall.sh | 274 -- vendor/golang.org/x/sys/unix/mkerrors.sh | 476 --- vendor/golang.org/x/sys/unix/mksyscall.pl | 323 -- 
.../x/sys/unix/mksyscall_solaris.pl | 294 -- .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 264 -- .../golang.org/x/sys/unix/mksysnum_darwin.pl | 39 - .../x/sys/unix/mksysnum_dragonfly.pl | 50 - .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 63 - .../golang.org/x/sys/unix/mksysnum_linux.pl | 58 - .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 58 - .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 50 - vendor/golang.org/x/sys/unix/syscall.go | 2 +- vendor/google.golang.org/appengine/LICENSE | 202 + .../google.golang.org/appengine/appengine.go | 76 + .../appengine/appengine_vm.go | 56 + vendor/google.golang.org/appengine/errors.go | 46 + .../google.golang.org/appengine/identity.go | 142 + .../appengine/internal/api.go | 640 ++++ .../appengine/internal/api_classic.go | 133 + .../appengine/internal/api_common.go | 101 + .../appengine/internal/app_id.go | 28 + .../app_identity/app_identity_service.pb.go | 296 ++ .../appengine/internal/base/api_base.pb.go | 133 + .../internal/datastore/datastore_v3.pb.go | 2778 ++++++++++++++ .../appengine/internal/identity.go | 14 + .../appengine/internal/identity_classic.go | 27 + .../appengine/internal/identity_vm.go | 97 + .../appengine/internal/internal.go | 144 + .../appengine/internal/log/log_service.pb.go | 899 +++++ .../appengine/internal/metadata.go | 61 + .../internal/modules/modules_service.pb.go | 375 ++ .../appengine/internal/net.go | 56 + .../internal/remote_api/remote_api.pb.go | 231 ++ .../appengine/internal/transaction.go | 107 + .../internal/urlfetch/urlfetch_service.pb.go | 355 ++ .../google.golang.org/appengine/namespace.go | 25 + vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/urlfetch/urlfetch.go | 210 ++ .../google.golang.org/cloud/internal/cloud.go | 128 - vendor/google.golang.org/grpc/.travis.yml | 21 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 46 - vendor/google.golang.org/grpc/Makefile | 52 - vendor/google.golang.org/grpc/README.md | 32 - vendor/google.golang.org/grpc/balancer.go | 20 
+- vendor/google.golang.org/grpc/call.go | 11 +- vendor/google.golang.org/grpc/clientconn.go | 125 +- vendor/google.golang.org/grpc/codegen.sh | 17 - vendor/google.golang.org/grpc/codes/codes.go | 2 +- vendor/google.golang.org/grpc/coverage.sh | 47 - .../grpc/credentials/credentials.go | 51 +- vendor/google.golang.org/grpc/doc.go | 2 +- .../google.golang.org/grpc/grpclog/logger.go | 2 +- vendor/google.golang.org/grpc/interceptor.go | 16 + .../grpc/metadata/metadata.go | 15 +- vendor/google.golang.org/grpc/rpc_util.go | 4 +- vendor/google.golang.org/grpc/server.go | 26 +- vendor/google.golang.org/grpc/stream.go | 33 +- .../grpc/transport/handler_server.go | 4 +- .../grpc/transport/http2_client.go | 100 +- .../grpc/transport/http2_server.go | 12 +- .../grpc/transport/http_util.go | 13 +- .../grpc/transport/transport.go | 33 +- vendor/gopkg.in/inf.v0/dec.go | 2 +- vendor/gopkg.in/yaml.v2/.travis.yml | 9 - vendor/gopkg.in/yaml.v2/README.md | 131 - vendor/k8s.io/client-go/1.4/LICENSE | 202 + vendor/k8s.io/client-go/1.4/pkg/api/OWNERS | 6 - .../client-go/1.4/pkg/api/node_example.json | 49 - .../api/replication_controller_example.json | 83 - .../1.4/pkg/api/resource/generated.proto | 93 - .../1.4/pkg/api/unversioned/generated.proto | 378 -- .../client-go/1.4/pkg/api/v1/generated.proto | 3088 ---------------- .../client-go/1.4/pkg/conversion/OWNERS | 5 - .../k8s.io/client-go/1.4/pkg/runtime/OWNERS | 5 - .../client-go/1.4/pkg/runtime/generated.proto | 124 - .../1.4/pkg/util/intstr/generated.proto | 42 - .../client-go/1.4/pkg/version/.gitattributes | 1 - .../1.4/pkg/watch/versioned/generated.proto | 43 - .../federation/apis/federation/doc.go | 2 +- .../federation/apis/federation/v1beta1/doc.go | 2 +- .../apis/federation/v1beta1/generated.proto | 114 - vendor/k8s.io/kubernetes/pkg/api/OWNERS | 6 - .../kubernetes/pkg/api/annotations/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/api/context.go | 14 + vendor/k8s.io/kubernetes/pkg/api/doc.go | 2 +- 
.../k8s.io/kubernetes/pkg/api/errors/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/api/helpers.go | 15 + vendor/k8s.io/kubernetes/pkg/api/meta/doc.go | 2 +- .../kubernetes/pkg/api/node_example.json | 49 - .../api/replication_controller_example.json | 83 - .../pkg/api/resource/generated.proto | 93 - vendor/k8s.io/kubernetes/pkg/api/rest/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/api/types.go | 2 + .../kubernetes/pkg/api/unversioned/doc.go | 2 +- .../pkg/api/unversioned/generated.proto | 378 -- .../pkg/api/unversioned/group_version.go | 24 +- .../kubernetes/pkg/api/v1/conversion.go | 17 + vendor/k8s.io/kubernetes/pkg/api/v1/doc.go | 2 +- .../kubernetes/pkg/api/v1/generated.proto | 3273 ----------------- vendor/k8s.io/kubernetes/pkg/api/v1/types.go | 13 +- .../kubernetes/pkg/api/validation/doc.go | 2 +- .../pkg/api/validation/validation.go | 14 + .../k8s.io/kubernetes/pkg/apimachinery/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go | 2 +- .../kubernetes/pkg/apis/apps/v1alpha1/doc.go | 2 +- .../pkg/apis/apps/v1alpha1/generated.proto | 102 - .../kubernetes/pkg/apis/authentication/doc.go | 2 +- .../pkg/apis/authentication/v1beta1/doc.go | 2 +- .../authentication/v1beta1/generated.proto | 89 - .../kubernetes/pkg/apis/authorization/doc.go | 2 +- .../pkg/apis/authorization/v1beta1/doc.go | 2 +- .../authorization/v1beta1/generated.proto | 159 - .../kubernetes/pkg/apis/autoscaling/doc.go | 2 +- .../kubernetes/pkg/apis/autoscaling/v1/doc.go | 2 +- .../pkg/apis/autoscaling/v1/generated.proto | 131 - .../k8s.io/kubernetes/pkg/apis/batch/doc.go | 2 +- .../kubernetes/pkg/apis/batch/v1/doc.go | 2 +- .../pkg/apis/batch/v1/generated.proto | 177 - .../kubernetes/pkg/apis/batch/v2alpha1/doc.go | 2 +- .../pkg/apis/batch/v2alpha1/generated.proto | 254 -- .../kubernetes/pkg/apis/certificates/doc.go | 2 +- .../pkg/apis/certificates/v1alpha1/doc.go | 2 +- .../certificates/v1alpha1/generated.proto | 87 - .../pkg/apis/componentconfig/doc.go | 2 +- 
.../pkg/apis/componentconfig/types.go | 8 +- .../apis/componentconfig/v1alpha1/defaults.go | 5 +- .../pkg/apis/componentconfig/v1alpha1/doc.go | 2 +- .../apis/componentconfig/v1alpha1/types.go | 8 +- .../v1alpha1/zz_generated.conversion.go | 4 + .../v1alpha1/zz_generated.deepcopy.go | 2 + .../componentconfig/zz_generated.deepcopy.go | 2 + .../kubernetes/pkg/apis/extensions/doc.go | 2 +- .../pkg/apis/extensions/v1beta1/doc.go | 2 +- .../apis/extensions/v1beta1/generated.proto | 1013 ----- .../k8s.io/kubernetes/pkg/apis/policy/doc.go | 2 +- .../pkg/apis/policy/v1alpha1/doc.go | 2 +- .../pkg/apis/policy/v1alpha1/generated.proto | 87 - vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go | 2 +- .../kubernetes/pkg/apis/rbac/v1alpha1/doc.go | 2 +- .../pkg/apis/rbac/v1alpha1/generated.proto | 159 - .../k8s.io/kubernetes/pkg/apis/storage/doc.go | 2 +- .../pkg/apis/storage/util/helpers.go | 136 + .../pkg/apis/storage/v1beta1/doc.go | 2 +- .../pkg/apis/storage/v1beta1/generated.proto | 59 - vendor/k8s.io/kubernetes/pkg/auth/user/doc.go | 2 +- .../k8s.io/kubernetes/pkg/capabilities/doc.go | 2 +- .../k8s.io/kubernetes/pkg/client/cache/doc.go | 2 +- .../kubernetes/pkg/client/record/doc.go | 2 +- .../pkg/client/restclient/config.go | 10 + .../unversioned/clientcmd/client_config.go | 135 +- .../pkg/client/unversioned/clientcmd/doc.go | 2 +- .../client/unversioned/clientcmd/loader.go | 25 + .../clientcmd/merged_client_builder.go | 19 +- .../client/unversioned/clientcmd/overrides.go | 8 +- .../kubernetes/pkg/client/unversioned/doc.go | 2 +- .../k8s.io/kubernetes/pkg/controller/OWNERS | 4 - .../deployment/util/deployment_util.go | 10 +- .../k8s.io/kubernetes/pkg/controller/doc.go | 2 +- .../pkg/controller/framework/doc.go | 2 +- .../controller/framework/informers/core.go | 203 - .../controller/framework/informers/factory.go | 193 - .../controller/framework/shared_informer.go | 31 + .../pkg/controller/replication/doc.go | 19 - .../replication/replication_controller.go | 738 ---- 
.../replication_controller_utils.go | 84 - .../k8s.io/kubernetes/pkg/conversion/OWNERS | 5 - .../k8s.io/kubernetes/pkg/conversion/doc.go | 2 +- .../pkg/conversion/queryparams/doc.go | 2 +- .../kubernetes/pkg/credentialprovider/OWNERS | 3 - .../kubernetes/pkg/credentialprovider/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/fields/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS | 7 - .../pkg/kubectl/cmd/util/factory.go | 39 +- .../pkg/kubectl/cmd/util/helpers.go | 143 +- .../pkg/kubectl/cmd/util/printing.go | 10 +- .../k8s.io/kubernetes/pkg/kubectl/describe.go | 102 +- vendor/k8s.io/kubernetes/pkg/kubectl/doc.go | 2 +- .../kubernetes/pkg/kubectl/resource/doc.go | 2 +- .../kubernetes/pkg/kubectl/resource/result.go | 21 +- .../kubernetes/pkg/kubectl/resource_filter.go | 67 + .../pkg/kubectl/resource_printer.go | 68 +- .../kubernetes/pkg/kubectl/rollout_status.go | 16 +- .../kubernetes/pkg/kubectl/service_basic.go | 4 + .../k8s.io/kubernetes/pkg/kubelet/qos/doc.go | 2 +- .../kubernetes/pkg/kubelet/qos/policy.go | 9 +- .../kubernetes/pkg/kubelet/types/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/labels/doc.go | 2 +- .../k8s.io/kubernetes/pkg/master/ports/doc.go | 2 +- .../pkg/master/thirdparty_controller.go | 146 + .../kubernetes/pkg/registry/generic/doc.go | 2 +- .../registry/thirdpartyresourcedata/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/runtime/OWNERS | 5 - vendor/k8s.io/kubernetes/pkg/runtime/doc.go | 2 +- .../kubernetes/pkg/runtime/generated.proto | 124 - .../pkg/runtime/serializer/protobuf/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/storage/OWNERS | 6 - vendor/k8s.io/kubernetes/pkg/storage/doc.go | 2 +- .../k8s.io/kubernetes/pkg/storage/etcd/doc.go | 2 +- .../kubernetes/pkg/storage/etcd/util/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/types/doc.go | 2 +- .../pkg/util/cache/lruexpirecache.go | 31 +- .../k8s.io/kubernetes/pkg/util/config/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/util/doc.go | 2 +- 
.../k8s.io/kubernetes/pkg/util/errors/doc.go | 2 +- .../kubernetes/pkg/util/errors/errors.go | 14 + vendor/k8s.io/kubernetes/pkg/util/exec/doc.go | 2 +- .../pkg/util/interrupt/interrupt.go | 104 - .../pkg/util/intstr/generated.proto | 42 - .../kubernetes/pkg/util/jsonpath/doc.go | 2 +- .../k8s.io/kubernetes/pkg/util/labels/doc.go | 2 +- .../kubernetes/pkg/util/metrics/util.go | 71 - vendor/k8s.io/kubernetes/pkg/util/net/http.go | 28 + vendor/k8s.io/kubernetes/pkg/util/pod/doc.go | 2 +- .../k8s.io/kubernetes/pkg/util/term/resize.go | 144 - .../kubernetes/pkg/util/term/resizeevents.go | 60 - .../pkg/util/term/resizeevents_windows.go | 61 - .../kubernetes/pkg/util/term/setsize.go | 28 - .../pkg/util/term/setsize_unsupported.go | 24 - .../k8s.io/kubernetes/pkg/util/term/term.go | 110 - vendor/k8s.io/kubernetes/pkg/util/wait/doc.go | 2 +- .../k8s.io/kubernetes/pkg/util/wait/wait.go | 5 + .../util/workqueue/default_rate_limiters.go | 211 -- .../pkg/util/workqueue/delaying_queue.go | 240 -- .../kubernetes/pkg/util/workqueue/doc.go | 26 - .../kubernetes/pkg/util/workqueue/metrics.go | 153 - .../pkg/util/workqueue/parallelizer.go | 48 - .../kubernetes/pkg/util/workqueue/queue.go | 172 - .../util/workqueue/rate_limitting_queue.go | 68 - .../pkg/util/workqueue/timed_queue.go | 52 - .../kubernetes/pkg/version/.gitattributes | 1 - vendor/k8s.io/kubernetes/pkg/version/base.go | 6 +- vendor/k8s.io/kubernetes/pkg/version/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/watch/doc.go | 2 +- vendor/k8s.io/kubernetes/pkg/watch/until.go | 2 +- .../pkg/watch/versioned/generated.proto | 43 - vendor/k8s.io/kubernetes/pkg/watch/watch.go | 15 + .../plugin/pkg/client/auth/oidc/OWNERS | 2 - .../third_party/forked/golang/LICENSE | 27 + .../third_party/forked/golang/PATENTS | 22 + 861 files changed, 16930 insertions(+), 81692 deletions(-) delete mode 100644 Godeps/Godeps.json delete mode 100644 Godeps/Readme create mode 100644 glide.lock create mode 100644 glide.yaml delete mode 100755 
script/godep-restore.sh delete mode 100644 vendor/bitbucket.org/ww/goautoneg/Makefile delete mode 100644 vendor/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 vendor/bitbucket.org/ww/goautoneg/autoneg.go rename vendor/{google.golang.org/cloud => cloud.google.com/go}/LICENSE (100%) rename vendor/{google.golang.org/cloud => cloud.google.com/go}/compute/metadata/metadata.go (80%) create mode 100644 vendor/cloud.google.com/go/internal/cloud.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE delete mode 100644 vendor/github.com/Azure/go-ansiterm/README.md delete mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/context.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/states.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go delete mode 100644 
vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go delete mode 100644 vendor/github.com/MakeNowJust/heredoc/LICENSE delete mode 100644 vendor/github.com/MakeNowJust/heredoc/README.md delete mode 100644 vendor/github.com/MakeNowJust/heredoc/heredoc.go delete mode 100644 vendor/github.com/Sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/Sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/Sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/Sirupsen/logrus/README.md delete mode 100644 vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_darwin.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_freebsd.go rename vendor/github.com/Sirupsen/logrus/{terminal_bsd.go => terminal_openbsd.go} (67%) delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go delete mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt delete mode 100644 vendor/github.com/blang/semver/README.md delete mode 100644 vendor/github.com/coreos/etcd/auth/authpb/auth.proto delete mode 100644 vendor/github.com/coreos/etcd/client/README.md delete mode 100644 vendor/github.com/coreos/etcd/clientv3/README.md delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto delete mode 100644 vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto create mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go create mode 100644 
vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md delete mode 100644 vendor/github.com/coreos/pkg/health/README.md delete mode 100644 vendor/github.com/coreos/pkg/httputil/README.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.gitignore delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.travis.yml delete mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md delete mode 100644 vendor/github.com/docker/distribution/.gitignore delete mode 100644 vendor/github.com/docker/distribution/.mailmap delete mode 100644 vendor/github.com/docker/distribution/AUTHORS delete mode 100644 vendor/github.com/docker/distribution/BUILDING.md delete mode 100644 vendor/github.com/docker/distribution/CHANGELOG.md delete mode 100644 vendor/github.com/docker/distribution/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/distribution/Dockerfile delete mode 100644 vendor/github.com/docker/distribution/Jenkinsfile delete mode 100644 vendor/github.com/docker/distribution/MAINTAINERS delete mode 100644 vendor/github.com/docker/distribution/Makefile delete mode 100644 vendor/github.com/docker/distribution/README.md delete mode 100644 vendor/github.com/docker/distribution/ROADMAP.md delete mode 100644 vendor/github.com/docker/distribution/circle.yml delete mode 100755 vendor/github.com/docker/distribution/coverpkg.sh delete mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md delete mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go delete mode 100644 vendor/github.com/docker/docker/pkg/signal/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go delete mode 100644 
vendor/github.com/docker/docker/pkg/system/chtimes_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/events_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lstat.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go delete 
mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/ascii.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_darwin.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_openbsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/console.go delete mode 100644 
vendor/github.com/docker/docker/pkg/term/windows/windows.go delete mode 100644 vendor/github.com/docker/engine-api/types/errors.go delete mode 100644 vendor/github.com/docker/engine-api/types/plugin.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/common.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/container.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/network.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/node.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/service.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/swarm.go delete mode 100644 vendor/github.com/docker/engine-api/types/swarm/task.go delete mode 100644 vendor/github.com/docker/engine-api/types/versions/README.md delete mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/go-units/MAINTAINERS delete mode 100644 vendor/github.com/docker/go-units/README.md delete mode 100644 vendor/github.com/docker/go-units/circle.yml delete mode 100644 vendor/github.com/docker/libtrust/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/libtrust/MAINTAINERS delete mode 100644 vendor/github.com/docker/libtrust/README.md delete mode 100644 vendor/github.com/emicklei/go-restful/.gitignore delete mode 100644 vendor/github.com/emicklei/go-restful/CHANGES.md delete mode 100644 vendor/github.com/emicklei/go-restful/README.md delete mode 100644 vendor/github.com/emicklei/go-restful/Srcfile delete mode 100644 vendor/github.com/emicklei/go-restful/bench_test.sh delete mode 100644 vendor/github.com/emicklei/go-restful/coverage.sh delete mode 100644 vendor/github.com/emicklei/go-restful/install.sh delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/CHANGES.md delete mode 100644 vendor/github.com/emicklei/go-restful/swagger/README.md delete mode 
100644 vendor/github.com/evanphx/json-patch/.travis.yml delete mode 100644 vendor/github.com/evanphx/json-patch/README.md delete mode 100644 vendor/github.com/fatih/structs/.gitignore delete mode 100644 vendor/github.com/fatih/structs/.travis.yml delete mode 100644 vendor/github.com/fatih/structs/README.md delete mode 100644 vendor/github.com/flynn/go-shlex/Makefile delete mode 100644 vendor/github.com/flynn/go-shlex/README.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/.gitignore delete mode 100644 vendor/github.com/fsouza/go-dockerclient/.travis.yml delete mode 100644 vendor/github.com/fsouza/go-dockerclient/AUTHORS delete mode 100644 vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE delete mode 100644 vendor/github.com/fsouza/go-dockerclient/Makefile delete mode 100644 vendor/github.com/fsouza/go-dockerclient/README.markdown delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md delete mode 100644 vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS delete mode 100755 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl delete mode 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl delete mode 100644 vendor/github.com/ghodss/yaml/.gitignore delete mode 100644 vendor/github.com/ghodss/yaml/.travis.yml delete mode 100644 vendor/github.com/ghodss/yaml/README.md delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 vendor/github.com/golang/glog/README delete mode 100644 vendor/github.com/golang/protobuf/proto/Makefile delete mode 100644 vendor/github.com/gonum/blas/.travis.yml delete mode 100644 vendor/github.com/gonum/blas/README.md delete mode 100644 vendor/github.com/gonum/blas/blas.go delete mode 100644 vendor/github.com/gonum/blas/blas64/blas64.go delete mode 100644 vendor/github.com/gonum/blas/native/dgemm.go delete mode 100644 vendor/github.com/gonum/blas/native/doc.go delete mode 100644 
vendor/github.com/gonum/blas/native/general_double.go delete mode 100644 vendor/github.com/gonum/blas/native/general_single.go delete mode 100644 vendor/github.com/gonum/blas/native/internal/math32/math.go delete mode 100644 vendor/github.com/gonum/blas/native/internal/math32/sqrt.go delete mode 100644 vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.go delete mode 100644 vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.s delete mode 100644 vendor/github.com/gonum/blas/native/level1double.go delete mode 100644 vendor/github.com/gonum/blas/native/level1double_ddot.go delete mode 100644 vendor/github.com/gonum/blas/native/level1single.go delete mode 100644 vendor/github.com/gonum/blas/native/level1single_dsdot.go delete mode 100644 vendor/github.com/gonum/blas/native/level1single_sdot.go delete mode 100644 vendor/github.com/gonum/blas/native/level1single_sdsdot.go delete mode 100644 vendor/github.com/gonum/blas/native/level2double.go delete mode 100644 vendor/github.com/gonum/blas/native/level2single.go delete mode 100644 vendor/github.com/gonum/blas/native/level3double.go delete mode 100644 vendor/github.com/gonum/blas/native/level3single.go delete mode 100644 vendor/github.com/gonum/blas/native/native.go delete mode 100644 vendor/github.com/gonum/blas/native/sgemm.go delete mode 100755 vendor/github.com/gonum/blas/native/single_precision delete mode 100644 vendor/github.com/gonum/graph/.gitignore delete mode 100644 vendor/github.com/gonum/graph/.travis.yml delete mode 100644 vendor/github.com/gonum/graph/README.md delete mode 100644 vendor/github.com/gonum/graph/concrete/concrete.go delete mode 100644 vendor/github.com/gonum/graph/concrete/dense_directed_matrix.go delete mode 100644 vendor/github.com/gonum/graph/concrete/dense_undirected_matrix.go delete mode 100644 vendor/github.com/gonum/graph/concrete/directed.go delete mode 100644 
vendor/github.com/gonum/graph/concrete/undirected.go delete mode 100644 vendor/github.com/gonum/graph/concrete/util.go delete mode 100644 vendor/github.com/gonum/graph/doc.go delete mode 100644 vendor/github.com/gonum/graph/encoding/dot/dot.go delete mode 100644 vendor/github.com/gonum/graph/graph.go delete mode 100644 vendor/github.com/gonum/graph/internal/linear.go delete mode 100644 vendor/github.com/gonum/graph/internal/set.go delete mode 100644 vendor/github.com/gonum/graph/internal/sort.go delete mode 100644 vendor/github.com/gonum/graph/path/a_star.go delete mode 100644 vendor/github.com/gonum/graph/path/bellman_ford_moore.go delete mode 100644 vendor/github.com/gonum/graph/path/control_flow.go delete mode 100644 vendor/github.com/gonum/graph/path/dijkstra.go delete mode 100644 vendor/github.com/gonum/graph/path/disjoint.go delete mode 100644 vendor/github.com/gonum/graph/path/floydwarshall.go delete mode 100644 vendor/github.com/gonum/graph/path/johnson_apsp.go delete mode 100644 vendor/github.com/gonum/graph/path/shortest.go delete mode 100644 vendor/github.com/gonum/graph/path/spanning_tree.go delete mode 100644 vendor/github.com/gonum/graph/topo/bron_kerbosch.go delete mode 100644 vendor/github.com/gonum/graph/topo/johnson_cycles.go delete mode 100644 vendor/github.com/gonum/graph/topo/non_tomita_choice.go delete mode 100644 vendor/github.com/gonum/graph/topo/tarjan.go delete mode 100644 vendor/github.com/gonum/graph/topo/tomita_choice.go delete mode 100644 vendor/github.com/gonum/graph/topo/topo.go delete mode 100644 vendor/github.com/gonum/graph/traverse/traverse.go delete mode 100644 vendor/github.com/gonum/internal/asm/caxpy.go delete mode 100644 vendor/github.com/gonum/internal/asm/cdotc.go delete mode 100644 vendor/github.com/gonum/internal/asm/cdotu.go delete mode 100755 vendor/github.com/gonum/internal/asm/complex delete mode 100644 
vendor/github.com/gonum/internal/asm/conj.go delete mode 100644 vendor/github.com/gonum/internal/asm/daxpy.go delete mode 100644 vendor/github.com/gonum/internal/asm/daxpy_amd64.go delete mode 100644 vendor/github.com/gonum/internal/asm/daxpy_amd64.s delete mode 100644 vendor/github.com/gonum/internal/asm/ddot.go delete mode 100644 vendor/github.com/gonum/internal/asm/ddot_amd64.go delete mode 100644 vendor/github.com/gonum/internal/asm/ddot_amd64.s delete mode 100644 vendor/github.com/gonum/internal/asm/dsdot.go delete mode 100644 vendor/github.com/gonum/internal/asm/generate.go delete mode 100644 vendor/github.com/gonum/internal/asm/saxpy.go delete mode 100644 vendor/github.com/gonum/internal/asm/sdot.go delete mode 100755 vendor/github.com/gonum/internal/asm/single_precision delete mode 100644 vendor/github.com/gonum/internal/asm/zaxpy.go delete mode 100644 vendor/github.com/gonum/internal/asm/zdotc.go delete mode 100644 vendor/github.com/gonum/internal/asm/zdotu.go delete mode 100644 vendor/github.com/gonum/lapack/.gitignore delete mode 100644 vendor/github.com/gonum/lapack/.travis.yml delete mode 100644 vendor/github.com/gonum/lapack/README.md delete mode 100644 vendor/github.com/gonum/lapack/lapack.go delete mode 100644 vendor/github.com/gonum/lapack/lapack64/lapack64.go delete mode 100644 vendor/github.com/gonum/lapack/native/dgelq2.go delete mode 100644 vendor/github.com/gonum/lapack/native/dgelqf.go delete mode 100644 vendor/github.com/gonum/lapack/native/dgels.go delete mode 100644 vendor/github.com/gonum/lapack/native/dgeqr2.go delete mode 100644 vendor/github.com/gonum/lapack/native/dgeqrf.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlange.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlapy2.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlarf.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlarfb.go delete mode 
100644 vendor/github.com/gonum/lapack/native/dlarfg.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlarft.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlascl.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlaset.go delete mode 100644 vendor/github.com/gonum/lapack/native/dlassq.go delete mode 100644 vendor/github.com/gonum/lapack/native/doc.go delete mode 100644 vendor/github.com/gonum/lapack/native/dorm2r.go delete mode 100644 vendor/github.com/gonum/lapack/native/dorml2.go delete mode 100644 vendor/github.com/gonum/lapack/native/dormlq.go delete mode 100644 vendor/github.com/gonum/lapack/native/dormqr.go delete mode 100644 vendor/github.com/gonum/lapack/native/dpotf2.go delete mode 100644 vendor/github.com/gonum/lapack/native/dpotrf.go delete mode 100644 vendor/github.com/gonum/lapack/native/dtrtrs.go delete mode 100644 vendor/github.com/gonum/lapack/native/general.go delete mode 100644 vendor/github.com/gonum/lapack/native/iladlc.go delete mode 100644 vendor/github.com/gonum/lapack/native/iladlr.go delete mode 100644 vendor/github.com/gonum/lapack/native/ilaenv.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/cholesky.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/dense.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/dense_arithmetic.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/eigen.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/format.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/index_bound_checks.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/index_no_bound_checks.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/inner.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/io.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/lq.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/lu.go delete mode 100644 
vendor/github.com/gonum/matrix/mat64/matrix.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/pool.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/qr.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/svd.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/symmetric.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/triangular.go delete mode 100644 vendor/github.com/gonum/matrix/mat64/vector.go delete mode 100644 vendor/github.com/google/gofuzz/.travis.yml delete mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/gofuzz/README.md delete mode 100644 vendor/github.com/gorilla/context/.travis.yml delete mode 100644 vendor/github.com/gorilla/context/README.md delete mode 100644 vendor/github.com/gorilla/mux/.travis.yml delete mode 100644 vendor/github.com/gorilla/mux/README.md delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto delete mode 100644 vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 vendor/github.com/imdario/mergo/README.md delete mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md delete mode 100644 vendor/github.com/jonboulle/clockwork/.gitignore delete mode 100644 vendor/github.com/jonboulle/clockwork/.travis.yml delete mode 100644 vendor/github.com/jonboulle/clockwork/README.md delete mode 100644 vendor/github.com/juju/ratelimit/README.md delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graph.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graphview/dc_pipeline.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graphview/image_pipeline.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graphview/intset.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graphview/petset.go delete mode 100644 
vendor/github.com/openshift/origin/pkg/api/graph/graphview/pod.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graphview/rc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/graphview/service_group.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/interfaces.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/graph/types.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/hpa.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/pod.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/podspec.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/rc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/edges.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/nodes.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/types.go delete mode 100644 vendor/github.com/openshift/origin/pkg/api/restmapper/discovery.go delete mode 100644 vendor/github.com/openshift/origin/pkg/authorization/reaper/cluster_role.go delete mode 100644 vendor/github.com/openshift/origin/pkg/authorization/reaper/role.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/client/clients.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/cmd/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/cmd/reaper.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/graph/analysis/bc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/graph/edges.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/graph/helpers.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/graph/nodes/nodes.go delete mode 100644 
vendor/github.com/openshift/origin/pkg/build/graph/nodes/types.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/util/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/build/util/util.go create mode 100644 vendor/github.com/openshift/origin/pkg/client/subjectrulesreviews.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/cli/describe/chaindescriber.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/cli/describe/deployments.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/cli/describe/describer.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/cli/describe/helpers.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/cli/describe/printer.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/cli/describe/projectstatus.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/flagtypes/addr.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/flagtypes/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/flagtypes/glog.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/flagtypes/net.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/cached_discovery.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientcmd.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientconfig.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/errors.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/factory.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/negotiate.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/shortcut_restmapper.go delete mode 100644 vendor/github.com/openshift/origin/pkg/cmd/util/terminal.go delete mode 100644 
vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.proto delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/cmd/delete.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/cmd/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/cmd/generate.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/cmd/history.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/cmd/rollback.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/cmd/scale.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/dc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/graph/edges.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/graph/helpers.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/nodes.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/types.go delete mode 100644 vendor/github.com/openshift/origin/pkg/deploy/util/util.go delete mode 100644 vendor/github.com/openshift/origin/pkg/image/api/v1/generated.proto delete mode 100644 vendor/github.com/openshift/origin/pkg/image/graph/edges.go delete mode 100644 vendor/github.com/openshift/origin/pkg/image/graph/nodes/nodes.go delete mode 100644 vendor/github.com/openshift/origin/pkg/image/graph/nodes/types.go create mode 100644 vendor/github.com/openshift/origin/pkg/image/reference/reference.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/generator/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/generator/generate.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/graph/analysis/analysis.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/graph/analysis/doc.go delete mode 100644 
vendor/github.com/openshift/origin/pkg/route/graph/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/graph/edges.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/graph/nodes/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/graph/nodes/nodes.go delete mode 100644 vendor/github.com/openshift/origin/pkg/route/graph/nodes/types.go create mode 100644 vendor/github.com/openshift/origin/pkg/sdn/api/plugin.go delete mode 100644 vendor/github.com/openshift/origin/pkg/user/reaper/bindings.go delete mode 100644 vendor/github.com/openshift/origin/pkg/user/reaper/group.go delete mode 100644 vendor/github.com/openshift/origin/pkg/user/reaper/user.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/dot/dot.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/errors/doc.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/errors/errors.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/etcd.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/labels.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/parallel/parallel.go delete mode 100644 vendor/github.com/openshift/origin/pkg/util/strings.go delete mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 vendor/github.com/prometheus/procfs/.travis.yml delete mode 100644 vendor/github.com/prometheus/procfs/AUTHORS.md delete mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md delete mode 100644 vendor/github.com/prometheus/procfs/Makefile delete mode 
100644 vendor/github.com/prometheus/procfs/README.md delete mode 100644 vendor/github.com/spf13/cobra/.gitignore delete mode 100644 vendor/github.com/spf13/cobra/.mailmap delete mode 100644 vendor/github.com/spf13/cobra/.travis.yml delete mode 100644 vendor/github.com/spf13/cobra/README.md delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.md delete mode 100644 vendor/github.com/spf13/pflag/.travis.yml delete mode 100644 vendor/github.com/spf13/pflag/README.md create mode 100644 vendor/github.com/spf13/pflag/string_array.go delete mode 100644 vendor/github.com/ugorji/go/codec/README.md delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl delete mode 100755 vendor/github.com/ugorji/go/codec/prebuild.sh delete mode 100644 vendor/github.com/ugorji/go/codec/test-cbor-goldens.json delete mode 100755 vendor/github.com/ugorji/go/codec/test.py delete mode 100755 vendor/github.com/ugorji/go/codec/tests.sh delete mode 100644 vendor/github.com/urfave/cli/.travis.yml delete mode 100644 vendor/github.com/urfave/cli/README.md delete mode 100644 vendor/github.com/urfave/cli/appveyor.yml create mode 100644 vendor/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/urfave/cli/funcs.go delete mode 100644 vendor/github.com/xeipuuv/gojsonpointer/README.md delete mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/.gitignore delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/.travis.yml delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md delete mode 100644 
vendor/golang.org/x/net/http2/.gitignore delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile delete mode 100644 vendor/golang.org/x/net/http2/README delete mode 100644 vendor/golang.org/x/oauth2/.travis.yml delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/google/appenginevm_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore delete mode 100755 vendor/golang.org/x/sys/unix/mkall.sh delete mode 100755 vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_darwin.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_linux.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl delete mode 100755 vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/appengine.go create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 vendor/google.golang.org/appengine/errors.go create mode 100644 vendor/google.golang.org/appengine/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 
vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/namespace.go create mode 100644 vendor/google.golang.org/appengine/timeout.go create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go delete mode 100644 vendor/google.golang.org/cloud/internal/cloud.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml delete mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/grpc/Makefile delete mode 100644 vendor/google.golang.org/grpc/README.md delete mode 100755 vendor/google.golang.org/grpc/codegen.sh delete mode 100755 vendor/google.golang.org/grpc/coverage.sh delete mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 
vendor/k8s.io/client-go/1.4/LICENSE delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/api/OWNERS delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/api/node_example.json delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/api/replication_controller_example.json delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/api/resource/generated.proto delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/conversion/OWNERS delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/runtime/OWNERS delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/runtime/generated.proto delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/util/intstr/generated.proto delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/version/.gitattributes delete mode 100644 vendor/k8s.io/client-go/1.4/pkg/watch/versioned/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/node_example.json delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto 
delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/informers/core.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/replication/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/conversion/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS create mode 100644 vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go create mode 100644 vendor/k8s.io/kubernetes/pkg/master/thirdparty_controller.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/runtime/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/runtime/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/storage/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/metrics/util.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/term/resize.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/term/resizeevents.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/util/term/resizeevents_windows.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/term/setsize.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/term/setsize_unsupported.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/term/term.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/default_rate_limiters.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/doc.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/metrics.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/workqueue/timed_queue.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/version/.gitattributes delete mode 100644 vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto delete mode 100644 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS create mode 100644 vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE create mode 100644 vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index fbfe8ba2e..000000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,2250 +0,0 @@ -{ - "ImportPath": "github.com/kubernetes-incubator/kompose", - "GoVersion": "go1.6", - "GodepVersion": "v74", - "Packages": [ - "./..." 
- ], - "Deps": [ - { - "ImportPath": "bitbucket.org/ww/goautoneg", - "Comment": "null-5", - "Rev": "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'" - }, - { - "ImportPath": "github.com/Azure/go-ansiterm", - "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e" - }, - { - "ImportPath": "github.com/Azure/go-ansiterm/winterm", - "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e" - }, - { - "ImportPath": "github.com/MakeNowJust/heredoc", - "Rev": "1d91351acdc1cb2f2c995864674b754134b86ca7" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.10.0", - "Rev": "4b6ea7319e214d98c938f12692336f7ca9348d6b" - }, - { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" - }, - { - "ImportPath": "github.com/blang/semver", - "Comment": "v3.0.1", - "Rev": "31b736133b98f26d5e078ec9eb591666edfd091f" - }, - { - "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.0.6", - "Rev": 
"9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.0.6", - "Rev": "9efa00d1030d4bf62eb8e5ec130023aeb1b8e2d0" - }, - { - "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" - }, - { - "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" - }, - { - "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b" - }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Comment": "v8-2-g4484981", - "Rev": "4484981625c1a6a2ecb40a390fcb6a9bcfee76e3" - }, - { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/coreos/pkg/health", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/coreos/pkg/httputil", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/coreos/pkg/timeutil", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/davecgh/go-spew/spew", - "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" - }, - { - "ImportPath": "github.com/dgrijalva/jwt-go", - "Comment": "v3.0.0-4-g01aeca5", - "Rev": 
"01aeca54ebda6e0fbfafd0a524d234159c05ec20" - }, - { - "ImportPath": "github.com/docker/distribution", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/context", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/digest", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/manifest", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/manifest/schema1", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/manifest/schema2", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/reference", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/distribution/uuid", - "Comment": "docs-v2.4.1-2016-06-28-136-g1921dde", - "Rev": "1921dde3f1e52bf7dac07a0a2fcd55b770e134c5" - }, - { - "ImportPath": "github.com/docker/docker/api/types", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/blkiodev", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/container", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { 
- "ImportPath": "github.com/docker/docker/api/types/filters", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/network", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/registry", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/strslice", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/swarm", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/api/types/versions", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/cli/command/bundlefile", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/opts", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/pkg/signal", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { 
- "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/pkg/urlutil", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/docker/runconfig/opts", - "Comment": "docs-v1.12.0-rc4-2016-07-15-1840-g601004e", - "Rev": "601004e1a714d77d3a43e957b8ae8adbc867b280" - }, - { - "ImportPath": "github.com/docker/engine-api/types", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/blkiodev", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/container", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/filters", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/network", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/registry", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/strslice", - "Comment": "v0.3.1-232-g1d24745", - "Rev": 
"1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/swarm", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/engine-api/types/versions", - "Comment": "v0.3.1-232-g1d24745", - "Rev": "1d247454d4307fb1ddf10d09fd2996394b085904" - }, - { - "ImportPath": "github.com/docker/go-connections/nat", - "Comment": "v0.2.0-2-gf549a93", - "Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb" - }, - { - "ImportPath": "github.com/docker/go-units", - "Comment": "v0.1.0-21-g0bbddae", - "Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b" - }, - { - "ImportPath": "github.com/docker/libcompose/config", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/logger", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/lookup", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/project", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/project/events", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/project/options", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/utils", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libcompose/yaml", - "Comment": "v0.3.0-68-gfbdac0a", - "Rev": "fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c" - }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": 
"c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41" - }, - { - "ImportPath": "github.com/emicklei/go-restful", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" - }, - { - "ImportPath": "github.com/emicklei/go-restful/log", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" - }, - { - "ImportPath": "github.com/emicklei/go-restful/swagger", - "Comment": "v1.2-79-g89ef8af", - "Rev": "89ef8af493ab468a45a42bb0d89a06fccdd2fb22" - }, - { - "ImportPath": "github.com/evanphx/json-patch", - "Rev": "465937c80b3c07a7c7ad20cc934898646a91c1de" - }, - { - "ImportPath": "github.com/fatih/structs", - "Rev": "be738c8546f55b34e60125afa50ed73a9a9c460e" - }, - { - "ImportPath": "github.com/flynn/go-shlex", - "Rev": "3f9db97f856818214da2e1057f8ad84803971cff" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": 
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix", - "Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b" - }, - { - "ImportPath": "github.com/ghodss/yaml", - "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" - }, - { - "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.2-33-ge18d7aa", - "Rev": 
"e18d7aa8f8c624c915db340349aad4c49b10d173" - }, - { - "ImportPath": "github.com/gogo/protobuf/sortkeys", - "Comment": "v0.2-33-ge18d7aa", - "Rev": "e18d7aa8f8c624c915db340349aad4c49b10d173" - }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "335da9dda11408a34b64344f82e9c03779b71673" - }, - { - "ImportPath": "github.com/golang/groupcache/lru", - "Rev": "604ed5785183e59ae2789449d89e73f3a2a77987" - }, - { - "ImportPath": "github.com/golang/protobuf/jsonpb", - "Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67" - }, - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "8616e8ee5e20a1704615e6c8d7afcdac06087a67" - }, - { - "ImportPath": "github.com/gonum/blas", - "Rev": "80dca99229cccca259b550ae3f755cf79c65a224" - }, - { - "ImportPath": "github.com/gonum/blas/blas64", - "Rev": "80dca99229cccca259b550ae3f755cf79c65a224" - }, - { - "ImportPath": "github.com/gonum/blas/native", - "Rev": "80dca99229cccca259b550ae3f755cf79c65a224" - }, - { - "ImportPath": "github.com/gonum/blas/native/internal/math32", - "Rev": "80dca99229cccca259b550ae3f755cf79c65a224" - }, - { - "ImportPath": "github.com/gonum/graph", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - "ImportPath": "github.com/gonum/graph/concrete", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - "ImportPath": "github.com/gonum/graph/encoding/dot", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - "ImportPath": "github.com/gonum/graph/internal", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - "ImportPath": "github.com/gonum/graph/path", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - "ImportPath": "github.com/gonum/graph/topo", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - 
"ImportPath": "github.com/gonum/graph/traverse", - "Comment": "v0.9-100-gbde6d0f", - "Rev": "bde6d0fbd9dec5a997e906611fe0364001364c41" - }, - { - "ImportPath": "github.com/gonum/internal/asm", - "Rev": "5b84ddfb9d3e72d73b8de858c97650be140935c0" - }, - { - "ImportPath": "github.com/gonum/lapack", - "Rev": "88ec467285859a6cd23900147d250a8af1f38b10" - }, - { - "ImportPath": "github.com/gonum/lapack/lapack64", - "Rev": "88ec467285859a6cd23900147d250a8af1f38b10" - }, - { - "ImportPath": "github.com/gonum/lapack/native", - "Rev": "88ec467285859a6cd23900147d250a8af1f38b10" - }, - { - "ImportPath": "github.com/gonum/matrix/mat64", - "Rev": "fb1396264e2e259ff714a408a7b0142d238b198d" - }, - { - "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" - }, - { - "ImportPath": "github.com/google/gofuzz", - "Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Comment": "v1.0.0-8-gf52d055", - "Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Comment": "v1.0.0-8-gf52d055", - "Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1" - }, - { - "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Comment": "v1.0.0-8-gf52d055", - "Rev": "f52d055dc48aec25854ed7d31862f78913cf17d1" - }, - { - "ImportPath": "github.com/imdario/mergo", - "Comment": "0.1.3-8-g6633656", - "Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc" - }, - { - "ImportPath": "github.com/inconshreveable/mousetrap", - "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - }, - { - "ImportPath": 
"github.com/jonboulle/clockwork", - "Rev": "3f831b65b61282ba6bece21b91beea2edc4c887a" - }, - { - "ImportPath": "github.com/juju/ratelimit", - "Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177" - }, - { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/extension", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/graph", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/graph/graphview", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/kubegraph", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/kubegraph/analysis", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/kubegraph/nodes", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/latest", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/api/restmapper", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/auth/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": 
"github.com/openshift/origin/pkg/auth/authenticator", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/auth/authenticator/request/x509request", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/authorization/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/authorization/reaper", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/client", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/cmd", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/graph", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/graph/analysis", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/graph/nodes", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/build/util", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/client", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": 
"github.com/openshift/origin/pkg/cmd/cli/config", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/cmd/cli/describe", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/cmd/flagtypes", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/cmd/util", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/cmd/util/clientcmd", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/api/install", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/api/v1", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/cmd", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/graph", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/graph/analysis", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/graph/nodes", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/deploy/util", - "Comment": 
"v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/api/docker10", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/api/dockerpre012", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/api/install", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/api/v1", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/graph", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/image/graph/nodes", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/oauth/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/project/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/quota/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/quota/util", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/route/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - 
"ImportPath": "github.com/openshift/origin/pkg/route/generator", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/route/graph", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/route/graph/analysis", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/route/graph/nodes", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/sdn/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/security/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/template/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/user/api", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/user/reaper", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/util", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/util/dot", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/util/errors", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/util/namer", - "Comment": "v1.4.0-alpha.0", - 
"Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/util/parallel", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/openshift/origin/pkg/version", - "Comment": "v1.4.0-alpha.0", - "Rev": "67479ffd447d68d20e556746d56eb80458b9294c" - }, - { - "ImportPath": "github.com/pborman/uuid", - "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" - }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "0.7.0-52-ge51041b", - "Rev": "e51041b3fa41cece0dca035740ba6411905be473" - }, - { - "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-12-gfa8ad6f", - "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" - }, - { - "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d" - }, - { - "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d" - }, - { - "ImportPath": "github.com/prometheus/common/model", - "Rev": "a6ab08426bb262e2d190097751f5cfd1cfdfd17d" - }, - { - "ImportPath": "github.com/prometheus/procfs", - "Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a" - }, - { - "ImportPath": "github.com/spf13/cobra", - "Rev": "7c674d9e72017ed25f6d2b5e497a1368086b6a6f" - }, - { - "ImportPath": "github.com/spf13/pflag", - "Rev": "1560c1005499d61b80f865c04d39ca7505bf7f0b" - }, - { - "ImportPath": "github.com/ugorji/go/codec", - "Rev": "f4485b318aadd133842532f841dc205a8e339d74" - }, - { - "ImportPath": "github.com/urfave/cli", - "Comment": "v1.14.0", - "Rev": "71f57d300dd6a780ac1856c005c4b518cfd498ec" - }, - { - "ImportPath": "github.com/xeipuuv/gojsonpointer", - "Rev": "e0fe6f68307607d540ed8eac07a342c33fa1b54a" - }, - { - "ImportPath": "github.com/xeipuuv/gojsonreference", - "Rev": "e02fc20de94c78484cd5ffb007f8af96be030a45" - }, - { - 
"ImportPath": "github.com/xeipuuv/gojsonschema", - "Rev": "ac452913faa25c08bb78810d3e6f88b8a39f8f25" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/net/context/ctxhttp", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/net/http2", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/net/http2/hpack", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/net/internal/timeseries", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/net/lex/httplex", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/net/trace", - "Rev": "e90d6d0afc4c315a0d87a568ae68577cc15149a0" - }, - { - "ImportPath": "golang.org/x/oauth2", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" - }, - { - "ImportPath": "golang.org/x/oauth2/google", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" - }, - { - "ImportPath": "golang.org/x/oauth2/internal", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" - }, - { - "ImportPath": "golang.org/x/oauth2/jws", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" - }, - { - "ImportPath": "golang.org/x/oauth2/jwt", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" - }, - { - "ImportPath": "golang.org/x/sys/unix", - "Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d" - }, - { - "ImportPath": "google.golang.org/cloud/compute/metadata", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" - }, - { - "ImportPath": "google.golang.org/cloud/internal", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" - }, - { - "ImportPath": "google.golang.org/grpc", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": 
"231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.0.0-183-g231b4cf", - "Rev": "231b4cfea0e79843053a33f5fe90bd4d84b23cd3" - }, - { - "ImportPath": "gopkg.in/inf.v0", - "Comment": "v0.9.0", - "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "a83829b6f1293c91addabc89d0571c246397bbf4" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/endpoints", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/errors", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/meta", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/meta/metatypes", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/pod", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - 
"ImportPath": "k8s.io/client-go/1.4/pkg/api/resource", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/service", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/unversioned", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/unversioned/validation", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/util", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/v1", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/api/validation", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/apimachinery", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/apimachinery/registered", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/apis/autoscaling", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/apis/batch", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/apis/extensions", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/auth/user", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/capabilities", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/conversion", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/conversion/queryparams", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/fields", - "Rev": 
"f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/labels", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/json", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/protobuf", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/recognizer", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/streaming", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/versioning", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/security/apparmor", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/selection", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/third_party/forked/golang/reflect", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/types", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/clock", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/config", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/crypto", - "Rev": 
"f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/errors", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/flowcontrol", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/framer", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/hash", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/integer", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/intstr", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/json", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/labels", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/net", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/net/sets", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/parsers", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/rand", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/runtime", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/sets", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/uuid", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/validation", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/validation/field", - "Rev": 
"f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/wait", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/util/yaml", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/version", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/watch", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/pkg/watch/versioned", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/rest", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/tools/clientcmd/api", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/tools/metrics", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/client-go/1.4/transport", - "Rev": "f8e519fcc08881bcfe82d8755046df62ea30fda0" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/v1beta1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": 
"d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/annotations", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/errors", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/meta", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/meta/metatypes", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/rest", - "Comment": 
"v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned/validation", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1alpha1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" 
- }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - 
"ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { 
- "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1alpha1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/auth/authenticator", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/auth/user", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/cache", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/metrics", - 
"Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/record", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/restclient", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/transport", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/typed/dynamic", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/auth", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": 
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/controller/framework", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/controller/framework/informers", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/controller/replication", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/conversion", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/fields", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", - 
"Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/labels", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/generic", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": 
"d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/streaming", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/securitycontextconstraints/util", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/selection", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd/metrics", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd/util", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage/etcd3", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": 
"d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage/storagebackend", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/storage/storagebackend/factory", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/types", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/cache", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/certificates", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/clock", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/crypto", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/diff", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/errors", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/exec", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/flag", - 
"Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/flowcontrol", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/framer", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/homedir", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/integer", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/interrupt", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/intstr", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/json", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/jsonpath", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/metrics", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/net", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": 
"k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/pod", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/rand", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/replicaset", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/runtime", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/sets", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/strategicpatch", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/term", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/uuid", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/validation", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/validation/field", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": 
"d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/wait", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/workqueue", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/util/yaml", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/watch", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/watch/versioned", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/gcp", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/oidc", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/json", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/netutil", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/reflect", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - }, - { - 
"ImportPath": "k8s.io/kubernetes/third_party/forked/golang/template", - "Comment": "v1.4.0-beta.3-45-gd19513f", - "Rev": "d19513fe86f3e0769dd5c4674c093a88a5adb8b4" - } - ] -} diff --git a/Godeps/Readme b/Godeps/Readme deleted file mode 100644 index 4cdaa53d5..000000000 --- a/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. diff --git a/glide.lock b/glide.lock new file mode 100644 index 000000000..27b086d29 --- /dev/null +++ b/glide.lock @@ -0,0 +1,767 @@ +hash: 3caf221b3b3a35894b12799f8a5047107b1d5c9237bfea1706af24ea525f97e6 +updated: 2016-12-06T17:06:23.574663245+01:00 +imports: +- name: cloud.google.com/go + version: 3b1ae45394a234c385be014e9a488f2bb6eef821 + subpackages: + - compute/metadata + - internal + - storage +- name: github.com/beorn7/perks + version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 + subpackages: + - quantile +- name: github.com/blang/semver + version: 31b736133b98f26d5e078ec9eb591666edfd091f +- name: github.com/coreos/etcd + version: 83347907774bf36cbb261c594a32fd7b0f5dd9f6 + subpackages: + - alarm + - auth + - auth/authpb + - client + - clientv3 + - compactor + - discovery + - embed + - error + - etcdserver + - etcdserver/api + - etcdserver/api/v2http + - etcdserver/api/v2http/httptypes + - etcdserver/api/v3rpc + - etcdserver/api/v3rpc/rpctypes + - etcdserver/auth + - etcdserver/etcdserverpb + - etcdserver/membership + - etcdserver/stats + - integration + - lease + - lease/leasehttp + - lease/leasepb + - mvcc + - mvcc/backend + - mvcc/mvccpb + - pkg/adt + - pkg/contention + - pkg/cors + - pkg/crc + - pkg/fileutil + - pkg/httputil + - pkg/idutil + - pkg/ioutil + - pkg/logutil + - pkg/netutil + - pkg/osutil + - pkg/pathutil + - pkg/pbutil + - pkg/runtime + - pkg/schedule + - pkg/testutil + - pkg/tlsutil + - pkg/transport + - pkg/types + - pkg/wait + - proxy/grpcproxy + - proxy/grpcproxy/cache + - 
raft + - raft/raftpb + - rafthttp + - snap + - snap/snappb + - store + - version + - wal + - wal/walpb +- name: github.com/coreos/go-oidc + version: 5cf2aa52da8c574d3aa4458f471ad6ae2240fe6b + subpackages: + - http + - jose + - key + - oauth2 + - oidc +- name: github.com/coreos/go-systemd + version: 4484981625c1a6a2ecb40a390fcb6a9bcfee76e3 + subpackages: + - daemon + - dbus + - journal + - unit + - util +- name: github.com/coreos/pkg + version: fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 + subpackages: + - capnslog + - dlopen + - health + - httputil + - timeutil +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/dgrijalva/jwt-go + version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20 +- name: github.com/docker/distribution + version: 12acdf0a6c1e56d965ac6eb395d2bce687bf22fc + subpackages: + - configuration + - context + - digest + - health + - health/checks + - manifest + - manifest/manifestlist + - manifest/schema1 + - manifest/schema2 + - notifications + - reference + - registry/api/errcode + - registry/api/v2 + - registry/auth + - registry/auth/htpasswd + - registry/auth/token + - registry/client + - registry/client/auth + - registry/client/transport + - registry/handlers + - registry/middleware/registry + - registry/middleware/repository + - registry/proxy + - registry/proxy/scheduler + - registry/storage + - registry/storage/cache + - registry/storage/cache/memory + - registry/storage/cache/redis + - registry/storage/driver + - registry/storage/driver/azure + - registry/storage/driver/base + - registry/storage/driver/factory + - registry/storage/driver/filesystem + - registry/storage/driver/gcs + - registry/storage/driver/inmemory + - registry/storage/driver/middleware + - registry/storage/driver/middleware/cloudfront + - registry/storage/driver/s3-aws + - registry/storage/driver/swift + - uuid + - version +- name: github.com/docker/docker + version: 
601004e1a714d77d3a43e957b8ae8adbc867b280 + subpackages: + - api/types + - api/types/blkiodev + - api/types/container + - api/types/filters + - api/types/mount + - api/types/network + - api/types/registry + - api/types/strslice + - api/types/swarm + - api/types/versions + - opts + - pkg/mount + - pkg/signal + - pkg/urlutil + - runconfig/opts +- name: github.com/docker/engine-api + version: dea108d3aa0c67d7162a3fd8aa65f38a430019fd + subpackages: + - client + - client/transport + - client/transport/cancellable + - types + - types/blkiodev + - types/container + - types/filters + - types/network + - types/reference + - types/registry + - types/strslice + - types/time + - types/versions +- name: github.com/docker/go-connections + version: f549a9393d05688dff0992ef3efd8bbe6c628aeb + subpackages: + - nat + - sockets + - tlsconfig +- name: github.com/docker/go-units + version: 0bbddae09c5a5419a8c6dcdd7ff90da3d450393b +- name: github.com/docker/libcompose + version: fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c + subpackages: + - config + - logger + - lookup + - project + - project/events + - project/options + - utils + - yaml +- name: github.com/docker/libtrust + version: c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41 +- name: github.com/emicklei/go-restful + version: 89ef8af493ab468a45a42bb0d89a06fccdd2fb22 + subpackages: + - log + - swagger +- name: github.com/evanphx/json-patch + version: 465937c80b3c07a7c7ad20cc934898646a91c1de +- name: github.com/fatih/structs + version: dc3312cb1a4513a366c4c9e622ad55c32df12ed3 +- name: github.com/flynn/go-shlex + version: 3f9db97f856818214da2e1057f8ad84803971cff +- name: github.com/fsouza/go-dockerclient + version: bf97c77db7c945cbcdbf09d56c6f87a66f54537b + subpackages: + - external/github.com/Sirupsen/logrus + - external/github.com/docker/docker/opts + - external/github.com/docker/docker/pkg/archive + - external/github.com/docker/docker/pkg/fileutils + - external/github.com/docker/docker/pkg/homedir + 
- external/github.com/docker/docker/pkg/idtools + - external/github.com/docker/docker/pkg/ioutils + - external/github.com/docker/docker/pkg/longpath + - external/github.com/docker/docker/pkg/pools + - external/github.com/docker/docker/pkg/promise + - external/github.com/docker/docker/pkg/stdcopy + - external/github.com/docker/docker/pkg/system + - external/github.com/docker/go-units + - external/github.com/hashicorp/go-cleanhttp + - external/github.com/opencontainers/runc/libcontainer/user + - external/golang.org/x/net/context + - external/golang.org/x/sys/unix +- name: github.com/ghodss/yaml + version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee +- name: github.com/gogo/protobuf + version: e18d7aa8f8c624c915db340349aad4c49b10d173 + subpackages: + - gogoproto + - plugin/compare + - plugin/defaultcheck + - plugin/description + - plugin/embedcheck + - plugin/enumstringer + - plugin/equal + - plugin/face + - plugin/gostring + - plugin/marshalto + - plugin/oneofcheck + - plugin/populate + - plugin/size + - plugin/stringer + - plugin/testgen + - plugin/union + - plugin/unmarshal + - proto + - protoc-gen-gogo/descriptor + - protoc-gen-gogo/generator + - protoc-gen-gogo/grpc + - protoc-gen-gogo/plugin + - sortkeys + - vanity + - vanity/command +- name: github.com/golang/glog + version: 335da9dda11408a34b64344f82e9c03779b71673 + repo: https://github.com/openshift/glog +- name: github.com/golang/groupcache + version: 604ed5785183e59ae2789449d89e73f3a2a77987 + subpackages: + - lru +- name: github.com/golang/protobuf + version: 8616e8ee5e20a1704615e6c8d7afcdac06087a67 + subpackages: + - jsonpb + - proto +- name: github.com/google/cadvisor + version: ef63d70156d509efbbacfc3e86ed120228fab914 + subpackages: + - api + - cache/memory + - collector + - container + - container/common + - container/docker + - container/libcontainer + - container/raw + - container/rkt + - container/systemd + - devicemapper + - events + - fs + - healthz + 
- http + - http/mux + - info/v1 + - info/v2 + - machine + - manager + - manager/watcher + - manager/watcher/raw + - manager/watcher/rkt + - metrics + - pages + - pages/static + - storage + - summary + - utils + - utils/cloudinfo + - utils/cpuload + - utils/cpuload/netlink + - utils/docker + - utils/oomparser + - utils/sysfs + - utils/sysinfo + - utils/tail + - validate + - version +- name: github.com/google/gofuzz + version: bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5 +- name: github.com/gorilla/context + version: 215affda49addc4c8ef7e2534915df2c8c35c6cd +- name: github.com/gorilla/mux + version: 8096f47503459bcc74d1f4c487b7e6e42e5746b5 +- name: github.com/grpc-ecosystem/grpc-gateway + version: f52d055dc48aec25854ed7d31862f78913cf17d1 + subpackages: + - runtime + - runtime/internal + - utilities +- name: github.com/imdario/mergo + version: 6633656539c1639d9d78127b7d47c622b5d7b6dc +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/jonboulle/clockwork + version: 3f831b65b61282ba6bece21b91beea2edc4c887a +- name: github.com/juju/ratelimit + version: 77ed1c8a01217656d2080ad51981f6e99adaa177 +- name: github.com/matttproud/golang_protobuf_extensions + version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a + subpackages: + - pbutil +- name: github.com/openshift/origin + version: b4e0954faa4a0d11d9c1a536b76ad4a8c0206b7c + subpackages: + - pkg/api + - pkg/api/extension + - pkg/api/latest + - pkg/auth/api + - pkg/auth/authenticator + - pkg/auth/authenticator/request/x509request + - pkg/authorization/api + - pkg/build/api + - pkg/client + - pkg/cmd/cli/config + - pkg/cmd/util + - pkg/deploy/api + - pkg/deploy/api/install + - pkg/deploy/api/v1 + - pkg/image/api + - pkg/image/api/docker10 + - pkg/image/api/dockerpre012 + - pkg/image/api/install + - pkg/image/api/v1 + - pkg/image/reference + - pkg/oauth/api + - pkg/project/api + - pkg/quota/api + - pkg/quota/util + - pkg/route/api + - 
pkg/sdn/api + - pkg/security/api + - pkg/template/api + - pkg/user/api + - pkg/util/namer + - pkg/version +- name: github.com/pborman/uuid + version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 +- name: github.com/prometheus/client_golang + version: e51041b3fa41cece0dca035740ba6411905be473 + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 + subpackages: + - go +- name: github.com/prometheus/common + version: a6ab08426bb262e2d190097751f5cfd1cfdfd17d + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: 454a56f35412459b5e684fd5ec0f9211b94f002a +- name: github.com/Sirupsen/logrus + version: aaf92c95712104318fc35409745f1533aa5ff327 +- name: github.com/spf13/cobra + version: 7c674d9e72017ed25f6d2b5e497a1368086b6a6f +- name: github.com/spf13/pflag + version: 5ccb023bc27df288a957c5e994cd44fd19619465 +- name: github.com/ugorji/go + version: f4485b318aadd133842532f841dc205a8e339d74 + subpackages: + - codec +- name: github.com/urfave/cli + version: 0bdeddeeb0f650497d603c4ad7b20cfe685682f6 +- name: github.com/xeipuuv/gojsonpointer + version: e0fe6f68307607d540ed8eac07a342c33fa1b54a +- name: github.com/xeipuuv/gojsonreference + version: e02fc20de94c78484cd5ffb007f8af96be030a45 +- name: github.com/xeipuuv/gojsonschema + version: e18f0065e8c148fcf567ac43a3f8f5b66ac0720b +- name: golang.org/x/net + version: e90d6d0afc4c315a0d87a568ae68577cc15149a0 + subpackages: + - context + - context/ctxhttp + - html + - html/atom + - http2 + - http2/hpack + - idna + - internal/timeseries + - lex/httplex + - proxy + - trace + - websocket +- name: golang.org/x/oauth2 + version: 3c3a985cb79f52a3190fbc056984415ca6763d01 + subpackages: + - google + - internal + - jws + - jwt +- name: golang.org/x/sys + version: 833a04a10549a95dc34458c195cbad61bbb6cb4d + subpackages: + - unix + - windows +- name: 
google.golang.org/appengine + version: 12d5545dc1cfa6047a286d5e853841b6471f4c19 + subpackages: + - internal + - internal/app_identity + - internal/base + - internal/datastore + - internal/log + - internal/modules + - internal/remote_api + - internal/urlfetch + - urlfetch +- name: google.golang.org/grpc + version: b1a2821ca5a4fd6b6e48ddfbb7d6d7584d839d21 + subpackages: + - codes + - credentials + - grpclog + - internal + - metadata + - naming + - peer + - transport +- name: gopkg.in/inf.v0 + version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/yaml.v2 + version: a83829b6f1293c91addabc89d0571c246397bbf4 +- name: k8s.io/client-go + version: d72c0e162789e1bbb33c33cfa26858a1375efe01 + subpackages: + - 1.4/discovery + - 1.4/dynamic + - 1.4/kubernetes + - 1.4/kubernetes/typed/apps/v1alpha1 + - 1.4/kubernetes/typed/authentication/v1beta1 + - 1.4/kubernetes/typed/authorization/v1beta1 + - 1.4/kubernetes/typed/autoscaling/v1 + - 1.4/kubernetes/typed/batch/v1 + - 1.4/kubernetes/typed/certificates/v1alpha1 + - 1.4/kubernetes/typed/core/v1 + - 1.4/kubernetes/typed/extensions/v1beta1 + - 1.4/kubernetes/typed/policy/v1alpha1 + - 1.4/kubernetes/typed/rbac/v1alpha1 + - 1.4/kubernetes/typed/storage/v1beta1 + - 1.4/pkg/api + - 1.4/pkg/api/endpoints + - 1.4/pkg/api/errors + - 1.4/pkg/api/install + - 1.4/pkg/api/meta + - 1.4/pkg/api/meta/metatypes + - 1.4/pkg/api/pod + - 1.4/pkg/api/resource + - 1.4/pkg/api/service + - 1.4/pkg/api/unversioned + - 1.4/pkg/api/unversioned/validation + - 1.4/pkg/api/util + - 1.4/pkg/api/v1 + - 1.4/pkg/api/validation + - 1.4/pkg/apimachinery + - 1.4/pkg/apimachinery/registered + - 1.4/pkg/apis/apps + - 1.4/pkg/apis/apps/install + - 1.4/pkg/apis/apps/v1alpha1 + - 1.4/pkg/apis/authentication + - 1.4/pkg/apis/authentication/install + - 1.4/pkg/apis/authentication/v1beta1 + - 1.4/pkg/apis/authorization + - 1.4/pkg/apis/authorization/install + - 1.4/pkg/apis/authorization/v1beta1 + - 1.4/pkg/apis/autoscaling + - 1.4/pkg/apis/autoscaling/install + 
- 1.4/pkg/apis/autoscaling/v1 + - 1.4/pkg/apis/batch + - 1.4/pkg/apis/batch/install + - 1.4/pkg/apis/batch/v1 + - 1.4/pkg/apis/batch/v2alpha1 + - 1.4/pkg/apis/certificates + - 1.4/pkg/apis/certificates/install + - 1.4/pkg/apis/certificates/v1alpha1 + - 1.4/pkg/apis/componentconfig + - 1.4/pkg/apis/componentconfig/install + - 1.4/pkg/apis/componentconfig/v1alpha1 + - 1.4/pkg/apis/extensions + - 1.4/pkg/apis/extensions/install + - 1.4/pkg/apis/extensions/v1beta1 + - 1.4/pkg/apis/imagepolicy + - 1.4/pkg/apis/imagepolicy/install + - 1.4/pkg/apis/imagepolicy/v1alpha1 + - 1.4/pkg/apis/policy + - 1.4/pkg/apis/policy/install + - 1.4/pkg/apis/policy/v1alpha1 + - 1.4/pkg/apis/rbac + - 1.4/pkg/apis/rbac/install + - 1.4/pkg/apis/rbac/v1alpha1 + - 1.4/pkg/apis/storage + - 1.4/pkg/apis/storage/install + - 1.4/pkg/apis/storage/v1beta1 + - 1.4/pkg/auth/user + - 1.4/pkg/capabilities + - 1.4/pkg/conversion + - 1.4/pkg/conversion/queryparams + - 1.4/pkg/fields + - 1.4/pkg/kubelet/qos + - 1.4/pkg/kubelet/server/portforward + - 1.4/pkg/kubelet/types + - 1.4/pkg/labels + - 1.4/pkg/master/ports + - 1.4/pkg/runtime + - 1.4/pkg/runtime/serializer + - 1.4/pkg/runtime/serializer/json + - 1.4/pkg/runtime/serializer/protobuf + - 1.4/pkg/runtime/serializer/recognizer + - 1.4/pkg/runtime/serializer/streaming + - 1.4/pkg/runtime/serializer/versioning + - 1.4/pkg/security/apparmor + - 1.4/pkg/selection + - 1.4/pkg/third_party/forked/golang/json + - 1.4/pkg/third_party/forked/golang/reflect + - 1.4/pkg/types + - 1.4/pkg/util + - 1.4/pkg/util/clock + - 1.4/pkg/util/config + - 1.4/pkg/util/crypto + - 1.4/pkg/util/diff + - 1.4/pkg/util/errors + - 1.4/pkg/util/flowcontrol + - 1.4/pkg/util/framer + - 1.4/pkg/util/hash + - 1.4/pkg/util/homedir + - 1.4/pkg/util/httpstream + - 1.4/pkg/util/integer + - 1.4/pkg/util/intstr + - 1.4/pkg/util/json + - 1.4/pkg/util/labels + - 1.4/pkg/util/net + - 1.4/pkg/util/net/sets + - 1.4/pkg/util/parsers + - 1.4/pkg/util/rand + - 1.4/pkg/util/runtime + - 1.4/pkg/util/sets + 
- 1.4/pkg/util/strategicpatch + - 1.4/pkg/util/uuid + - 1.4/pkg/util/validation + - 1.4/pkg/util/validation/field + - 1.4/pkg/util/wait + - 1.4/pkg/util/yaml + - 1.4/pkg/version + - 1.4/pkg/watch + - 1.4/pkg/watch/versioned + - 1.4/rest + - 1.4/tools/clientcmd/api + - 1.4/tools/metrics + - 1.4/transport +- name: k8s.io/kubernetes + version: a9e9cf3b407c1d315686c452bdb918c719c3ea6e + repo: https://github.com/openshift/kubernetes + subpackages: + - federation/apis/federation + - federation/apis/federation/install + - federation/apis/federation/v1beta1 + - federation/client/clientset_generated/federation_internalclientset + - federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned + - federation/client/clientset_generated/federation_internalclientset/typed/extensions/unversioned + - federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned + - pkg/api + - pkg/api/annotations + - pkg/api/endpoints + - pkg/api/errors + - pkg/api/install + - pkg/api/meta + - pkg/api/meta/metatypes + - pkg/api/pod + - pkg/api/resource + - pkg/api/rest + - pkg/api/service + - pkg/api/unversioned + - pkg/api/unversioned/validation + - pkg/api/util + - pkg/api/v1 + - pkg/api/validation + - pkg/apimachinery + - pkg/apimachinery/registered + - pkg/apis/apps + - pkg/apis/apps/install + - pkg/apis/apps/v1alpha1 + - pkg/apis/authentication + - pkg/apis/authentication/install + - pkg/apis/authentication/v1beta1 + - pkg/apis/authorization + - pkg/apis/authorization/install + - pkg/apis/authorization/v1beta1 + - pkg/apis/autoscaling + - pkg/apis/autoscaling/install + - pkg/apis/autoscaling/v1 + - pkg/apis/batch + - pkg/apis/batch/install + - pkg/apis/batch/v1 + - pkg/apis/batch/v2alpha1 + - pkg/apis/certificates + - pkg/apis/certificates/install + - pkg/apis/certificates/v1alpha1 + - pkg/apis/componentconfig + - pkg/apis/componentconfig/install + - pkg/apis/componentconfig/v1alpha1 + - pkg/apis/extensions + - 
pkg/apis/extensions/install + - pkg/apis/extensions/v1beta1 + - pkg/apis/extensions/validation + - pkg/apis/policy + - pkg/apis/policy/install + - pkg/apis/policy/v1alpha1 + - pkg/apis/rbac + - pkg/apis/rbac/install + - pkg/apis/rbac/v1alpha1 + - pkg/apis/storage + - pkg/apis/storage/install + - pkg/apis/storage/util + - pkg/apis/storage/v1beta1 + - pkg/auth/authenticator + - pkg/auth/user + - pkg/capabilities + - pkg/client/cache + - pkg/client/clientset_generated/internalclientset + - pkg/client/clientset_generated/internalclientset/typed/authentication/unversioned + - pkg/client/clientset_generated/internalclientset/typed/authorization/unversioned + - pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned + - pkg/client/clientset_generated/internalclientset/typed/batch/unversioned + - pkg/client/clientset_generated/internalclientset/typed/certificates/unversioned + - pkg/client/clientset_generated/internalclientset/typed/core/unversioned + - pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned + - pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned + - pkg/client/clientset_generated/internalclientset/typed/storage/unversioned + - pkg/client/metrics + - pkg/client/record + - pkg/client/restclient + - pkg/client/transport + - pkg/client/typed/discovery + - pkg/client/typed/dynamic + - pkg/client/unversioned + - pkg/client/unversioned/adapters/internalclientset + - pkg/client/unversioned/auth + - pkg/client/unversioned/clientcmd + - pkg/client/unversioned/clientcmd/api + - pkg/client/unversioned/clientcmd/api/latest + - pkg/client/unversioned/clientcmd/api/v1 + - pkg/controller + - pkg/controller/deployment/util + - pkg/controller/framework + - pkg/conversion + - pkg/conversion/queryparams + - pkg/credentialprovider + - pkg/fieldpath + - pkg/fields + - pkg/kubectl + - pkg/kubectl/cmd/util + - pkg/kubectl/resource + - pkg/kubelet/qos + - pkg/kubelet/types + - pkg/labels + - pkg/master/ports + - 
pkg/registry/generic + - pkg/registry/thirdpartyresourcedata + - pkg/runtime + - pkg/runtime/serializer + - pkg/runtime/serializer/json + - pkg/runtime/serializer/protobuf + - pkg/runtime/serializer/recognizer + - pkg/runtime/serializer/streaming + - pkg/runtime/serializer/versioning + - pkg/security/apparmor + - pkg/security/podsecuritypolicy/util + - pkg/securitycontextconstraints/util + - pkg/selection + - pkg/serviceaccount + - pkg/storage + - pkg/storage/etcd + - pkg/storage/etcd/metrics + - pkg/storage/etcd/util + - pkg/storage/etcd3 + - pkg/storage/storagebackend + - pkg/storage/storagebackend/factory + - pkg/types + - pkg/util + - pkg/util/cache + - pkg/util/certificates + - pkg/util/clock + - pkg/util/config + - pkg/util/crypto + - pkg/util/diff + - pkg/util/errors + - pkg/util/exec + - pkg/util/flag + - pkg/util/flowcontrol + - pkg/util/framer + - pkg/util/hash + - pkg/util/homedir + - pkg/util/integer + - pkg/util/intstr + - pkg/util/json + - pkg/util/jsonpath + - pkg/util/labels + - pkg/util/net + - pkg/util/net/sets + - pkg/util/parsers + - pkg/util/pod + - pkg/util/rand + - pkg/util/replicaset + - pkg/util/runtime + - pkg/util/sets + - pkg/util/slice + - pkg/util/strategicpatch + - pkg/util/uuid + - pkg/util/validation + - pkg/util/validation/field + - pkg/util/wait + - pkg/util/yaml + - pkg/version + - pkg/watch + - pkg/watch/versioned + - plugin/pkg/client/auth + - plugin/pkg/client/auth/gcp + - plugin/pkg/client/auth/oidc + - third_party/forked/golang/json + - third_party/forked/golang/netutil + - third_party/forked/golang/reflect + - third_party/forked/golang/template +testImports: [] diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 000000000..adb01f648 --- /dev/null +++ b/glide.yaml @@ -0,0 +1,59 @@ +package: github.com/kubernetes-incubator/kompose +homepage: https://github.com/kubernetes-incubator/kompose +licence: Apache-2.0 + +import: + +- package: github.com/Sirupsen/logrus +- package: github.com/fatih/structs +- 
package: github.com/ghodss/yaml +- package: github.com/urfave/cli + +- package: github.com/docker/libcompose + version: fbdac0a6a80837c63eb6c8f43514f7bb3f32df6c + subpackages: + - config + - lookup + - project + +- package: github.com/openshift/origin + version: v1.4.0-rc1 + subpackages: + - pkg/client + - pkg/cmd/cli/config + - pkg/deploy/api + - pkg/deploy/api/install + - pkg/image/api + - pkg/image/api/install + +# OpenShift uses Kubernetes fork to carry some patches +# Kubernetes will be in the same version as what OpenShift is using (from OpenShifts Godeps.json) +- package: k8s.io/kubernetes + repo: https://github.com/openshift/kubernetes + subpackages: + - pkg/api + - pkg/api/install + - pkg/api/resource + - pkg/api/unversioned + - pkg/apis/extensions + - pkg/apis/extensions/install + - pkg/client/unversioned + - pkg/client/unversioned/clientcmd + - pkg/kubectl + - pkg/kubectl/cmd/util + - pkg/runtime + - pkg/util/intstr + +# OpenShift uses glog fork to carry some patches +# There are more forks that openshift is using (see: https://github.com/openshift/origin/blob/master/hack/godep-restore.sh) +# But we are not importing any packages that are using forked packages (expect glog). +# This is why only glog is here. +- package: github.com/golang/glog + repo: https://github.com/openshift/glog + +# libcompose and OpenShift depends on different Docker version, +# this makes sure that we are using version that is compatible with both. +# Glide will show WARN about conflicting rev (it this case it is OK) +- package: github.com/docker/docker + version: 601004e1a714d77d3a43e957b8ae8adbc867b280 + \ No newline at end of file diff --git a/script/godep-restore.sh b/script/godep-restore.sh deleted file mode 100755 index 08ea3cdb6..000000000 --- a/script/godep-restore.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Copyright 2016 The Kubernetes Authors All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# inspired by: https://github.com/openshift/origin/blob/master/hack/godep-restore.sh - -# Sometimes godep needs 'other' remotes. So add those remotes -preload-remote() { - local orig_org="$1" - local orig_project="$2" - local alt_org="$3" - local alt_project="$4" - - # Go get stinks, which is why we have the || true... - go get -d "${orig_org}/${orig_project}" &>/dev/null || true - - repo_dir="${GOPATH}/src/${orig_org}/${orig_project}" - pushd "${repo_dir}" > /dev/null - git remote add "${alt_org}-remote" "https://${alt_org}/${alt_project}.git" > /dev/null || true - git remote update > /dev/null - popd > /dev/null -} - -echo "Preloading some dependencies" -# OpenShift requires its own Kubernets fork :-( -preload-remote "k8s.io" "kubernetes" "github.com/openshift" "kubernetes" -# OpenShift requires its own glog fork -preload-remote "github.com/golang" "glog" "github.com/openshift" "glog" - -echo "Starting to download all godeps. 
This takes a while" -godep restore -echo "Download finished into ${GOPATH}" diff --git a/vendor/bitbucket.org/ww/goautoneg/Makefile b/vendor/bitbucket.org/ww/goautoneg/Makefile deleted file mode 100644 index e33ee1730..000000000 --- a/vendor/bitbucket.org/ww/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/vendor/bitbucket.org/ww/goautoneg/README.txt b/vendor/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb6..000000000 --- a/vendor/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/google.golang.org/cloud/LICENSE b/vendor/cloud.google.com/go/LICENSE similarity index 100% rename from vendor/google.golang.org/cloud/LICENSE rename to vendor/cloud.google.com/go/LICENSE diff --git a/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go similarity index 80% rename from vendor/google.golang.org/cloud/compute/metadata/metadata.go rename to vendor/cloud.google.com/go/compute/metadata/metadata.go index 0a709598d..f9d2bef6c 100644 --- a/vendor/google.golang.org/cloud/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -17,7 +17,7 @@ // // This package is a wrapper around the GCE metadata service, // as documented at https://developers.google.com/compute/docs/metadata. -package metadata +package metadata // import "cloud.google.com/go/compute/metadata" import ( "encoding/json" @@ -27,6 +27,7 @@ import ( "net/http" "net/url" "os" + "runtime" "strings" "sync" "time" @@ -34,11 +35,20 @@ import ( "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" - "google.golang.org/cloud/internal" + "cloud.google.com/go/internal" ) -// metadataIP is the documented metadata server IP address. -const metadataIP = "169.254.169.254" +const ( + // metadataIP is the documented metadata server IP address. 
+ metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" +) type cachedValue struct { k string @@ -110,7 +120,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error) // deployments. To enable spoofing of the metadata service, the environment // variable GCE_METADATA_HOST is first inspected to decide where metadata // requests shall go. - host := os.Getenv("GCE_METADATA_HOST") + host := os.Getenv(metadataHostEnv) if host == "" { // Using 169.254.169.254 instead of "metadata" here because Go // binaries built with the "netgo" tag and without cgo won't @@ -163,32 +173,34 @@ func (c *cachedValue) get() (v string, err error) { return } -var onGCE struct { - sync.Mutex - set bool - v bool -} +var ( + onGCEOnce sync.Once + onGCE bool +) // OnGCE reports whether this process is running on Google Compute Engine. func OnGCE() bool { - defer onGCE.Unlock() - onGCE.Lock() - if onGCE.set { - return onGCE.v - } - onGCE.set = true - onGCE.v = testOnGCE() - return onGCE.v + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() } func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() resc := make(chan bool, 2) // Try two strategies in parallel. 
- // See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194 + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 go func() { res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) if err != nil { @@ -208,9 +220,53 @@ func testOnGCE() bool { resc <- strsContains(addrs, metadataIP) }() + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). return <-resc } +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. 
+ return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + // Subscribe subscribes to a value from the metadata service. // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // The suffix may contain query parameters. diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 000000000..8e0c8f8e5 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. 
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33b..000000000 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - 
ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' -) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? 
-// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index 1bd6057da..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - logger.Infof("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case 
sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 4be35c5fd..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - logger.Infof("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 index 2189eb6b6..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - 
switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 7b1b9ad3f..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) 
Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - // Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e946..000000000 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs 
groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 24062d420..000000000 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - logger.Infof("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 169f68dbe..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,136 +0,0 @@ -package ansiterm - -import ( - "errors" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state -} - -func CreateParser(initialState string, evtHandler 
AnsiEventHandler) *AnsiParser { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiParser.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.InfoLevel, - } - - parser := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} - - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, - } - - parser.currState = getState(initialState, parser.stateMap) - - logger.Infof("CreateParser: parser %p", parser) - return parser -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - logger.Warning("newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if 
err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index 8b69a67a5..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,103 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - logger.Infof("Parsed params: %v with length: %d", params, len(params)) - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - logger.Infof("getInt: %v", i) - return i -} - -func 
getInts(params []string, minCount int, dflt int) []int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - logger.Infof("getInts: %v", ints) - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 58750a2d2..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,122 +0,0 @@ -package ansiterm - -import ( - "fmt" -) - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - logger.Infof("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - logger.Infof("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - 
logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) - logger.Infof("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - - logger.Infof("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return 
ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493..000000000 --- a/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index daf2f0696..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. 
-const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac 
*ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. 
-func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 462d92f8e..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. -// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. 
-// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
- ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. 
-func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. -func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. 
-func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. 
-func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. -func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. 
-func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. 
-func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. -func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case 
ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 
4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index f015723ad..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h 
*windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // 
Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 706d27057..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h 
*windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. -func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. 
-func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 4d858ed61..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,726 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD -} - -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("winEventHandler.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - return &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. 
-func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - logger.Info("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. - logger.Info("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. 
- pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - logger.Info("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - logger.Info("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. 
- return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). - handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - 
logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - 
logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - logger.Info("set window failed:", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. 
- // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - logger.Infof("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, 
int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - logger.Infof("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - logger.Info("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - logger.Info("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - logger.Infof("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. 
-func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE deleted file mode 100644 index dd5aa7e8a..000000000 --- a/vendor/github.com/MakeNowJust/heredoc/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 TSUYUSATO Kitsune - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md deleted file mode 100644 index df12b43db..000000000 --- a/vendor/github.com/MakeNowJust/heredoc/README.md +++ /dev/null @@ -1,53 +0,0 @@ -#heredoc [![Build Status](https://drone.io/github.com/MakeNowJust/heredoc/status.png)](https://drone.io/github.com/MakeNowJust/heredoc/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/MakeNowJust/heredoc) - -##About - -Package heredoc provides the here-document with keeping indent. - -##Install - -```console -$ go get github.com/MakeNowJust/heredoc -``` - -##Import - -```go -// usual -import "github.com/MakeNowJust/heredoc" -// shortcuts -import . "github.com/MakeNowJust/heredoc/dot" -``` - -##Example - -```go -package main - -import ( - "fmt" - . "github.com/MakeNowJust/heredoc/dot" -) - -func main() { - fmt.Println(D(` - Lorem ipsum dolor sit amet, consectetur adipisicing elit, - sed do eiusmod tempor incididunt ut labore et dolore magna - aliqua. Ut enim ad minim veniam, ... - `)) - // Output: - // Lorem ipsum dolor sit amet, consectetur adipisicing elit, - // sed do eiusmod tempor incididunt ut labore et dolore magna - // aliqua. Ut enim ad minim veniam, ... - // -} -``` - -##API Document - - - [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc) - - [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot) - -##License - -This software is released under the MIT License, see LICENSE. 
diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go deleted file mode 100644 index 3978e30da..000000000 --- a/vendor/github.com/MakeNowJust/heredoc/heredoc.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2014 TSUYUSATO Kitsune -// This software is released under the MIT License. -// http://opensource.org/licenses/mit-license.php - -// Package heredoc provides the here-document with keeping indent. -// -// Golang supports raw-string syntax. -// doc := ` -// Foo -// Bar -// ` -// But raw-string cannot recognize indent. Thus such content is indented string, equivalent to -// "\n\tFoo\n\tBar\n" -// I dont't want this! -// -// However this problem is solved by package heredoc. -// doc := heredoc.Doc(` -// Foo -// Bar -// `) -// It is equivalent to -// "Foo\nBar\n" -package heredoc - -import ( - "fmt" - "strings" - "unicode" -) - -// heredoc.Doc retutns unindented string as here-document. -// -// Process of making here-document: -// 1. Find most little indent size. (Skip empty lines) -// 2. Remove this indents of lines. -func Doc(raw string) string { - skipFirstLine := false - if raw[0] == '\n' { - raw = raw[1:] - } else { - skipFirstLine = true - } - - minIndentSize := int(^uint(0) >> 1) // Max value of type int - lines := strings.Split(raw, "\n") - - // 1. - for i, line := range lines { - if i == 0 && skipFirstLine { - continue - } - - indentSize := 0 - for _, r := range []rune(line) { - if unicode.IsSpace(r) { - indentSize += 1 - } else { - break - } - } - - if len(line) == indentSize { - if i == len(lines)-1 && indentSize < minIndentSize { - lines[i] = "" - } - } else if indentSize < minIndentSize { - minIndentSize = indentSize - } - } - - // 2. 
- for i, line := range lines { - if i == 0 && skipFirstLine { - continue - } - - if len(lines[i]) >= minIndentSize { - lines[i] = line[minIndentSize:] - } - } - - return strings.Join(lines, "\n") -} - -// heredoc.Docf returns unindented and formatted string as here-document. -// This format is same with package fmt's format. -func Docf(raw string, args ...interface{}) string { - return fmt.Sprintf(Doc(raw), args...) -} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a00..000000000 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index ff23150dc..000000000 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 - - 1.5 - - tip -install: - - go get -t ./... -script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... 
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index f2c2bc211..000000000 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,66 +0,0 @@ -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/README.md 
b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 6e1721a74..000000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,388 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text 
-time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. 
We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. 
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| 
[Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| - - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. 
- -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. - - ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) - ``` - -Third party logging formatters: - -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. 
This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -logger, hook := NewNullLogger() -logger.Error("Hello error") - -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) - -hook.Reset() -assert.Nil(hook.LastEntry()) -``` diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go deleted file mode 100644 index dddd5f877..000000000 --- a/vendor/github.com/Sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/Sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/Sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go index 89e966e7b..17fe6f707 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -8,9 +8,6 @@ import ( "time" ) -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - // An entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Debug, Info, // Warn, Error, Fatal or Panic is called on it. 
These objects can be reused and @@ -56,11 +53,6 @@ func (entry *Entry) String() (string, error) { return reader.String(), err } -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - // Add a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) @@ -68,7 +60,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry { // Add a map of fields to the Entry. func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) + data := Fields{} for k, v := range entry.Data { data[k] = v } @@ -78,14 +70,12 @@ func (entry *Entry) WithFields(fields Fields) *Entry { return &Entry{Logger: entry.Logger, Data: data} } -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { +func (entry *Entry) log(level Level, msg string) { entry.Time = time.Now() entry.Level = level entry.Message = msg - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + if err := entry.Logger.Hooks.Fire(level, entry); err != nil { entry.Logger.mu.Lock() fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) entry.Logger.mu.Unlock() @@ -110,7 +100,7 @@ func (entry Entry) log(level Level, msg string) { // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. 
if level <= PanicLevel { - panic(&entry) + panic(entry) } } @@ -198,7 +188,6 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) { if entry.Logger.Level >= FatalLevel { entry.Fatal(fmt.Sprintf(format, args...)) } - os.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { @@ -245,7 +234,6 @@ func (entry *Entry) Fatalln(args ...interface{}) { if entry.Logger.Level >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } - os.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go index 9a0120ac1..a67e1b802 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -48,11 +48,6 @@ func AddHook(hook Hook) { std.Hooks.Add(hook) } -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go index 3f151cdc3..0da2b3653 100644 --- a/vendor/github.com/Sirupsen/logrus/hooks.go +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -11,11 +11,11 @@ type Hook interface { } // Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook +type levelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. 
-func (hooks LevelHooks) Add(hook Hook) { +func (hooks levelHooks) Add(hook Hook) { for _, level := range hook.Levels() { hooks[level] = append(hooks[level], hook) } @@ -23,7 +23,7 @@ func (hooks LevelHooks) Add(hook Hook) { // Fire all the hooks for the passed level. Used by `entry.log` to fire // appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { +func (hooks levelHooks) Fire(level Level, entry *Entry) error { for _, hook := range hooks[level] { if err := hook.Fire(entry); err != nil { return err diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go index 2ad6dc5cf..dcc4f1d9f 100644 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -24,12 +24,11 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { } prefixFieldClashes(data) - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat } - data["time"] = entry.Time.Format(timestampFormat) + data["time"] = entry.Time.Format(f.TimestampFormat) data["msg"] = entry.Message data["level"] = entry.Level.String() diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index 2fdb23176..3c07ea78c 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -8,13 +8,13 @@ import ( type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to + // file, or leave it default which is `os.Stdout`. You can also set this to // something more adventorous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. 
These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks + Hooks levelHooks // All log entries pass through the formatter before logged to Out. The // included formatters are `TextFormatter` and `JSONFormatter` for which // TextFormatter is the default. In development (when a TTY is attached) it @@ -37,7 +37,7 @@ type Logger struct { // var log = &Logger{ // Out: os.Stderr, // Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), +// Hooks: make(levelHooks), // Level: logrus.DebugLevel, // } // @@ -46,14 +46,14 @@ func New() *Logger { return &Logger{ Out: os.Stderr, Formatter: new(TextFormatter), - Hooks: make(LevelHooks), + Hooks: make(levelHooks), Level: InfoLevel, } } // Adds a field to the log entry, note that you it doesn't log until you call // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. +// Ff you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { return NewEntry(logger).WithField(key, value) } @@ -64,12 +64,6 @@ func (logger *Logger) WithFields(fields Fields) *Entry { return NewEntry(logger).WithFields(fields) } -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - return NewEntry(logger).WithError(err) -} - func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debugf(format, args...) @@ -108,7 +102,6 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { if logger.Level >= FatalLevel { NewEntry(logger).Fatalf(format, args...) 
} - os.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { @@ -155,7 +148,6 @@ func (logger *Logger) Fatal(args ...interface{}) { if logger.Level >= FatalLevel { NewEntry(logger).Fatal(args...) } - os.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { @@ -202,7 +194,6 @@ func (logger *Logger) Fatalln(args ...interface{}) { if logger.Level >= FatalLevel { NewEntry(logger).Fatalln(args...) } - os.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go index e59669111..43ee12e90 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -3,7 +3,6 @@ package logrus import ( "fmt" "log" - "strings" ) // Fields type, used to pass to `WithFields`. @@ -34,7 +33,7 @@ func (level Level) String() string { // ParseLevel takes a string level and returns the Logrus log level constant. func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { + switch lvl { case "panic": return PanicLevel, nil case "fatal": @@ -53,16 +52,6 @@ func ParseLevel(lvl string) (Level, error) { return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, -} - // These are the different logging levels. You can set the logging level to log // on your instance of logger, obtained with `logrus.New()`. const ( @@ -85,11 +74,7 @@ const ( ) // Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) +var _ StdLogger = &log.Logger{} // StdLogger is what your logrus-enabled library should take, that way // it'll accept a stdlib logger and a logrus logger. 
There's no standard @@ -107,37 +92,3 @@ type StdLogger interface { Panicf(string, ...interface{}) Panicln(...interface{}) } - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go new file mode 100644 index 000000000..8fe02a4ae --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go new file mode 100644 index 000000000..0428ee5d5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go @@ -0,0 +1,20 @@ +/* + Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. +*/ +package logrus + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go index b343b3a37..b8bebc13e 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux darwin freebsd openbsd netbsd dragonfly +// +build linux darwin freebsd openbsd package logrus @@ -12,9 +12,9 @@ import ( "unsafe" ) -// IsTerminal returns true if stderr's file descriptor is a terminal. +// IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal() bool { - fd := syscall.Stderr + fd := syscall.Stdout var termios Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go similarity index 67% rename from vendor/github.com/Sirupsen/logrus/terminal_bsd.go rename to vendor/github.com/Sirupsen/logrus/terminal_openbsd.go index 71f8d67a5..af609a53d 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go @@ -1,5 +1,3 @@ -// +build darwin freebsd openbsd netbsd dragonfly - package logrus import "syscall" diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index 3e70bf7bf..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go index 0146845d1..2e09f6f7e 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -18,9 +18,9 @@ var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") ) -// IsTerminal returns true if stderr's file descriptor is a terminal. +// IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal() bool { - fd := syscall.Stderr + fd := syscall.Stdout var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index 06ef20233..612417ff9 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -3,7 +3,6 @@ package logrus import ( "bytes" "fmt" - "runtime" "sort" "strings" "time" @@ -70,23 +69,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { prefixFieldClashes(entry.Data) - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + isColored := (f.ForceColors || isTerminal) && !f.DisableColors - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + if f.TimestampFormat == "" { + f.TimestampFormat = DefaultTimestampFormat } if isColored { - f.printColored(b, entry, keys, timestampFormat) + f.printColored(b, entry, keys) } else { if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) } f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } + f.appendKeyValue(b, "msg", entry.Message) for _, key := range keys { f.appendKeyValue(b, key, entry.Data[key]) } @@ -96,7 +91,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { var levelColor int switch entry.Level { case DebugLevel: @@ -114,11 +109,11 @@ func (f *TextFormatter) 
printColored(b *bytes.Buffer, entry *Entry, keys []strin if !f.FullTimestamp { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) } for _, k := range keys { v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) } } @@ -134,28 +129,21 @@ func needsQuoting(text string) bool { return true } -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { + switch value.(type) { case string: - if needsQuoting(value) { - b.WriteString(value) + if needsQuoting(value.(string)) { + fmt.Fprintf(b, "%v=%s ", key, value) } else { - fmt.Fprintf(b, "%q", value) + fmt.Fprintf(b, "%v=%q ", key, value) } case error: - errmsg := value.Error() - if needsQuoting(errmsg) { - b.WriteString(errmsg) + if needsQuoting(value.(error).Error()) { + fmt.Fprintf(b, "%v=%s ", key, value) } else { - fmt.Fprintf(b, "%q", value) + fmt.Fprintf(b, "%v=%q ", key, value) } default: - fmt.Fprint(b, value) + fmt.Fprintf(b, "%v=%v ", key, value) } - - b.WriteByte(' ') } diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 
-3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 
-2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 
-11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 
-28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 5171c5c55..000000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,142 +0,0 @@ -semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) 
[![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := 
semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" - -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - -Benchmarks ------ - - BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op - BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op - BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op - BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op - BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op - BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op - BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. 
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go index 448e1eaa0..bafa775d5 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -21,9 +21,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto deleted file mode 100644 index 001d33435..000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package authpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -// User is a single entry in the bucket authUsers -message User { - bytes name = 1; - bytes password = 2; - repeated string roles = 3; -} - -// Permission is a single entity -message Permission { - enum Type { - READ = 0; - WRITE = 1; - READWRITE = 2; - } - Type permType = 1; - - bytes key = 2; - bytes range_end = 3; -} - -// Role is a single entry in the bucket authRoles -message Role { - bytes name = 1; - - repeated Permission keyPermission = 2; -} diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md deleted file mode 100644 index 0bab9589c..000000000 --- a/vendor/github.com/coreos/etcd/client/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# etcd/client - -etcd/client is the Go client library for etcd. 
- -[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) - -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). - -## Install - -```bash -go get github.com/coreos/etcd/client -``` - -## Usage - -```go -package main - -import ( - "log" - "time" - - "golang.org/x/net/context" - "github.com/coreos/etcd/client" -) - -func main() { - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: client.DefaultTransport, - // set timeout per request to fail fast when the target endpoint is unavailable - HeaderTimeoutPerRequest: time.Second, - } - c, err := client.New(cfg) - if err != nil { - log.Fatal(err) - } - kapi := client.NewKeysAPI(c) - // set "/foo" key with "bar" value - log.Print("Setting '/foo' key with 'bar' value") - resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Set is done. Metadata is %q\n", resp) - } - // get "/foo" key's value - log.Print("Getting '/foo' key value") - resp, err = kapi.Get(context.Background(), "/foo", nil) - if err != nil { - log.Fatal(err) - } else { - // print common key info - log.Printf("Get is done. Metadata is %q\n", resp) - // print value - log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value) - } -} -``` - -## Error Handling - -etcd client might return three types of errors. - -- context error - -Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. 
If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered. - -- cluster error - -Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned. - -- response error - -If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error. - -Here is the example code to handle client errors: - -```go -cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}} -c, err := client.New(cfg) -if err != nil { - log.Fatal(err) -} - -kapi := client.NewKeysAPI(c) -resp, err := kapi.Set(ctx, "test", "bar", nil) -if err != nil { - if err == context.Canceled { - // ctx is canceled by another routine - } else if err == context.DeadlineExceeded { - // ctx is attached with a deadline and it exceeded - } else if cerr, ok := err.(*client.ClusterError); ok { - // process (cerr.Errors) - } else { - // bad cluster endpoints, which are not etcd servers - } -} -``` - - -## Caveat - -1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process. - -2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. 
For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened. - -3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. - -4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users could monitor experimental /health endpoint for member healthy information. We are improving it at [#3265](https://github.com/coreos/etcd/issues/3265). diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index eeeb8b57a..4c4d41eb9 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -305,7 +305,7 @@ func (c *httpClusterClient) SetEndpoints(eps []string) error { // If endpoints doesn't have the lu, just keep c.pinned = 0. // Forwarding between follower and leader would be required but it works. default: - return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode)) + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) } return nil @@ -404,7 +404,7 @@ func (c *httpClusterClient) Sync(ctx context.Context) error { c.Lock() defer c.Unlock() - eps := make([]string, 0) + var eps []string for _, m := range ms { eps = append(eps, m.ClientURLs...) 
} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go index aef5bf755..34618cdbd 100644 --- a/vendor/github.com/coreos/etcd/client/cluster_error.go +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -21,7 +21,11 @@ type ClusterError struct { } func (ce *ClusterError) Error() string { - return ErrClusterUnavailable.Error() + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s } func (ce *ClusterError) Detail() string { diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go index 748283aa9..16f10301a 100644 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -8,10 +8,11 @@ package client import ( "errors" "fmt" - codec1978 "github.com/ugorji/go/codec" "reflect" "runtime" time "time" + + codec1978 "github.com/ugorji/go/codec" ) const ( diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go index 62d5d506e..b74b9e0a5 100644 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -191,6 +191,10 @@ type SetOptions struct { // Dir specifies whether or not this Node should be created as a directory. Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. 
+ NoValueOnSuccess bool } type GetOptions struct { @@ -335,6 +339,7 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions act.TTL = opts.TTL act.Refresh = opts.Refresh act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess } doCtx := ctx @@ -523,15 +528,16 @@ func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { } type setAction struct { - Prefix string - Key string - Value string - PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool } func (a *setAction) HTTPRequest(ep url.URL) *http.Request { @@ -565,6 +571,9 @@ func (a *setAction) HTTPRequest(ep url.URL) *http.Request { if a.Refresh { form.Add("refresh", "true") } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } u.RawQuery = params.Encode() body := strings.NewReader(form.Encode()) diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go index 198bff965..15a8babff 100644 --- a/vendor/github.com/coreos/etcd/client/util.go +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -14,6 +14,20 @@ package client +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + // IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. func IsKeyNotFound(err error) bool { if cErr, ok := err.(Error); ok { @@ -21,3 +35,19 @@ func IsKeyNotFound(err error) bool { } return false } + +// IsRoleNotFound returns true if the error means role not found of v2 API. 
+func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means user not found of v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md deleted file mode 100644 index e434f9994..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# etcd/clientv3 - -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) - -`etcd/clientv3` is the official Go etcd client for v3. - -## Install - -```bash -go get github.com/coreos/etcd/clientv3 -``` - -## Get started - -Create client using `clientv3.New`: - -```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! -} -defer cli.Close() -``` - -etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses -[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. -If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, -pass `context.WithTimeout` to APIs: - -```go -ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := kvc.Put(ctx, "sample_key", "sample_value") -cancel() -if err != nil { - // handle error! -} -// use the response -``` - -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. 
For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). - -## Error Handling - -etcd client returns 2 types of errors: - -1. context error: canceled or deadline exceeded. -2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes). - -Here is the example code to handle client errors: - -```go -resp, err := kvc.Put(ctx, "", "") -if err != nil { - switch err { - case context.Canceled: - log.Fatalf("ctx is canceled by another routine: %v", err) - case context.DeadlineExceeded: - log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) - case rpctypes.ErrEmptyKey: - log.Fatalf("client-side error: %v", err) - default: - log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) - } -} -``` - -## Examples - -More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index 8c3c047f4..9d981cfb1 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -43,6 +43,7 @@ type ( AuthRoleListResponse pb.AuthRoleListResponse PermissionType authpb.Permission_Type + Permission authpb.Permission ) const ( diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go index b7fba6a20..b484b9756 100644 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ b/vendor/github.com/coreos/etcd/clientv3/balancer.go @@ -42,9 +42,16 @@ type simpleBalancer struct { // upc closes when upEps transitions from empty to non-zero or the balancer closes. 
upc chan struct{} + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + host2ep map[string]string + // pinAddr is the currently pinned address; set to the empty string on // intialization and shutdown. pinAddr string + + closed bool } func newSimpleBalancer(eps []string) *simpleBalancer { @@ -60,11 +67,12 @@ func newSimpleBalancer(eps []string) *simpleBalancer { readyc: make(chan struct{}), upEps: make(map[string]struct{}), upc: make(chan struct{}), + host2ep: getHost2ep(eps), } return sb } -func (b *simpleBalancer) Start(target string) error { return nil } +func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } func (b *simpleBalancer) ConnectNotify() <-chan struct{} { b.mu.Lock() @@ -72,17 +80,70 @@ func (b *simpleBalancer) ConnectNotify() <-chan struct{} { return b.upc } +func (b *simpleBalancer) getEndpoint(host string) string { + b.mu.Lock() + defer b.mu.Unlock() + return b.host2ep[host] +} + +func getHost2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} + +func (b *simpleBalancer) updateAddrs(eps []string) { + np := getHost2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.host2ep) + for k, v := range np { + if b.host2ep[k] != v { + match = false + break + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.host2ep = np + + addrs := make([]grpc.Address, 0, len(eps)) + for i := range eps { + addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) + } + b.addrs = addrs + b.notifyCh <- addrs +} + func (b *simpleBalancer) Up(addr grpc.Address) func(error) { b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. 
We add this check + // to "fix" it up at application layer. Or our simplerBalancer + // might panic since b.upc is closed. + if b.closed { + return func(err error) {} + } + if len(b.upEps) == 0 { // notify waiting Get()s and pin first connected address close(b.upc) b.pinAddr = addr.Addr } b.upEps[addr.Addr] = struct{}{} - b.mu.Unlock() + // notify client that a connection is up b.readyOnce.Do(func() { close(b.readyc) }) + return func(err error) { b.mu.Lock() delete(b.upEps, addr.Addr) @@ -128,13 +189,19 @@ func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } func (b *simpleBalancer) Close() error { b.mu.Lock() + defer b.mu.Unlock() + // In case gRPC calls close twice. TODO: remove the checking + // when we are sure that gRPC wont call close twice. + if b.closed { + return nil + } + b.closed = true close(b.notifyCh) // terminate all waiting Get()s b.pinAddr = "" if len(b.upEps) == 0 { close(b.upc) } - b.mu.Unlock() return nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index e6903fc2f..148addea8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -18,8 +18,6 @@ import ( "crypto/tls" "errors" "fmt" - "io/ioutil" - "log" "net" "net/url" "strings" @@ -29,6 +27,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" ) @@ -98,6 +97,44 @@ func (c *Client) Ctx() context.Context { return c.ctx } // Endpoints lists the registered endpoints for the client. func (c *Client) Endpoints() []string { return c.cfg.Endpoints } +// SetEndpoints updates client's endpoints. +func (c *Client) SetEndpoints(eps ...string) { + c.cfg.Endpoints = eps + c.balancer.updateAddrs(eps) +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. 
+func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) + if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { + logger.Println("Auto sync endpoints failed:", err) + } + } + } +} + type authTokenCredential struct { token string } @@ -112,19 +149,31 @@ func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...str }, nil } -func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) { +func parseEndpoint(endpoint string) (proto string, host string, scheme string) { proto = "tcp" host = endpoint - creds = c.creds url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { return } + scheme = url.Scheme + // strip scheme:// prefix since grpc dials by host host = url.Host switch url.Scheme { + case "http", "https": case "unix": proto = "unix" + default: + proto, host = "", "" + } + return +} + +func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { + creds = c.creds + switch scheme { + case "unix": case "http": creds = nil case "https": @@ -135,7 +184,7 @@ func (c *Client) dialTarget(endpoint string) (proto string, host string, creds * emptyCreds := credentials.NewTLS(tlsconfig) creds = &emptyCreds default: - return "", "", nil + creds = nil } return } @@ -147,17 +196,8 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts } opts = append(opts, dopts...) - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. 
To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - host2ep := make(map[string]string) - for i := range c.cfg.Endpoints { - _, host, _ := c.dialTarget(c.cfg.Endpoints[i]) - host2ep[host] = c.cfg.Endpoints[i] - } - f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := c.dialTarget(host2ep[host]) + proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) if proto == "" { return nil, fmt.Errorf("unknown scheme for %q", host) } @@ -170,7 +210,10 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts } opts = append(opts, grpc.WithDialer(f)) - _, _, creds := c.dialTarget(endpoint) + creds := c.creds + if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { + creds = c.processCreds(scheme) + } if creds != nil { opts = append(opts, grpc.WithTransportCredentials(*creds)) } else { @@ -272,13 +315,8 @@ func newClient(cfg *Config) (*Client, error) { client.Watcher = NewWatcher(client) client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) - if cfg.Logger != nil { - logger.Set(cfg.Logger) - } else { - // disable client side grpc by default - logger.Set(log.New(ioutil.Discard, "", 0)) - } + go client.autoSync() return client, nil } @@ -294,17 +332,14 @@ func isHaltErr(ctx context.Context, err error) bool { if err == nil { return false } - eErr := rpctypes.Error(err) - if _, ok := eErr.(rpctypes.EtcdError); ok { - return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader - } - // treat etcdserver errors not recognized by the client as halting - return isConnClosing(err) || strings.Contains(err.Error(), "etcdserver:") -} - -// isConnClosing returns true if the error matches a grpc client closing error -func isConnClosing(err error) bool { - return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) + code := grpc.Code(err) + // Unavailable codes mean the system will be right back. 
+ // (e.g., can't connect, lost leader) + // Treat Internal codes as if something failed, leaving the + // system in an inconsistent state, but retrying could make progress. + // (e.g., failed in middle of send, corrupted frame) + // TODO: are permanent Internal errors possible from grpc? + return code != codes.Unavailable && code != codes.Internal } func toErr(ctx context.Context, err error) error { @@ -312,12 +347,20 @@ func toErr(ctx context.Context, err error) error { return nil } err = rpctypes.Error(err) - switch { - case ctx.Err() != nil && strings.Contains(err.Error(), "context"): - err = ctx.Err() - case strings.Contains(err.Error(), ErrNoAvailableEndpoints.Error()): + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + code := grpc.Code(err) + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + case codes.Unavailable: err = ErrNoAvailableEndpoints - case strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()): + case codes.FailedPrecondition: err = grpc.ErrClientConnClosing } return err diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index 066b41ece..d1d5f4090 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -28,15 +28,16 @@ type Config struct { // Endpoints is a list of URLs Endpoints []string + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration + // DialTimeout is the timeout for failing to establish a connection. DialTimeout time.Duration // TLS holds the client secure credentials, if any. TLS *tls.Config - // Logger is the logger used by client library. 
- Logger Logger - // Username is a username for authentication Username string @@ -46,6 +47,7 @@ type Config struct { type yamlConfig struct { Endpoints []string `json:"endpoints"` + AutoSyncInterval time.Duration `json:"auto-sync-interval"` DialTimeout time.Duration `json:"dial-timeout"` InsecureTransport bool `json:"insecure-transport"` InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` @@ -68,8 +70,9 @@ func configFromFile(fpath string) (*Config, error) { } cfg := &Config{ - Endpoints: yc.Endpoints, - DialTimeout: yc.DialTimeout, + Endpoints: yc.Endpoints, + AutoSyncInterval: yc.AutoSyncInterval, + DialTimeout: yc.DialTimeout, } if yc.InsecureTransport { diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index 27f9110f8..834b17d34 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -85,6 +85,10 @@ func NewKV(c *Client) KV { return &kv{remote: RetryKVClient(c)} } +func NewKVFromKVClient(remote pb.KVClient) KV { + return &kv{remote: remote} +} + func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { r, err := kv.Do(ctx, OpPut(key, val, opts...)) return r.put, toErr(ctx, err) @@ -137,34 +141,20 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { // TODO: handle other ops case tRange: var resp *pb.RangeResponse - r := &pb.RangeRequest{ - Key: op.key, - RangeEnd: op.end, - Limit: op.limit, - Revision: op.rev, - Serializable: op.serializable, - KeysOnly: op.keysOnly, - CountOnly: op.countOnly, - } - if op.sort != nil { - r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) - r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) - } - - resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false)) + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } case tPut: var resp 
*pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)} + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} resp, err = kv.remote.Put(ctx, r) if err == nil { return OpResponse{put: (*PutResponse)(resp)}, nil } case tDeleteRange: var resp *pb.DeleteRangeResponse - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end} + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} resp, err = kv.remote.DeleteRange(ctx, r) if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index bf8919c34..ed8bb0a53 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -44,6 +44,21 @@ type LeaseKeepAliveResponse struct { TTL int64 } +// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + const ( // defaultTTL is the assumed lease TTL used for the first keepalive // deadline before the actual TTL is known to the client. @@ -61,6 +76,9 @@ type Lease interface { // Revoke revokes the given lease. Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + // KeepAlive keeps the given lease alive forever. 
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) @@ -141,7 +159,7 @@ func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, err return gresp, nil } if isHaltErr(cctx, err) { - return nil, toErr(ctx, err) + return nil, toErr(cctx, err) } if nerr := l.newStream(); nerr != nil { return nil, nerr @@ -170,6 +188,30 @@ func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, } } +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + cctx, cancel := context.WithCancel(ctx) + done := cancelWhenStop(cancel, l.stopCtx.Done()) + defer close(done) + + for { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(cctx, r) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil + } + if isHaltErr(cctx, err) { + return nil, toErr(cctx, err) + } + } +} + func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize) @@ -390,7 +432,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { return } - tosend := make([]LeaseID, 0) + var tosend []LeaseID now := time.Now() l.mu.Lock() diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 6e57c4e7c..519db45d8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -15,13 +15,15 @@ package clientv3 import ( + "io/ioutil" "log" - "os" "sync" "google.golang.org/grpc/grpclog" ) +// Logger is the logger used by client library. +// It implements grpclog.Logger interface. 
type Logger grpclog.Logger var ( @@ -34,20 +36,36 @@ type settableLogger struct { } func init() { - // use go's standard logger by default like grpc + // disable client side logs by default logger.mu.Lock() - logger.l = log.New(os.Stderr, "", log.LstdFlags) + logger.l = log.New(ioutil.Discard, "", 0) + + // logger has to override the grpclog at initialization so that + // any changes to the grpclog go through logger with locking + // instead of through SetLogger + // + // now updates only happen through settableLogger.set grpclog.SetLogger(&logger) logger.mu.Unlock() } -func (s *settableLogger) Set(l Logger) { +// SetLogger sets client-side Logger. By default, logs are disabled. +func SetLogger(l Logger) { + logger.set(l) +} + +// GetLogger returns the current logger. +func GetLogger() Logger { + return logger.get() +} + +func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l s.mu.Unlock() } -func (s *settableLogger) Get() Logger { +func (s *settableLogger) get() Logger { s.mu.RLock() l := logger.l s.mu.RUnlock() @@ -56,9 +74,9 @@ func (s *settableLogger) Get() Logger { // implement the grpclog.Logger interface -func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) } -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) } -func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) 
} +func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index 89698be23..b0f24b309 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -14,9 +14,7 @@ package clientv3 -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) +import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" type opType int @@ -43,40 +41,63 @@ type Op struct { serializable bool keysOnly bool countOnly bool + minModRev int64 + maxModRev int64 + minCreateRev int64 + maxCreateRev int64 // for range, watch rev int64 + // for watch, put, delete + prevKV bool + // progressNotify is for progress updates. progressNotify bool + // createdNotify is for created event + createdNotify bool + // filters for watchers + filterPut bool + filterDelete bool // for put val []byte leaseID LeaseID } +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: - r := &pb.RangeRequest{ - Key: op.key, - RangeEnd: op.end, - Limit: op.limit, - Revision: op.rev, - Serializable: op.serializable, - KeysOnly: op.keysOnly, - CountOnly: 
op.countOnly, - } - if op.sort != nil { - r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) - r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) - } - return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}} + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)} + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} case tDeleteRange: - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end} + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} default: panic("Unknown Op") @@ -109,6 +130,14 @@ func OpDelete(key string, opts ...OpOption) Op { panic("unexpected serializable in delete") case ret.countOnly: panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") } return ret } @@ -128,7 +157,15 @@ func OpPut(key, val string, opts ...OpOption) Op { case ret.serializable: panic("unexpected serializable in put") case ret.countOnly: - panic("unexpected countOnly in delete") + panic("unexpected countOnly in put") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") } return 
ret } @@ -146,7 +183,11 @@ func opWatch(key string, opts ...OpOption) Op { case ret.serializable: panic("unexpected serializable in watch") case ret.countOnly: - panic("unexpected countOnly in delete") + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") } return ret } @@ -178,6 +219,14 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } // 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. func WithSort(target SortTarget, order SortOrder) OpOption { return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // client should ignore SortOrder if the target is SortByKey. + order = SortNone + } op.sort = &SortOption{target, order} } } @@ -241,6 +290,18 @@ func WithCountOnly() OpOption { return func(op *Op) { op.countOnly = true } } +// WithMinModRev filters out keys for Get with modification revisions less than the given revision. +func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } + +// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. +func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } + +// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. +func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } + +// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. 
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } + // WithFirstCreate gets the key with the oldest creation revision in the request range. func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } @@ -264,10 +325,65 @@ func withTop(target SortTarget, order SortOrder) []OpOption { return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} } -// WithProgressNotify makes watch server send periodic progress updates. +// WithProgressNotify makes watch server send periodic progress updates +// every 10 minutes when there is no incoming events. // Progress updates have zero events in WatchResponse. func WithProgressNotify() OpOption { return func(op *Op) { op.progressNotify = true } } + +// WithCreatedNotify makes watch server sends the created event. +func WithCreatedNotify() OpOption { + return func(op *Op) { + op.createdNotify = true + } +} + +// WithFilterPut discards PUT events from the watcher. +func WithFilterPut() OpOption { + return func(op *Op) { op.filterPut = true } +} + +// WithFilterDelete discards DELETE events from the watcher. +func WithFilterDelete() OpOption { + return func(op *Op) { op.filterDelete = true } +} + +// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, +// nothing will be returned. +func WithPrevKV() OpOption { + return func(op *Op) { + op.prevKV = true + } +} + +// LeaseOp represents an Operation that lease can execute. +type LeaseOp struct { + id LeaseID + + // for TimeToLive + attachedKeys bool +} + +// LeaseOption configures lease operations. +type LeaseOption func(*LeaseOp) + +func (op *LeaseOp) applyOpts(opts []LeaseOption) { + for _, opt := range opts { + opt(op) + } +} + +// WithAttachedKeys requests lease timetolive API to return +// attached keys of given lease ID. 
+func WithAttachedKeys() LeaseOption { + return func(op *LeaseOp) { op.attachedKeys = true } +} + +func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { + ret := &LeaseOp{id: id} + ret.applyOpts(opts) + return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 3029ed8ea..1084c63da 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -15,9 +15,11 @@ package clientv3 import ( + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) type rpcFunc func(ctx context.Context) error @@ -27,8 +29,16 @@ func (c *Client) newRetryWrapper() retryRpcFunc { return func(rpcCtx context.Context, f rpcFunc) { for { err := f(rpcCtx) - // ignore grpc conn closing on fail-fast calls; they are transient errors - if err == nil || !isConnClosing(err) { + if err == nil { + return + } + // only retry if unavailable + if grpc.Code(err) != codes.Unavailable { + return + } + // always stop retry on etcd errors + eErr := rpctypes.Error(err) + if _, ok := eErr.(rpctypes.EtcdError); ok { return } select { diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index a451e33ac..a61decd64 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -19,6 +19,7 @@ import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "golang.org/x/net/context" + "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. 
@@ -152,7 +153,12 @@ func (txn *txn) Commit() (*TxnResponse, error) { func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - resp, err := txn.kv.remote.Txn(txn.ctx, r) + + var opts []grpc.CallOption + if !txn.isWrite { + opts = []grpc.CallOption{grpc.FailFast(false)} + } + resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index afcc3b1af..1d652c81a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -61,6 +61,9 @@ type WatchResponse struct { // the channel sends a final response that has Canceled set to true with a non-nil Err(). Canceled bool + // Created is used to indicate the creation of the watcher. + Created bool + closeErr error } @@ -89,7 +92,7 @@ func (wr *WatchResponse) Err() error { // IsProgressNotify returns true if the WatchResponse is progress notification. func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 } // watcher implements the Watcher interface @@ -98,10 +101,12 @@ type watcher struct { // mu protects the grpc streams map mu sync.RWMutex + // streams holds all the active grpc streams keyed by ctx value. streams map[string]*watchGrpcStream } +// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
type watchGrpcStream struct { owner *watcher remote pb.WatchClient @@ -112,10 +117,10 @@ type watchGrpcStream struct { ctxKey string cancel context.CancelFunc - // mu protects the streams map - mu sync.RWMutex - // streams holds all active watchers - streams map[int64]*watcherStream + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream // reqc sends a watch request from Watch() to the main goroutine reqc chan *watchRequest @@ -127,8 +132,12 @@ type watchGrpcStream struct { donec chan struct{} // errc transmits errors from grpc Recv to the watch stream reconn logic errc chan error + // closingc gets the watcherStream of closing watchers + closingc chan *watcherStream - // the error that closed the watch stream + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream closeErr error } @@ -138,8 +147,14 @@ type watchRequest struct { key string end string rev int64 - // progressNotify is for progress updates. + // send created notification event if this field is true + createdNotify bool + // progressNotify is for progress updates progressNotify bool + // filters is the list of events to filter out + filters []pb.WatchCreateRequest_FilterType + // get the previous key-value pair before the event happens + prevKV bool // retc receives a chan WatchResponse once the watcher is established retc chan chan WatchResponse } @@ -150,20 +165,27 @@ type watcherStream struct { initReq watchRequest // outc publishes watch responses to subscriber - outc chan<- WatchResponse + outc chan WatchResponse // recvc buffers watch responses before publishing recvc chan *WatchResponse - id int64 + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. 
+ closing bool + // id is the registered watch id on the grpc stream + id int64 - // lastRev is revision last successfully sent over outc - lastRev int64 - // resumec indicates the stream must recover at a given revision - resumec chan int64 + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse } func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) +} + +func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { return &watcher{ - remote: pb.NewWatchClient(c.conn), + remote: wc, streams: make(map[string]*watchGrpcStream), } } @@ -182,18 +204,20 @@ func (vc *valCtx) Err() error { return nil } func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { ctx, cancel := context.WithCancel(&valCtx{inctx}) wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - ctx: ctx, - ctxKey: fmt.Sprintf("%v", inctx), - cancel: cancel, - streams: make(map[int64]*watcherStream), - - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - stopc: make(chan struct{}), - donec: make(chan struct{}), - errc: make(chan error, 1), + owner: w, + remote: w.remote, + ctx: ctx, + ctxKey: fmt.Sprintf("%v", inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + stopc: make(chan struct{}), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -203,14 +227,24 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { ow := opWatch(key, opts...) 
- retc := make(chan chan WatchResponse, 1) + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + wr := &watchRequest{ ctx: ctx, + createdNotify: ow.createdNotify, key: string(ow.key), end: string(ow.end), rev: ow.rev, progressNotify: ow.progressNotify, - retc: retc, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), } ok := false @@ -242,7 +276,6 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch case reqc <- wr: ok = true case <-wr.ctx.Done(): - wgs.stopIfEmpty() case <-donec: if wgs.closeErr != nil { closeCh <- WatchResponse{closeErr: wgs.closeErr} @@ -255,7 +288,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch // receive channel if ok { select { - case ret := <-retc: + case ret := <-wr.retc: return ret case <-ctx.Done(): case <-donec: @@ -286,12 +319,7 @@ func (w *watcher) Close() (err error) { } func (w *watchGrpcStream) Close() (err error) { - w.mu.Lock() - if w.stopc != nil { - close(w.stopc) - w.stopc = nil - } - w.mu.Unlock() + close(w.stopc) <-w.donec select { case err = <-w.errc: @@ -300,67 +328,57 @@ func (w *watchGrpcStream) Close() (err error) { return toErr(w.ctx, err) } -func (w *watchGrpcStream) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) { - if pendingReq == nil { - // no pending request; ignore - return - } - if resp.Canceled || resp.CompactRevision != 0 { - // a cancel at id creation time means the start revision has - // been compacted out of the store - ret := make(chan WatchResponse, 1) - ret <- WatchResponse{ - Header: *resp.Header, - CompactRevision: resp.CompactRevision, - Canceled: true} - close(ret) - pendingReq.retc <- ret - return +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + 
delete(w.streams, wgs.ctxKey) } + w.mu.Unlock() +} - ret := make(chan WatchResponse) +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { if resp.WatchId == -1 { // failed; no channel - close(ret) - pendingReq.retc <- ret + close(ws.recvc) return } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} - ws := &watcherStream{ - initReq: *pendingReq, - id: resp.WatchId, - outc: ret, - // buffered so unlikely to block on sending while holding mu - recvc: make(chan *WatchResponse, 4), - resumec: make(chan int64), - } - - if pendingReq.rev == 0 { - // note the header revision so that a put following a current watcher - // disconnect will arrive on the watcher channel after reconnect - ws.initReq.rev = resp.Header.Revision +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): } - - w.mu.Lock() - w.streams[ws.id] = ws - w.mu.Unlock() - - // pass back the subscriber channel for the watcher - pendingReq.retc <- ret - - // send messages to subscriber - go w.serveStream(ws) + close(ws.outc) } -// closeStream closes the watcher resources and removes it -func (w *watchGrpcStream) closeStream(ws *watcherStream) { - w.mu.Lock() - // cancels request stream; subscriber receives nil channel - close(ws.initReq.retc) +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } // close subscriber's channel - close(ws.outc) - delete(w.streams, ws.id) - w.mu.Unlock() + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) + } else { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + 
} + } } // run is the root of the goroutines for managing a watcher client @@ -368,66 +386,79 @@ func (w *watchGrpcStream) run() { var wc pb.Watch_WatchClient var closeErr error + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) + defer func() { - w.owner.mu.Lock() w.closeErr = closeErr - if w.owner.streams != nil { - delete(w.owner.streams, w.ctxKey) + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + } + } + w.joinSubstreams() + for toClose := len(w.substreams) + len(w.resuming); toClose > 0; toClose-- { + w.closeSubstream(<-w.closingc) } - close(w.donec) - w.owner.mu.Unlock() - w.cancel() - }() - // already stopped? - w.mu.RLock() - stopc := w.stopc - w.mu.RUnlock() - if stopc == nil { - return - } + w.owner.closeStream(w) + }() // start a stream with the etcd grpc server if wc, closeErr = w.newWatchClient(); closeErr != nil { return } - var pendingReq, failedReq *watchRequest - curReqC := w.reqc cancelSet := make(map[int64]struct{}) for { select { // Watch() requested - case pendingReq = <-curReqC: - // no more watch requests until there's a response - curReqC = nil - if err := wc.Send(pendingReq.toPB()); err == nil { - // pendingReq now waits on w.respc - break + case wreq := <-w.reqc: + outc := make(chan WatchResponse, 1) + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbufffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + wc.Send(ws.initReq.toPB()) } - 
failedReq = pendingReq // New events from the watch client case pbresp := <-w.respc: switch { case pbresp.Created: - // response to pending req, try to add - w.addStream(pbresp, pendingReq) - pendingReq = nil - curReqC = w.reqc + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } case pbresp.Canceled: delete(cancelSet, pbresp.WatchId) - // shutdown serveStream, if any - w.mu.Lock() - if ws, ok := w.streams[pbresp.WatchId]; ok { + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc close(ws.recvc) - delete(w.streams, ws.id) - } - numStreams := len(w.streams) - w.mu.Unlock() - if numStreams == 0 { - // don't leak watcher streams - return + closing[ws] = struct{}{} } default: // dispatch to appropriate watch stream @@ -448,57 +479,66 @@ func (w *watchGrpcStream) run() { wc.Send(req) } // watch client failed to recv; spawn another if possible - // TODO report watch client errors from errc? 
case err := <-w.errc: - if toErr(w.ctx, err) == v3rpc.ErrNoLeader { + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err return } if wc, closeErr = w.newWatchClient(); closeErr != nil { return } - curReqC = w.reqc - if pendingReq != nil { - failedReq = pendingReq + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) } cancelSet = make(map[int64]struct{}) - case <-stopc: + case <-w.stopc: return + case ws := <-w.closingc: + w.closeSubstream(ws) + delete(closing, ws) + if len(w.substreams)+len(w.resuming) == 0 { + // no more watchers on this stream, shutdown + return + } } + } +} - // send failed; queue for retry - if failedReq != nil { - go func(wr *watchRequest) { - select { - case w.reqc <- wr: - case <-wr.ctx.Done(): - case <-w.donec: - } - }(pendingReq) - failedReq = nil - pendingReq = nil +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. 
+func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] } + w.resuming = w.resuming[1:len(w.resuming)] } + return nil } // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - w.mu.RLock() - defer w.mu.RUnlock() - ws, ok := w.streams[pbresp.WatchId] + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false + } events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) } - if ok { - wr := &WatchResponse{ - Header: *pbresp.Header, - Events: events, - CompactRevision: pbresp.CompactRevision, - Canceled: pbresp.Canceled} - ws.recvc <- wr - } - return ok + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true } // serveWatchClient forwards messages from the grpc stream to run() @@ -520,134 +560,126 @@ func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { } } -// serveStream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveStream(ws *watcherStream) { - var closeErr error - emptyWr := &WatchResponse{} - wrs := []*WatchResponse{} +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev resuming := false - closing := false - for !closing { + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + }() + + emptyWr := &WatchResponse{} + for { curWr := emptyWr outc := ws.outc - if 
len(wrs) > 0 { - curWr = wrs[0] + + if len(ws.buf) > 0 && ws.buf[0].Created { + select { + case ws.initReq.retc <- ws.outc: + // send first creation event and only if requested + if !ws.initReq.createdNotify { + ws.buf = ws.buf[1:] + } + default: + } + } + + if len(ws.buf) > 0 { + curWr = ws.buf[0] } else { outc = nil } select { case outc <- *curWr: - if wrs[0].Err() != nil { - closing = true - break - } - var newRev int64 - if len(wrs[0].Events) > 0 { - newRev = wrs[0].Events[len(wrs[0].Events)-1].Kv.ModRevision - } else { - newRev = wrs[0].Header.Revision - } - if newRev != ws.lastRev { - ws.lastRev = newRev + if ws.buf[0].Err() != nil { + return } - wrs[0] = nil - wrs = wrs[1:] + ws.buf[0] = nil + ws.buf = ws.buf[1:] case wr, ok := <-ws.recvc: if !ok { - // shutdown from closeStream + // shutdown from closeSubstream return } - // resume up to last seen event if disconnected - if resuming && wr.Err() == nil { - resuming = false - // trim events already seen - for i := 0; i < len(wr.Events); i++ { - if wr.Events[i].Kv.ModRevision > ws.lastRev { - wr.Events = wr.Events[i:] - break - } - } - // only forward new events - if wr.Events[0].Kv.ModRevision == ws.lastRev { - break - } + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + nextRev = wr.Header.Revision + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 } - resuming = false - // TODO don't keep buffering if subscriber stops reading - wrs = append(wrs, wr) - case resumeRev := <-ws.resumec: - wrs = nil - resuming = true - if resumeRev == -1 { - // pause serving stream while resume gets set up - break - } - if resumeRev != ws.lastRev { - panic("unexpected resume revision") - } - case <-w.donec: - closing = true - closeErr = w.closeErr + ws.initReq.rev = nextRev case <-ws.initReq.ctx.Done(): - closing = true - } - } - - // try to send off close error - if closeErr != nil { - select { - case ws.outc <- WatchResponse{closeErr: w.closeErr}: - case 
<-w.donec: - case <-time.After(closeSendErrTimeout): + return + case <-resumec: + resuming = true + return } } - - w.closeStream(ws) - w.stopIfEmpty() // lazily send cancel message if events on missing id } -func (wgs *watchGrpcStream) stopIfEmpty() { - wgs.mu.Lock() - if len(wgs.streams) == 0 && wgs.stopc != nil { - close(wgs.stopc) - wgs.stopc = nil - } - wgs.mu.Unlock() -} - func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - ws, rerr := w.resume() - if rerr != nil { - return nil, rerr + // connect to grpc stream + wc, err := w.openWatchClient() + if err != nil { + return nil, v3rpc.Error(err) } - go w.serveWatchClient(ws) - return ws, nil + // mark all substreams as resuming + if len(w.substreams)+len(w.resuming) > 0 { + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + for _, ws := range w.resuming { + if ws == nil || ws.closing { + continue + } + ws.donec = make(chan struct{}) + go w.serveSubstream(ws, w.resumec) + } + } + w.substreams = make(map[int64]*watcherStream) + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil } -// resume creates a new WatchClient with all current watchers reestablished -func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) { - for { - if ws, err = w.openWatchClient(); err != nil { - break - } else if err = w.resumeWatchers(ws); err == nil { - break +// joinSubstream waits for all substream goroutines to complete +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec } } - return ws, v3rpc.Error(err) } // openWatchClient retries opening a watchclient until retryConnection fails func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { - w.mu.Lock() - stopc := w.stopc - w.mu.Unlock() - if stopc == nil { + 
select { + case <-w.stopc: if err == nil { - err = context.Canceled + return nil, context.Canceled } return nil, err + default: } if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { break @@ -659,48 +691,6 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// resumeWatchers rebuilds every registered watcher on a new client -func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error { - w.mu.RLock() - streams := make([]*watcherStream, 0, len(w.streams)) - for _, ws := range w.streams { - streams = append(streams, ws) - } - w.mu.RUnlock() - - for _, ws := range streams { - // pause serveStream - ws.resumec <- -1 - - // reconstruct watcher from initial request - if ws.lastRev != 0 { - ws.initReq.rev = ws.lastRev - } - if err := wc.Send(ws.initReq.toPB()); err != nil { - return err - } - - // wait for request ack - resp, err := wc.Recv() - if err != nil { - return err - } else if len(resp.Events) != 0 || !resp.Created { - return fmt.Errorf("watcher: unexpected response (%+v)", resp) - } - - // id may be different since new remote watcher; update map - w.mu.Lock() - delete(w.streams, ws.id) - ws.id = resp.WatchId - w.streams[ws.id] = ws - w.mu.Unlock() - - // unpause serveStream - ws.resumec <- ws.lastRev - } - return nil -} - // toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ @@ -708,6 +698,8 @@ func (wr *watchRequest) toPB() *pb.WatchRequest { Key: []byte(wr.key), RangeEnd: []byte(wr.end), ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, } cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} return &pb.WatchRequest{RequestUnion: cr} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 
ecf5a2067..183a04f3a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -36,7 +36,8 @@ var ( ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid") ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found") - ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large") + ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large") + ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests") ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist") ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role") @@ -45,13 +46,17 @@ var ( ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists") ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found") ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password") - ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied") + ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied") ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user") ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") + ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") - ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") - ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") - ErrGRPCStopped = grpc.Errorf(codes.Unavailable, 
"etcdserver: server stopped") + ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") + ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") + ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped") + ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out") + ErrGRPCTimeoutDueToLeaderFail = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure") + ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") errStringToError = map[string]error{ grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, @@ -69,7 +74,8 @@ var ( grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, - grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, + grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, @@ -81,10 +87,14 @@ var ( grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, - - grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, - grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, - grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, + + grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, + grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, + grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, + grpc.ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, + grpc.ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, } // client-side error @@ -104,6 +114,7 @@ var ( 
ErrMemberNotFound = Error(ErrGRPCMemberNotFound) ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) + ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) @@ -115,10 +126,14 @@ var ( ErrPermissionDenied = Error(ErrGRPCPermissionDenied) ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) - - ErrNoLeader = Error(ErrGRPCNoLeader) - ErrNotCapable = Error(ErrGRPCNotCapable) - ErrStopped = Error(ErrGRPCStopped) + ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) + + ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotCapable = Error(ErrGRPCNotCapable) + ErrStopped = Error(ErrGRPCStopped) + ErrTimeout = Error(ErrGRPCTimeout) + ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) + ErrUnhealthy = Error(ErrGRPCUnhealthy) ) // EtcdError defines gRPC server errors. diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index 746e9a116..a6985527a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -45,6 +45,8 @@ LeaseRevokeResponse LeaseKeepAliveRequest LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse Member MemberAddRequest MemberAddResponse @@ -102,9 +104,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto deleted file mode 100644 index 25e0aca5d..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto2"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message Request { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional string Method = 2 [(gogoproto.nullable) = false]; - optional string Path = 3 [(gogoproto.nullable) = false]; - optional string Val = 4 [(gogoproto.nullable) = false]; - optional bool Dir = 5 [(gogoproto.nullable) = false]; - optional string PrevValue = 6 [(gogoproto.nullable) = false]; - optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false]; - optional bool PrevExist = 8 [(gogoproto.nullable) = true]; - optional int64 Expiration = 9 [(gogoproto.nullable) = false]; - optional bool Wait = 10 [(gogoproto.nullable) = false]; - optional uint64 Since = 11 [(gogoproto.nullable) = false]; - optional bool Recursive = 12 [(gogoproto.nullable) = false]; - optional bool Sorted = 13 [(gogoproto.nullable) = false]; - optional bool Quorum = 14 [(gogoproto.nullable) = false]; - optional int64 Time = 15 [(gogoproto.nullable) = false]; - optional bool Stream = 16 [(gogoproto.nullable) = false]; - optional bool Refresh = 17 [(gogoproto.nullable) = true]; -} - -message Metadata { - optional uint64 NodeID = 1 [(gogoproto.nullable) = false]; - optional uint64 ClusterID = 2 [(gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 068aefff0..6b3c71e1f 
100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -10,9 +10,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -23,6 +23,8 @@ type RequestHeader struct { ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` // username is a username that is associated with an auth token of gRPC connection Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + // auth_revision is a revision number of auth.authStore. It is not related to mvcc + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` } func (m *RequestHeader) Reset() { *m = RequestHeader{} } @@ -124,6 +126,11 @@ func (m *RequestHeader) MarshalTo(data []byte) (int, error) { i = encodeVarintRaftInternal(data, i, uint64(len(m.Username))) i += copy(data[i:], m.Username) } + if m.AuthRevision != 0 { + data[i] = 0x18 + i++ + i = encodeVarintRaftInternal(data, i, uint64(m.AuthRevision)) + } return i, nil } @@ -535,6 +542,9 @@ func (m *RequestHeader) Size() (n int) { if l > 0 { n += 1 + l + sovRaftInternal(uint64(l)) } + if m.AuthRevision != 0 { + n += 1 + sovRaftInternal(uint64(m.AuthRevision)) + } return n } @@ -765,6 +775,25 @@ func (m *RequestHeader) Unmarshal(data []byte) error { } m.Username = string(data[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) + } + m.AuthRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AuthRevision |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} default: iNdEx = preIndex skippy, err := skipRaftInternal(data[iNdEx:]) @@ -2006,57 +2035,58 @@ var ( ) var fileDescriptorRaftInternal = []byte{ - // 824 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x4e, 0xdb, 0x48, - 0x14, 0xc7, 0x49, 0xf8, 0xcc, 0x24, 0x64, 0xd9, 0x01, 0x76, 0x67, 0x83, 0x94, 0x85, 0xac, 0x76, - 0x97, 0x7e, 0xd1, 0x0a, 0x2e, 0x7b, 0xd1, 0xa6, 0x04, 0x01, 0x12, 0x42, 0xc8, 0xa2, 0x52, 0xa5, - 0x5e, 0x58, 0x43, 0x7c, 0x48, 0x5c, 0x1c, 0xdb, 0xb5, 0x27, 0x29, 0x7d, 0x93, 0x3e, 0x46, 0xbf, - 0x1e, 0x82, 0x8b, 0x7e, 0xd0, 0xf6, 0x05, 0x5a, 0x7a, 0xd3, 0xab, 0xde, 0xb4, 0x0f, 0x50, 0xcd, - 0x87, 0xc7, 0x71, 0x32, 0xe1, 0xce, 0x3e, 0xe7, 0x7f, 0x7e, 0xe7, 0x4c, 0xe6, 0x3f, 0x9e, 0xa0, - 0xf9, 0x88, 0x1e, 0x33, 0xdb, 0xf5, 0x19, 0x44, 0x3e, 0xf5, 0xd6, 0xc2, 0x28, 0x60, 0x01, 0x2e, - 0x01, 0x6b, 0x3a, 0x31, 0x44, 0x3d, 0x88, 0xc2, 0xa3, 0xca, 0x42, 0x2b, 0x68, 0x05, 0x22, 0x71, - 0x93, 0x3f, 0x49, 0x4d, 0x65, 0x2e, 0xd5, 0xa8, 0x48, 0x21, 0x0a, 0x9b, 0xf2, 0xb1, 0x76, 0x1b, - 0xcd, 0x5a, 0xf0, 0xb8, 0x0b, 0x31, 0xdb, 0x01, 0xea, 0x40, 0x84, 0xcb, 0x28, 0xbf, 0xdb, 0x20, - 0xb9, 0xe5, 0xdc, 0xea, 0x84, 0x95, 0x77, 0x1b, 0xb8, 0x82, 0x66, 0xba, 0x31, 0x6f, 0xd9, 0x01, - 0x92, 0x5f, 0xce, 0xad, 0x16, 0x2c, 0xfd, 0x5e, 0xfb, 0x5e, 0x46, 0xf3, 0xbb, 0x6a, 0x20, 0x8b, - 0x1e, 0x33, 0x45, 0x1a, 0x62, 0xfc, 0x8b, 0xf2, 0xbd, 0x75, 0x51, 0x5d, 0x5c, 0x5f, 0x5c, 0xeb, - 0x1f, 0x79, 0x4d, 0x95, 0x58, 0xf9, 0xde, 0x3a, 0xbe, 0x85, 0x26, 0x23, 0xea, 0xb7, 0x80, 0x8c, - 0x0b, 0x65, 0x65, 0x40, 0xc9, 0x53, 0x89, 0x5c, 0x0a, 0xf1, 0x55, 0x34, 0x1e, 0x76, 0x19, 0x99, - 0x10, 0x7a, 0x92, 0xd5, 0x1f, 0x74, 0x93, 0x79, 0x2c, 0x2e, 0xc2, 0x9b, 0xa8, 0xe4, 0x80, 0x07, - 0x0c, 0x6c, 0xd9, 0x64, 0x52, 0x14, 0x2d, 0x67, 0x8b, 0x1a, 0x42, 0x91, 0x69, 0x55, 0x74, 0xd2, - 0x18, 0x6f, 0xc8, 0x4e, 0x7d, 0x32, 0x65, 0x6a, 0x78, 0x78, 0xea, 0xeb, 0x86, 0xec, 0xd4, 0xc7, - 0x77, 0x10, 0x6a, 0x06, 0x9d, 0x90, 
0x36, 0x99, 0x1b, 0xf8, 0x64, 0x5a, 0x94, 0xfc, 0x9d, 0x2d, - 0xd9, 0xd4, 0xf9, 0xa4, 0xb2, 0xaf, 0x04, 0xdf, 0x45, 0x45, 0x0f, 0x68, 0x0c, 0x76, 0x2b, 0xa2, - 0x3e, 0x23, 0x33, 0x26, 0xc2, 0x1e, 0x17, 0x6c, 0xf3, 0xbc, 0x26, 0x78, 0x3a, 0xc4, 0xd7, 0x2c, - 0x09, 0x11, 0xf4, 0x82, 0x13, 0x20, 0x05, 0xd3, 0x9a, 0x05, 0xc2, 0x12, 0x02, 0xbd, 0x66, 0x2f, - 0x8d, 0xf1, 0x6d, 0xa1, 0x1e, 0x8d, 0x3a, 0x04, 0x99, 0xb6, 0xa5, 0xce, 0x53, 0x7a, 0x5b, 0x84, - 0x10, 0x6f, 0xa0, 0xa9, 0xb6, 0x70, 0x13, 0x71, 0x44, 0xc9, 0x92, 0x71, 0xcf, 0xa5, 0xe1, 0x2c, - 0x25, 0xc5, 0x75, 0x54, 0xa4, 0x5d, 0xd6, 0xb6, 0xc1, 0xa7, 0x47, 0x1e, 0x90, 0x6f, 0xc6, 0x1f, - 0xac, 0xde, 0x65, 0xed, 0x2d, 0x21, 0xd0, 0xcb, 0xa5, 0x3a, 0x84, 0x1b, 0xa8, 0x24, 0x10, 0x8e, - 0x1b, 0x0b, 0xc6, 0x8f, 0x69, 0xd3, 0x7a, 0x39, 0xa3, 0x21, 0x15, 0x7a, 0xbd, 0x34, 0x8d, 0xe1, - 0x7d, 0x49, 0x01, 0x9f, 0xb9, 0x4d, 0xca, 0x80, 0xfc, 0x94, 0x94, 0x2b, 0x59, 0x4a, 0xe2, 0xfb, - 0x7a, 0x9f, 0x34, 0xc1, 0x65, 0xea, 0xf1, 0x16, 0x9a, 0x15, 0x53, 0xf1, 0x63, 0x63, 0x53, 0xc7, - 0x21, 0x6f, 0x66, 0x46, 0x8d, 0x75, 0x3f, 0x86, 0xa8, 0xee, 0x38, 0x99, 0xb1, 0x54, 0x0c, 0xef, - 0xa3, 0xb9, 0x14, 0x23, 0x3d, 0x49, 0xde, 0x4a, 0xd2, 0x3f, 0x66, 0x92, 0x32, 0xb3, 0x82, 0x95, - 0x69, 0x26, 0x9c, 0x1d, 0xab, 0x05, 0x8c, 0xbc, 0xbb, 0x74, 0xac, 0x6d, 0x60, 0x43, 0x63, 0x6d, - 0x03, 0xc3, 0x2d, 0xf4, 0x57, 0x8a, 0x69, 0xb6, 0xf9, 0x29, 0xb1, 0x43, 0x1a, 0xc7, 0x4f, 0x82, - 0xc8, 0x21, 0xef, 0x25, 0xf2, 0x9a, 0x19, 0xb9, 0x29, 0xd4, 0x07, 0x4a, 0x9c, 0xd0, 0xff, 0xa0, - 0xc6, 0x34, 0x7e, 0x80, 0x16, 0xfa, 0xe6, 0xe5, 0xf6, 0xb6, 0xa3, 0xc0, 0x03, 0x72, 0x2e, 0x7b, - 0xfc, 0x37, 0x62, 0x6c, 0x71, 0x34, 0x82, 0x74, 0xab, 0x7f, 0xa7, 0x83, 0x19, 0xfc, 0x10, 0x2d, - 0xa6, 0x64, 0x79, 0x52, 0x24, 0xfa, 0x83, 0x44, 0xff, 0x6f, 0x46, 0xab, 0x23, 0xd3, 0xc7, 0xc6, - 0x74, 0x28, 0x85, 0x77, 0x50, 0x39, 0x85, 0x7b, 0x6e, 0xcc, 0xc8, 0x47, 0x49, 0x5d, 0x31, 0x53, - 0xf7, 0xdc, 0x98, 0x65, 0x7c, 0x94, 0x04, 0x35, 0x89, 0x8f, 0x26, 0x49, 
0x9f, 0x46, 0x92, 0x78, - 0xeb, 0x21, 0x52, 0x12, 0xd4, 0x5b, 0x2f, 0x48, 0xdc, 0x91, 0xcf, 0x0b, 0xa3, 0xb6, 0x9e, 0xd7, - 0x0c, 0x3a, 0x52, 0xc5, 0xb4, 0x23, 0x05, 0x46, 0x39, 0xf2, 0x45, 0x61, 0x94, 0x23, 0x79, 0x95, - 0xc1, 0x91, 0x69, 0x38, 0x3b, 0x16, 0x77, 0xe4, 0xcb, 0x4b, 0xc7, 0x1a, 0x74, 0xa4, 0x8a, 0xe1, - 0x47, 0xa8, 0xd2, 0x87, 0x11, 0x46, 0x09, 0x21, 0xea, 0xb8, 0x71, 0xcc, 0xbf, 0xc3, 0xaf, 0x24, - 0xf3, 0xfa, 0x08, 0x26, 0x97, 0x1f, 0x68, 0x75, 0xc2, 0xff, 0x93, 0x9a, 0xf3, 0xb8, 0x83, 0x96, - 0xd2, 0x5e, 0xca, 0x3a, 0x7d, 0xcd, 0x5e, 0xcb, 0x66, 0x37, 0xcc, 0xcd, 0xa4, 0x4b, 0x86, 0xbb, - 0x11, 0x3a, 0x42, 0x50, 0xfb, 0x0d, 0xcd, 0x6e, 0x75, 0x42, 0xf6, 0xd4, 0x82, 0x38, 0x0c, 0xfc, - 0x18, 0x6a, 0x21, 0x5a, 0xba, 0xe4, 0x43, 0x84, 0x31, 0x9a, 0x10, 0x17, 0x77, 0x4e, 0x5c, 0xdc, - 0xe2, 0x99, 0x5f, 0xe8, 0xfa, 0x7c, 0xaa, 0x0b, 0x3d, 0x79, 0xc7, 0x2b, 0xa8, 0x14, 0xbb, 0x9d, - 0xd0, 0x03, 0x9b, 0x05, 0x27, 0xe0, 0x8b, 0x8b, 0xb8, 0x60, 0x15, 0x65, 0xec, 0x90, 0x87, 0xee, - 0x2d, 0x9c, 0x7d, 0xa9, 0x8e, 0x9d, 0x5d, 0x54, 0x73, 0xe7, 0x17, 0xd5, 0xdc, 0xe7, 0x8b, 0x6a, - 0xee, 0xd9, 0xd7, 0xea, 0xd8, 0xd1, 0x94, 0xf8, 0x37, 0xb1, 0xf1, 0x2b, 0x00, 0x00, 0xff, 0xff, - 0x54, 0x8c, 0x4a, 0x7f, 0xa5, 0x08, 0x00, 0x00, + // 837 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, + 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, + 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, + 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, + 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, + 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, + 0x0b, 0xcc, 
0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, + 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, + 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, + 0xd6, 0xae, 0xe3, 0x12, 0x9a, 0xea, 0x04, 0xa2, 0x65, 0x1b, 0x48, 0x76, 0x31, 0xb3, 0x92, 0x33, + 0xa2, 0x77, 0xbc, 0x8c, 0xa6, 0x69, 0x87, 0xb7, 0x4c, 0x06, 0x5d, 0x3b, 0xb0, 0x3d, 0x97, 0x8c, + 0xca, 0xb2, 0x82, 0x08, 0x1a, 0x3a, 0x56, 0xf9, 0x53, 0x44, 0xb3, 0xdb, 0x7a, 0x6a, 0x83, 0x1e, + 0x72, 0xdd, 0x6e, 0xa0, 0xd1, 0x35, 0x94, 0xed, 0x56, 0x65, 0x8b, 0x7c, 0x75, 0x7e, 0xb5, 0x77, + 0x5d, 0xab, 0xba, 0xc4, 0xc8, 0x76, 0xab, 0xf8, 0x3e, 0x1a, 0x67, 0xd4, 0x6d, 0x82, 0xec, 0x95, + 0xaf, 0x96, 0xfa, 0x94, 0x22, 0x15, 0xca, 0x95, 0x10, 0xdf, 0x42, 0xa3, 0x7e, 0x87, 0x93, 0x31, + 0xa9, 0x27, 0x49, 0xfd, 0x5e, 0x27, 0x9c, 0xc7, 0x10, 0x22, 0xbc, 0x8e, 0x0a, 0x16, 0x38, 0xc0, + 0xc1, 0x54, 0x4d, 0xc6, 0x65, 0xd1, 0x62, 0xb2, 0xa8, 0x2e, 0x15, 0x89, 0x56, 0x79, 0x2b, 0x8e, + 0x89, 0x86, 0xfc, 0xd8, 0x25, 0x13, 0x69, 0x0d, 0xf7, 0x8f, 0xdd, 0xa8, 0x21, 0x3f, 0x76, 0xf1, + 0x03, 0x84, 0x1a, 0x5e, 0xdb, 0xa7, 0x0d, 0x2e, 0xbe, 0xdf, 0xa4, 0x2c, 0xb9, 0x9a, 0x2c, 0x59, + 0x8f, 0xf2, 0x61, 0x65, 0x4f, 0x09, 0x7e, 0x88, 0xf2, 0x0e, 0xd0, 0x00, 0xcc, 0x26, 0xa3, 0x2e, + 0x27, 0x53, 0x69, 0x84, 0x1d, 0x21, 0xd8, 0x14, 0xf9, 0x88, 0xe0, 0x44, 0x21, 0xb1, 0x66, 0x45, + 0x60, 0xd0, 0xf5, 0x8e, 0x80, 0xe4, 0xd2, 0xd6, 0x2c, 0x11, 0x86, 0x14, 0x44, 0x6b, 0x76, 0xe2, + 0x98, 0xd8, 0x16, 0xea, 0x50, 0xd6, 0x26, 0x28, 0x6d, 0x5b, 0x6a, 0x22, 0x15, 0x6d, 0x8b, 0x14, + 0xe2, 0x35, 0x34, 0xd1, 0x92, 0x96, 0x23, 0x96, 0x2c, 0x59, 0x48, 0xdd, 0x73, 0xe5, 0x4a, 0x43, + 0x4b, 0x71, 0x0d, 0xe5, 0xa5, 0xe3, 0xc0, 0xa5, 0x07, 0x0e, 0x90, 0xdf, 0xa9, 0x1f, 0xac, 0xd6, + 0xe1, 0xad, 0x0d, 0x29, 0x88, 0x96, 0x4b, 0xa3, 0x10, 0xae, 0x23, 0xe9, 0x4f, 0xd3, 0xb2, 0x03, + 0xc9, 0xf8, 0x3b, 0x99, 0xb6, 0x5e, 0xc1, 0xa8, 
0x2b, 0x45, 0xb4, 0x5e, 0x1a, 0xc7, 0xf0, 0xae, + 0xa2, 0x80, 0xcb, 0xed, 0x06, 0xe5, 0x40, 0xfe, 0x29, 0xca, 0xcd, 0x24, 0x25, 0xf4, 0x7d, 0xad, + 0x47, 0x1a, 0xe2, 0x12, 0xf5, 0x78, 0x43, 0x1f, 0x25, 0x71, 0xb6, 0x4c, 0x6a, 0x59, 0xe4, 0xe3, + 0xd4, 0xb0, 0xb1, 0x1e, 0x07, 0xc0, 0x6a, 0x96, 0x95, 0x18, 0x4b, 0xc7, 0xf0, 0x2e, 0x9a, 0x89, + 0x31, 0xca, 0x93, 0xe4, 0x93, 0x22, 0x2d, 0xa7, 0x93, 0xb4, 0x99, 0x35, 0xac, 0x48, 0x13, 0xe1, + 0xe4, 0x58, 0x4d, 0xe0, 0xe4, 0xf3, 0xb9, 0x63, 0x6d, 0x02, 0x1f, 0x18, 0x6b, 0x13, 0x38, 0x6e, + 0xa2, 0x2b, 0x31, 0xa6, 0xd1, 0x12, 0xa7, 0xc4, 0xf4, 0x69, 0x10, 0xbc, 0xf4, 0x98, 0x45, 0xbe, + 0x28, 0xe4, 0xed, 0x74, 0xe4, 0xba, 0x54, 0xef, 0x69, 0x71, 0x48, 0xbf, 0x44, 0x53, 0xd3, 0xf8, + 0x09, 0x9a, 0xeb, 0x99, 0x57, 0xd8, 0xdb, 0x64, 0x9e, 0x03, 0xe4, 0x54, 0xf5, 0xb8, 0x3e, 0x64, + 0x6c, 0x79, 0x34, 0xbc, 0x78, 0xab, 0x2f, 0xd2, 0xfe, 0x0c, 0x7e, 0x8a, 0xe6, 0x63, 0xb2, 0x3a, + 0x29, 0x0a, 0xfd, 0x55, 0xa1, 0x6f, 0xa4, 0xa3, 0xf5, 0x91, 0xe9, 0x61, 0x63, 0x3a, 0x90, 0xc2, + 0x5b, 0xa8, 0x18, 0xc3, 0x1d, 0x3b, 0xe0, 0xe4, 0x9b, 0xa2, 0x2e, 0xa5, 0x53, 0x77, 0xec, 0x80, + 0x27, 0x7c, 0x14, 0x06, 0x23, 0x92, 0x18, 0x4d, 0x91, 0xbe, 0x0f, 0x25, 0x89, 0xd6, 0x03, 0xa4, + 0x30, 0x18, 0x6d, 0xbd, 0x24, 0x09, 0x47, 0xbe, 0xc9, 0x0d, 0xdb, 0x7a, 0x51, 0xd3, 0xef, 0x48, + 0x1d, 0x8b, 0x1c, 0x29, 0x31, 0xda, 0x91, 0x6f, 0x73, 0xc3, 0x1c, 0x29, 0xaa, 0x52, 0x1c, 0x19, + 0x87, 0x93, 0x63, 0x09, 0x47, 0xbe, 0x3b, 0x77, 0xac, 0x7e, 0x47, 0xea, 0x18, 0x7e, 0x8e, 0x4a, + 0x3d, 0x18, 0x69, 0x14, 0x1f, 0x58, 0xdb, 0x0e, 0xe4, 0x3d, 0xf6, 0x5e, 0x31, 0xef, 0x0c, 0x61, + 0x0a, 0xf9, 0x5e, 0xa4, 0x0e, 0xf9, 0x97, 0x69, 0x7a, 0x1e, 0xb7, 0xd1, 0x42, 0xdc, 0x4b, 0x5b, + 0xa7, 0xa7, 0xd9, 0x07, 0xd5, 0xec, 0x6e, 0x7a, 0x33, 0xe5, 0x92, 0xc1, 0x6e, 0x84, 0x0e, 0x11, + 0x54, 0x2e, 0xa0, 0xe9, 0x8d, 0xb6, 0xcf, 0x5f, 0x19, 0x10, 0xf8, 0x9e, 0x1b, 0x40, 0xc5, 0x47, + 0x0b, 0xe7, 0xfc, 0x10, 0x61, 0x8c, 0xc6, 0xe4, 0xed, 0x9e, 0x91, 0xb7, 0xbb, 0x7c, 
0x16, 0xb7, + 0x7e, 0x74, 0x3e, 0xf5, 0xad, 0x1f, 0xbe, 0xe3, 0x25, 0x54, 0x08, 0xec, 0xb6, 0xef, 0x80, 0xc9, + 0xbd, 0x23, 0x50, 0x97, 0x7e, 0xce, 0xc8, 0xab, 0xd8, 0xbe, 0x08, 0x3d, 0x9a, 0x3b, 0xf9, 0x59, + 0x1e, 0x39, 0x39, 0x2b, 0x67, 0x4e, 0xcf, 0xca, 0x99, 0x1f, 0x67, 0xe5, 0xcc, 0xeb, 0x5f, 0xe5, + 0x91, 0x83, 0x09, 0xf9, 0x97, 0x63, 0xed, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x02, 0x23, 0xd2, + 0x00, 0xca, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto deleted file mode 100644 index 3ad0d1b37..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; -import "etcdserver.proto"; -import "rpc.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message RequestHeader { - uint64 ID = 1; - // username is a username that is associated with an auth token of gRPC connection - string username = 2; -} - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. 
-message InternalRaftRequest { - RequestHeader header = 100; - uint64 ID = 1; - - Request v2 = 2; - - RangeRequest range = 3; - PutRequest put = 4; - DeleteRangeRequest delete_range = 5; - TxnRequest txn = 6; - CompactionRequest compaction = 7; - - LeaseGrantRequest lease_grant = 8; - LeaseRevokeRequest lease_revoke = 9; - - AlarmRequest alarm = 10; - - AuthEnableRequest auth_enable = 1000; - AuthDisableRequest auth_disable = 1011; - - InternalAuthenticateRequest authenticate = 1012; - - AuthUserAddRequest auth_user_add = 1100; - AuthUserDeleteRequest auth_user_delete = 1101; - AuthUserGetRequest auth_user_get = 1102; - AuthUserChangePasswordRequest auth_user_change_password = 1103; - AuthUserGrantRoleRequest auth_user_grant_role = 1104; - AuthUserRevokeRoleRequest auth_user_revoke_role = 1105; - AuthUserListRequest auth_user_list = 1106; - AuthRoleListRequest auth_role_list = 1107; - - AuthRoleAddRequest auth_role_add = 1200; - AuthRoleDeleteRequest auth_role_delete = 1201; - AuthRoleGetRequest auth_role_get = 1202; - AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203; - AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204; -} - -message EmptyResponse { -} - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. 
-message InternalAuthenticateRequest { - string name = 1; - string password = 2; - - // simple_token is generated in API layer (etcdserver/v3_server.go) - string simple_token = 3; -} - diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index 213d54426..fdc7ffd77 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -11,16 +11,15 @@ import ( math "math" - authpb "github.com/coreos/etcd/auth/authpb" - - io "io" -) + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" -import mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + authpb "github.com/coreos/etcd/auth/authpb" -import ( context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -154,6 +153,31 @@ func (x Compare_CompareTarget) String() string { } func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } +type WatchCreateRequest_FilterType int32 + +const ( + // filter out put event. + WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 + // filter out delete event. 
+ WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 +) + +var WatchCreateRequest_FilterType_name = map[int32]string{ + 0: "NOPUT", + 1: "NODELETE", +} +var WatchCreateRequest_FilterType_value = map[string]int32{ + "NOPUT": 0, + "NODELETE": 1, +} + +func (x WatchCreateRequest_FilterType) String() string { + return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) +} +func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorRpc, []int{19, 0} +} + type AlarmRequest_AlarmAction int32 const ( @@ -177,7 +201,7 @@ func (x AlarmRequest_AlarmAction) String() string { return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) } func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{39, 0} + return fileDescriptorRpc, []int{41, 0} } type ResponseHeader struct { @@ -226,6 +250,18 @@ type RangeRequest struct { KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` // count_only when set returns only the count of the keys in the range. CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` + // min_mod_revision is the lower bound for returned key mod revisions; all keys with + // lesser mod revisions will be filtered away. + MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` + // max_mod_revision is the upper bound for returned key mod revisions; all keys with + // greater mod revisions will be filtered away. + MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` + // min_create_revision is the lower bound for returned key create revisions; all keys with + // lesser create trevisions will be filtered away. 
+ MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` + // max_create_revision is the upper bound for returned key create revisions; all keys with + // greater create revisions will be filtered away. + MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` } func (m *RangeRequest) Reset() { *m = RangeRequest{} } @@ -271,6 +307,9 @@ type PutRequest struct { // lease is the lease ID to associate with the key in the key-value store. A lease // value of 0 indicates no lease. Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pair before changing it. + // The previous key-value pair will be returned in the put response. + PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } func (m *PutRequest) Reset() { *m = PutRequest{} } @@ -280,6 +319,8 @@ func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []in type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // if prev_kv is set in the request, the previous key-value pair will be returned. + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` } func (m *PutResponse) Reset() { *m = PutResponse{} } @@ -294,6 +335,13 @@ func (m *PutResponse) GetHeader() *ResponseHeader { return nil } +func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { + if m != nil { + return m.PrevKv + } + return nil +} + type DeleteRangeRequest struct { // key is the first key to delete in the range. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -301,6 +349,9 @@ type DeleteRangeRequest struct { // If range_end is not given, the range is defined to contain only the key argument. 
// If range_end is '\0', the range is all keys greater than or equal to the key argument. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. + // The previous key-value pairs will be returned in the delte response. + PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } @@ -312,6 +363,8 @@ type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` + // if prev_kv is set in the request, the previous key-value pairs will be returned. + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` } func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } @@ -326,6 +379,13 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } +func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { + if m != nil { + return m.PrevKvs + } + return nil +} + type RequestOp struct { // request is a union of request types accepted by a transaction. // @@ -1111,6 +1171,8 @@ type WatchCreateRequest struct { // range_end is the end of the range [key, range_end) to watch. If range_end is not given, // only the key argument is watched. If range_end is equal to '\0', all keys greater than // or equal to the key argument are watched. + // If the range_end is one bit larger than the given key, + // then all keys with the prefix (the given key) will be watched. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". 
StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` @@ -1119,6 +1181,11 @@ type WatchCreateRequest struct { // wish to recover a disconnected watcher starting from a recent known revision. // The etcd server may decide how often it will send notifications based on current load. ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` + // filters filter the events at server side before it sends back to the watcher. + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + // If prev_kv is set, created watcher gets the previous KV before the event happens. + // If the previous KV is already compacted, nothing will be returned. + PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } @@ -1268,6 +1335,42 @@ func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { return nil } +type LeaseTimeToLiveRequest struct { + // ID is the lease ID for the lease. + ID int64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` + // keys is true to query all the keys attached to this lease. + Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +type LeaseTimeToLiveResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // ID is the lease ID from the keep alive request. 
+ ID int64 `protobuf:"varint,2,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. + TTL int64 `protobuf:"varint,3,opt,name=TTL,json=tTL,proto3" json:"TTL,omitempty"` + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` + // Keys is the list of keys attached to this lease. + Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` +} + +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + type Member struct { // ID is the member ID for this member. ID uint64 `protobuf:"varint,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` @@ -1282,7 +1385,7 @@ type Member struct { func (m *Member) Reset() { *m = Member{} } func (m *Member) String() string { return proto.CompactTextString(m) } func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. 
@@ -1292,7 +1395,7 @@ type MemberAddRequest struct { func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1303,7 +1406,7 @@ type MemberAddResponse struct { func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } func (m *MemberAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1327,7 +1430,7 @@ type MemberRemoveRequest struct { func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1336,7 +1439,7 @@ type MemberRemoveResponse struct { func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } +func 
(*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1355,7 +1458,7 @@ type MemberUpdateRequest struct { func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1364,7 +1467,7 @@ type MemberUpdateResponse struct { func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1379,7 +1482,7 @@ type MemberListRequest struct { func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } func (*MemberListRequest) ProtoMessage() {} -func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } type MemberListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1390,7 +1493,7 @@ type MemberListResponse struct { func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } func (m *MemberListResponse) String() string { 
return proto.CompactTextString(m) } func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } func (m *MemberListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1412,7 +1515,7 @@ type DefragmentRequest struct { func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } type DefragmentResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1421,7 +1524,7 @@ type DefragmentResponse struct { func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } func (m *DefragmentResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1445,7 +1548,7 @@ type AlarmRequest struct { func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. 
@@ -1457,7 +1560,7 @@ type AlarmMember struct { func (m *AlarmMember) Reset() { *m = AlarmMember{} } func (m *AlarmMember) String() string { return proto.CompactTextString(m) } func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1468,7 +1571,7 @@ type AlarmResponse struct { func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } func (m *AlarmResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1490,7 +1593,7 @@ type StatusRequest struct { func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (m *StatusRequest) String() string { return proto.CompactTextString(m) } func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } type StatusResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1509,7 +1612,7 @@ type StatusResponse struct { func (m *StatusResponse) Reset() { *m = StatusResponse{} } func (m *StatusResponse) String() string { return proto.CompactTextString(m) } func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } func (m *StatusResponse) GetHeader() *ResponseHeader { if m != nil { @@ 
-1524,7 +1627,7 @@ type AuthEnableRequest struct { func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } type AuthDisableRequest struct { } @@ -1532,7 +1635,7 @@ type AuthDisableRequest struct { func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } type AuthenticateRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1542,7 +1645,7 @@ type AuthenticateRequest struct { func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1552,7 +1655,7 @@ type AuthUserAddRequest struct { func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{49} } type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1561,7 +1664,7 @@ type AuthUserGetRequest struct { func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } type AuthUserDeleteRequest struct { // name is the name of the user to delete. @@ -1571,7 +1674,7 @@ type AuthUserDeleteRequest struct { func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. 
@@ -1584,7 +1687,7 @@ func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePas func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordRequest) ProtoMessage() {} func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{50} + return fileDescriptorRpc, []int{52} } type AuthUserGrantRoleRequest struct { @@ -1597,7 +1700,7 @@ type AuthUserGrantRoleRequest struct { func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1607,7 +1710,7 @@ type AuthUserRevokeRoleRequest struct { func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. 
@@ -1617,7 +1720,7 @@ type AuthRoleAddRequest struct { func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -1626,7 +1729,7 @@ type AuthRoleGetRequest struct { func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } type AuthUserListRequest struct { } @@ -1634,7 +1737,7 @@ type AuthUserListRequest struct { func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } type AuthRoleListRequest struct { } @@ -1642,7 +1745,7 @@ type AuthRoleListRequest struct { func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } type AuthRoleDeleteRequest 
struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -1651,7 +1754,7 @@ type AuthRoleDeleteRequest struct { func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. @@ -1664,7 +1767,7 @@ func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPer func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{58} + return fileDescriptorRpc, []int{60} } func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { @@ -1684,7 +1787,7 @@ func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokeP func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{59} + return fileDescriptorRpc, []int{61} } type AuthEnableResponse struct { @@ -1694,7 +1797,7 @@ type AuthEnableResponse struct { func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, 
[]int{62} } func (m *AuthEnableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1710,7 +1813,7 @@ type AuthDisableResponse struct { func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } func (m *AuthDisableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1728,7 +1831,7 @@ type AuthenticateResponse struct { func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } func (m *AuthenticateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1744,7 +1847,7 @@ type AuthUserAddResponse struct { func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1761,7 +1864,7 @@ type AuthUserGetResponse struct { func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{64} } +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1777,7 +1880,7 @@ type AuthUserDeleteResponse struct { func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1794,7 +1897,7 @@ func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePa func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordResponse) ProtoMessage() {} func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{66} + return fileDescriptorRpc, []int{68} } func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { @@ -1811,7 +1914,7 @@ type AuthUserGrantRoleResponse struct { func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1827,7 +1930,7 @@ type AuthUserRevokeRoleResponse struct { func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } func (m *AuthUserRevokeRoleResponse) String() string { return 
proto.CompactTextString(m) } func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1843,7 +1946,7 @@ type AuthRoleAddResponse struct { func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1860,7 +1963,7 @@ type AuthRoleGetResponse struct { func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1884,7 +1987,7 @@ type AuthRoleListResponse struct { func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1901,7 +2004,7 @@ type 
AuthUserListResponse struct { func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } func (m *AuthUserListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1917,7 +2020,7 @@ type AuthRoleDeleteResponse struct { func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1934,7 +2037,7 @@ func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPe func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{74} + return fileDescriptorRpc, []int{76} } func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { @@ -1952,7 +2055,7 @@ func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevoke func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{75} + return fileDescriptorRpc, []int{77} } func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { 
@@ -1991,6 +2094,8 @@ func init() { proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") + proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") + proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") proto.RegisterType((*Member)(nil), "etcdserverpb.Member") proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") @@ -2044,6 +2149,7 @@ func init() { proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) } @@ -2397,6 +2503,8 @@ type LeaseClient interface { // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client // to the server and streaming keep alive responses from the server to the client. LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) + // LeaseTimeToLive retrieves lease information. 
+ LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) } type leaseClient struct { @@ -2456,6 +2564,15 @@ func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { return m, nil } +func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { + out := new(LeaseTimeToLiveResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Lease service type LeaseServer interface { @@ -2468,6 +2585,8 @@ type LeaseServer interface { // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client // to the server and streaming keep alive responses from the server to the client. LeaseKeepAlive(Lease_LeaseKeepAliveServer) error + // LeaseTimeToLive retrieves lease information. + LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) } func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { @@ -2536,6 +2655,24 @@ func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { return m, nil } +func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseTimeToLiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeaseServer).LeaseTimeToLive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Lease_serviceDesc = grpc.ServiceDesc{ ServiceName: 
"etcdserverpb.Lease", HandlerType: (*LeaseServer)(nil), @@ -2548,6 +2685,10 @@ var _Lease_serviceDesc = grpc.ServiceDesc{ MethodName: "LeaseRevoke", Handler: _Lease_LeaseRevoke_Handler, }, + { + MethodName: "LeaseTimeToLive", + Handler: _Lease_LeaseTimeToLive_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3675,6 +3816,26 @@ func (m *RangeRequest) MarshalTo(data []byte) (int, error) { } i++ } + if m.MinModRevision != 0 { + data[i] = 0x50 + i++ + i = encodeVarintRpc(data, i, uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + data[i] = 0x58 + i++ + i = encodeVarintRpc(data, i, uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + data[i] = 0x60 + i++ + i = encodeVarintRpc(data, i, uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + data[i] = 0x68 + i++ + i = encodeVarintRpc(data, i, uint64(m.MaxCreateRevision)) + } return i, nil } @@ -3765,6 +3926,16 @@ func (m *PutRequest) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintRpc(data, i, uint64(m.Lease)) } + if m.PrevKv { + data[i] = 0x20 + i++ + if m.PrevKv { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -3793,6 +3964,16 @@ func (m *PutResponse) MarshalTo(data []byte) (int, error) { } i += n2 } + if m.PrevKv != nil { + data[i] = 0x12 + i++ + i = encodeVarintRpc(data, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } return i, nil } @@ -3823,6 +4004,16 @@ func (m *DeleteRangeRequest) MarshalTo(data []byte) (int, error) { i = encodeVarintRpc(data, i, uint64(len(m.RangeEnd))) i += copy(data[i:], m.RangeEnd) } + if m.PrevKv { + data[i] = 0x18 + i++ + if m.PrevKv { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } return i, nil } @@ -3845,17 +4036,29 @@ func (m *DeleteRangeResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n3, err := m.Header.MarshalTo(data[i:]) + n4, err := m.Header.MarshalTo(data[i:]) if 
err != nil { return 0, err } - i += n3 + i += n4 } if m.Deleted != 0 { data[i] = 0x10 i++ i = encodeVarintRpc(data, i, uint64(m.Deleted)) } + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + data[i] = 0x1a + i++ + i = encodeVarintRpc(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -3875,11 +4078,11 @@ func (m *RequestOp) MarshalTo(data []byte) (int, error) { var l int _ = l if m.Request != nil { - nn4, err := m.Request.MarshalTo(data[i:]) + nn5, err := m.Request.MarshalTo(data[i:]) if err != nil { return 0, err } - i += nn4 + i += nn5 } return i, nil } @@ -3890,11 +4093,11 @@ func (m *RequestOp_RequestRange) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.RequestRange.Size())) - n5, err := m.RequestRange.MarshalTo(data[i:]) + n6, err := m.RequestRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n5 + i += n6 } return i, nil } @@ -3904,11 +4107,11 @@ func (m *RequestOp_RequestPut) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.RequestPut.Size())) - n6, err := m.RequestPut.MarshalTo(data[i:]) + n7, err := m.RequestPut.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } return i, nil } @@ -3918,11 +4121,11 @@ func (m *RequestOp_RequestDeleteRange) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintRpc(data, i, uint64(m.RequestDeleteRange.Size())) - n7, err := m.RequestDeleteRange.MarshalTo(data[i:]) + n8, err := m.RequestDeleteRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } return i, nil } @@ -3942,11 +4145,11 @@ func (m *ResponseOp) MarshalTo(data []byte) (int, error) { var l int _ = l if m.Response != nil { - nn8, err := m.Response.MarshalTo(data[i:]) + nn9, err := m.Response.MarshalTo(data[i:]) if err != nil { return 0, err } - i += nn8 + i += nn9 } return i, nil } @@ -3957,11 
+4160,11 @@ func (m *ResponseOp_ResponseRange) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.ResponseRange.Size())) - n9, err := m.ResponseRange.MarshalTo(data[i:]) + n10, err := m.ResponseRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } return i, nil } @@ -3971,11 +4174,11 @@ func (m *ResponseOp_ResponsePut) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.ResponsePut.Size())) - n10, err := m.ResponsePut.MarshalTo(data[i:]) + n11, err := m.ResponsePut.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } return i, nil } @@ -3985,11 +4188,11 @@ func (m *ResponseOp_ResponseDeleteRange) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintRpc(data, i, uint64(m.ResponseDeleteRange.Size())) - n11, err := m.ResponseDeleteRange.MarshalTo(data[i:]) + n12, err := m.ResponseDeleteRange.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -4025,11 +4228,11 @@ func (m *Compare) MarshalTo(data []byte) (int, error) { i += copy(data[i:], m.Key) } if m.TargetUnion != nil { - nn12, err := m.TargetUnion.MarshalTo(data[i:]) + nn13, err := m.TargetUnion.MarshalTo(data[i:]) if err != nil { return 0, err } - i += nn12 + i += nn13 } return i, nil } @@ -4138,11 +4341,11 @@ func (m *TxnResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n13, err := m.Header.MarshalTo(data[i:]) + n14, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } if m.Succeeded { data[i] = 0x10 @@ -4221,11 +4424,11 @@ func (m *CompactionResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n14, err := m.Header.MarshalTo(data[i:]) + n15, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n14 + i += n15 } return i, 
nil } @@ -4267,11 +4470,11 @@ func (m *HashResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n15, err := m.Header.MarshalTo(data[i:]) + n16, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n15 + i += n16 } if m.Hash != 0 { data[i] = 0x10 @@ -4318,11 +4521,11 @@ func (m *SnapshotResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n16, err := m.Header.MarshalTo(data[i:]) + n17, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n16 + i += n17 } if m.RemainingBytes != 0 { data[i] = 0x10 @@ -4354,11 +4557,11 @@ func (m *WatchRequest) MarshalTo(data []byte) (int, error) { var l int _ = l if m.RequestUnion != nil { - nn17, err := m.RequestUnion.MarshalTo(data[i:]) + nn18, err := m.RequestUnion.MarshalTo(data[i:]) if err != nil { return 0, err } - i += nn17 + i += nn18 } return i, nil } @@ -4369,11 +4572,11 @@ func (m *WatchRequest_CreateRequest) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.CreateRequest.Size())) - n18, err := m.CreateRequest.MarshalTo(data[i:]) + n19, err := m.CreateRequest.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } return i, nil } @@ -4383,11 +4586,11 @@ func (m *WatchRequest_CancelRequest) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.CancelRequest.Size())) - n19, err := m.CancelRequest.MarshalTo(data[i:]) + n20, err := m.CancelRequest.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n19 + i += n20 } return i, nil } @@ -4433,6 +4636,23 @@ func (m *WatchCreateRequest) MarshalTo(data []byte) (int, error) { } i++ } + if len(m.Filters) > 0 { + for _, num := range m.Filters { + data[i] = 0x28 + i++ + i = encodeVarintRpc(data, i, uint64(num)) + } + } + if m.PrevKv { + data[i] = 0x30 + i++ + if m.PrevKv { + data[i] = 1 + } else { + 
data[i] = 0 + } + i++ + } return i, nil } @@ -4478,11 +4698,11 @@ func (m *WatchResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n20, err := m.Header.MarshalTo(data[i:]) + n21, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n20 + i += n21 } if m.WatchId != 0 { data[i] = 0x10 @@ -4576,11 +4796,11 @@ func (m *LeaseGrantResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n21, err := m.Header.MarshalTo(data[i:]) + n22, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.ID != 0 { data[i] = 0x10 @@ -4643,11 +4863,11 @@ func (m *LeaseRevokeResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n22, err := m.Header.MarshalTo(data[i:]) + n23, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } return i, nil } @@ -4694,11 +4914,82 @@ func (m *LeaseKeepAliveResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n23, err := m.Header.MarshalTo(data[i:]) + n24, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n23 + i += n24 + } + if m.ID != 0 { + data[i] = 0x10 + i++ + i = encodeVarintRpc(data, i, uint64(m.ID)) + } + if m.TTL != 0 { + data[i] = 0x18 + i++ + i = encodeVarintRpc(data, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + data[i] = 0x8 + i++ + i = encodeVarintRpc(data, i, uint64(m.ID)) + } + if m.Keys { + data[i] = 0x10 + i++ 
+ if m.Keys { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *LeaseTimeToLiveResponse) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LeaseTimeToLiveResponse) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + data[i] = 0xa + i++ + i = encodeVarintRpc(data, i, uint64(m.Header.Size())) + n25, err := m.Header.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 } if m.ID != 0 { data[i] = 0x10 @@ -4710,6 +5001,19 @@ func (m *LeaseKeepAliveResponse) MarshalTo(data []byte) (int, error) { i++ i = encodeVarintRpc(data, i, uint64(m.TTL)) } + if m.GrantedTTL != 0 { + data[i] = 0x20 + i++ + i = encodeVarintRpc(data, i, uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + data[i] = 0x2a + i++ + i = encodeVarintRpc(data, i, uint64(len(b))) + i += copy(data[i:], b) + } + } return i, nil } @@ -4824,21 +5128,21 @@ func (m *MemberAddResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n24, err := m.Header.MarshalTo(data[i:]) + n26, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n24 + i += n26 } if m.Member != nil { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.Member.Size())) - n25, err := m.Member.MarshalTo(data[i:]) + n27, err := m.Member.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n25 + i += n27 } return i, nil } @@ -4885,11 +5189,11 @@ func (m *MemberRemoveResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n26, err := m.Header.MarshalTo(data[i:]) + n28, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n26 + i += n28 } return i, nil } @@ -4951,11 +5255,11 @@ func (m *MemberUpdateResponse) 
MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n27, err := m.Header.MarshalTo(data[i:]) + n29, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n27 + i += n29 } return i, nil } @@ -4997,11 +5301,11 @@ func (m *MemberListResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n28, err := m.Header.MarshalTo(data[i:]) + n30, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n28 + i += n30 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -5055,11 +5359,11 @@ func (m *DefragmentResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n29, err := m.Header.MarshalTo(data[i:]) + n31, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n29 + i += n31 } return i, nil } @@ -5144,11 +5448,11 @@ func (m *AlarmResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n30, err := m.Header.MarshalTo(data[i:]) + n32, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n30 + i += n32 } if len(m.Alarms) > 0 { for _, msg := range m.Alarms { @@ -5202,11 +5506,11 @@ func (m *StatusResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n31, err := m.Header.MarshalTo(data[i:]) + n33, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n31 + i += n33 } if len(m.Version) > 0 { data[i] = 0x12 @@ -5604,11 +5908,11 @@ func (m *AuthRoleGrantPermissionRequest) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintRpc(data, i, uint64(m.Perm.Size())) - n32, err := m.Perm.MarshalTo(data[i:]) + n34, err := m.Perm.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n32 + i += n34 } return i, nil } @@ -5668,11 +5972,11 @@ 
func (m *AuthEnableResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n33, err := m.Header.MarshalTo(data[i:]) + n35, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n33 + i += n35 } return i, nil } @@ -5696,11 +6000,11 @@ func (m *AuthDisableResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n34, err := m.Header.MarshalTo(data[i:]) + n36, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n34 + i += n36 } return i, nil } @@ -5724,11 +6028,11 @@ func (m *AuthenticateResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n35, err := m.Header.MarshalTo(data[i:]) + n37, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n35 + i += n37 } if len(m.Token) > 0 { data[i] = 0x12 @@ -5758,11 +6062,11 @@ func (m *AuthUserAddResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n36, err := m.Header.MarshalTo(data[i:]) + n38, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n36 + i += n38 } return i, nil } @@ -5786,11 +6090,11 @@ func (m *AuthUserGetResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n37, err := m.Header.MarshalTo(data[i:]) + n39, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n37 + i += n39 } if len(m.Roles) > 0 { for _, s := range m.Roles { @@ -5829,11 +6133,11 @@ func (m *AuthUserDeleteResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n38, err := m.Header.MarshalTo(data[i:]) + n40, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n38 + i += n40 } return i, nil } @@ -5857,11 +6161,11 @@ func (m 
*AuthUserChangePasswordResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n39, err := m.Header.MarshalTo(data[i:]) + n41, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } return i, nil } @@ -5885,11 +6189,11 @@ func (m *AuthUserGrantRoleResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n40, err := m.Header.MarshalTo(data[i:]) + n42, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n40 + i += n42 } return i, nil } @@ -5913,11 +6217,11 @@ func (m *AuthUserRevokeRoleResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n41, err := m.Header.MarshalTo(data[i:]) + n43, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n41 + i += n43 } return i, nil } @@ -5941,11 +6245,11 @@ func (m *AuthRoleAddResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n42, err := m.Header.MarshalTo(data[i:]) + n44, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n42 + i += n44 } return i, nil } @@ -5969,11 +6273,11 @@ func (m *AuthRoleGetResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n43, err := m.Header.MarshalTo(data[i:]) + n45, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n43 + i += n45 } if len(m.Perm) > 0 { for _, msg := range m.Perm { @@ -6009,11 +6313,11 @@ func (m *AuthRoleListResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n44, err := m.Header.MarshalTo(data[i:]) + n46, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n44 + i += n46 } if len(m.Roles) > 0 { for _, s := range m.Roles { @@ 
-6052,11 +6356,11 @@ func (m *AuthUserListResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n45, err := m.Header.MarshalTo(data[i:]) + n47, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n45 + i += n47 } if len(m.Users) > 0 { for _, s := range m.Users { @@ -6095,11 +6399,11 @@ func (m *AuthRoleDeleteResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n46, err := m.Header.MarshalTo(data[i:]) + n48, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n46 + i += n48 } return i, nil } @@ -6123,11 +6427,11 @@ func (m *AuthRoleGrantPermissionResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n47, err := m.Header.MarshalTo(data[i:]) + n49, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n47 + i += n49 } return i, nil } @@ -6151,11 +6455,11 @@ func (m *AuthRoleRevokePermissionResponse) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintRpc(data, i, uint64(m.Header.Size())) - n48, err := m.Header.MarshalTo(data[i:]) + n50, err := m.Header.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n48 + i += n50 } return i, nil } @@ -6237,6 +6541,18 @@ func (m *RangeRequest) Size() (n int) { if m.CountOnly { n += 2 } + if m.MinModRevision != 0 { + n += 1 + sovRpc(uint64(m.MinModRevision)) + } + if m.MaxModRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxModRevision)) + } + if m.MinCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MinCreateRevision)) + } + if m.MaxCreateRevision != 0 { + n += 1 + sovRpc(uint64(m.MaxCreateRevision)) + } return n } @@ -6276,6 +6592,9 @@ func (m *PutRequest) Size() (n int) { if m.Lease != 0 { n += 1 + sovRpc(uint64(m.Lease)) } + if m.PrevKv { + n += 2 + } return n } @@ -6286,6 +6605,10 @@ func (m *PutResponse) Size() (n int) { l = 
m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -6300,6 +6623,9 @@ func (m *DeleteRangeRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } + if m.PrevKv { + n += 2 + } return n } @@ -6313,6 +6639,12 @@ func (m *DeleteRangeResponse) Size() (n int) { if m.Deleted != 0 { n += 1 + sovRpc(uint64(m.Deleted)) } + if len(m.PrevKvs) > 0 { + for _, e := range m.PrevKvs { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -6585,6 +6917,14 @@ func (m *WatchCreateRequest) Size() (n int) { if m.ProgressNotify { n += 2 } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + n += 1 + sovRpc(uint64(e)) + } + } + if m.PrevKv { + n += 2 + } return n } @@ -6701,6 +7041,43 @@ func (m *LeaseKeepAliveResponse) Size() (n int) { return n } +func (m *LeaseTimeToLiveRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.Keys { + n += 2 + } + return n +} + +func (m *LeaseTimeToLiveResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + if m.TTL != 0 { + n += 1 + sovRpc(uint64(m.TTL)) + } + if m.GrantedTTL != 0 { + n += 1 + sovRpc(uint64(m.GrantedTTL)) + } + if len(m.Keys) > 0 { + for _, b := range m.Keys { + l = len(b) + n += 1 + l + sovRpc(uint64(l)) + } + } + return n +} + func (m *Member) Size() (n int) { var l int _ = l @@ -7639,6 +8016,82 @@ func (m *RangeRequest) Unmarshal(data []byte) error { } } m.CountOnly = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) + } + m.MinModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinModRevision |= (int64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) + } + m.MaxModRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxModRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) + } + m.MinCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) + } + m.MaxCreateRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) @@ -7923,6 +8376,26 @@ func (m *PutRequest) Unmarshal(data []byte) error { break } } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) @@ -8006,6 +8479,39 @@ func (m *PutResponse) Unmarshal(data []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &mvccpb.KeyValue{} + } + if err := m.PrevKv.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) @@ -8118,6 +8624,26 @@ func (m *DeleteRangeRequest) Unmarshal(data []byte) error { m.RangeEnd = []byte{} } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) @@ -8220,26 +8746,57 @@ func (m *DeleteRangeResponse) Unmarshal(data []byte) error { break } } - default: - iNdEx = preIndex - skippy, err := skipRpc(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + 
} + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) + if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } func (m *RequestOp) Unmarshal(data []byte) error { l := len(data) @@ -9770,6 +10327,46 @@ func (m *WatchCreateRequest) Unmarshal(data []byte) error { } } m.ProgressNotify = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PrevKv = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(data[iNdEx:]) @@ -10632,6 +11229,264 @@ func (m *LeaseKeepAliveResponse) Unmarshal(data []byte) error { } return nil } +func (m *LeaseTimeToLiveRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRpc(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseTimeToLiveResponse) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) + } + m.GrantedTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.GrantedTTL |= (int64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) + copy(m.Keys[len(m.Keys)-1], data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Member) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -15125,203 +15980,217 @@ var ( ) var fileDescriptorRpc = []byte{ - // 3154 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5a, 0xcd, 0x72, 0xe3, 0xc6, - 0xf1, 0x17, 0x48, 0x4a, 0x14, 0x9b, 0x14, 0xc5, 0x1d, 0x69, 0xd7, 0x14, 0x56, 0xab, 0xd5, 0xce, - 0x7e, 0xc9, 0x6b, 0x5b, 0xb4, 0x65, 0xff, 0xff, 0x87, 0x4d, 0xca, 0x55, 0x92, 0xc8, 0xac, 0x64, - 0xc9, 0xd2, 0x1a, 0xd2, 0xca, 0x4e, 0x55, 0x2a, 0x2a, 0x88, 0x9c, 0x25, 0x59, 0x22, 0x01, 0x1a, - 0x00, 0xb9, 0x2b, 0x27, 0xa9, 0x4a, 0xb9, 0xe2, 0x43, 0x72, 0xf5, 0x21, 0x5f, 0xc7, 0x3c, 0x43, - 0x6e, 0x79, 0x80, 0x54, 0x2e, 0x71, 0x55, 0x5e, 0x20, 0xb5, 0xc9, 0x21, 0x87, 0xdc, 0x53, 0x39, - 0xa4, 0x92, 0x9a, 0x2f, 0x60, 0x00, 0x02, 0x94, 0x1c, 0xc4, 0x17, 0x09, 0xd3, 0xd3, 0xd3, 0xbf, - 0x9e, 0x9e, 0xe9, 0x46, 0x77, 0x83, 0x50, 0x70, 0x06, 0xcd, 0xf5, 0x81, 0x63, 0x7b, 0x36, 0x2a, - 0x11, 0xaf, 0xd9, 0x72, 0x89, 0x33, 
0x22, 0xce, 0xe0, 0x4c, 0x5f, 0x6c, 0xdb, 0x6d, 0x9b, 0x4d, - 0xd4, 0xe8, 0x13, 0xe7, 0xd1, 0x97, 0x28, 0x4f, 0xad, 0x3f, 0x6a, 0x36, 0xd9, 0x9f, 0xc1, 0x59, - 0xed, 0x7c, 0x24, 0xa6, 0x6e, 0xb2, 0x29, 0x73, 0xe8, 0x75, 0xd8, 0x9f, 0xc1, 0x19, 0xfb, 0x27, - 0x26, 0x97, 0xdb, 0xb6, 0xdd, 0xee, 0x91, 0x9a, 0x39, 0xe8, 0xd6, 0x4c, 0xcb, 0xb2, 0x3d, 0xd3, - 0xeb, 0xda, 0x96, 0xcb, 0x67, 0xf1, 0x17, 0x1a, 0x94, 0x0d, 0xe2, 0x0e, 0x6c, 0xcb, 0x25, 0x3b, - 0xc4, 0x6c, 0x11, 0x07, 0xdd, 0x02, 0x68, 0xf6, 0x86, 0xae, 0x47, 0x9c, 0xd3, 0x6e, 0xab, 0xaa, - 0xad, 0x6a, 0x6b, 0x39, 0xa3, 0x20, 0x28, 0xbb, 0x2d, 0x74, 0x13, 0x0a, 0x7d, 0xd2, 0x3f, 0xe3, - 0xb3, 0x19, 0x36, 0x3b, 0xcb, 0x09, 0xbb, 0x2d, 0xa4, 0xc3, 0xac, 0x43, 0x46, 0x5d, 0xb7, 0x6b, - 0x5b, 0xd5, 0xec, 0xaa, 0xb6, 0x96, 0x35, 0xfc, 0x31, 0x5d, 0xe8, 0x98, 0xcf, 0xbd, 0x53, 0x8f, - 0x38, 0xfd, 0x6a, 0x8e, 0x2f, 0xa4, 0x84, 0x63, 0xe2, 0xf4, 0xf1, 0x57, 0x59, 0x28, 0x19, 0xa6, - 0xd5, 0x26, 0x06, 0xf9, 0x74, 0x48, 0x5c, 0x0f, 0x55, 0x20, 0x7b, 0x4e, 0x2e, 0x18, 0x7c, 0xc9, - 0xa0, 0x8f, 0x7c, 0xbd, 0xd5, 0x26, 0xa7, 0xc4, 0xe2, 0xc0, 0x25, 0xba, 0xde, 0x6a, 0x93, 0x86, - 0xd5, 0x42, 0x8b, 0x30, 0xdd, 0xeb, 0xf6, 0xbb, 0x9e, 0x40, 0xe5, 0x83, 0x90, 0x3a, 0xb9, 0x88, - 0x3a, 0xdb, 0x00, 0xae, 0xed, 0x78, 0xa7, 0xb6, 0xd3, 0x22, 0x4e, 0x75, 0x7a, 0x55, 0x5b, 0x2b, - 0x6f, 0xdc, 0x5b, 0x57, 0x0f, 0x62, 0x5d, 0x55, 0x68, 0xfd, 0xc8, 0x76, 0xbc, 0x43, 0xca, 0x6b, - 0x14, 0x5c, 0xf9, 0x88, 0xbe, 0x03, 0x45, 0x26, 0xc4, 0x33, 0x9d, 0x36, 0xf1, 0xaa, 0x33, 0x4c, - 0xca, 0xfd, 0x4b, 0xa4, 0x1c, 0x33, 0x66, 0x83, 0xc1, 0xf3, 0x67, 0x84, 0xa1, 0xe4, 0x12, 0xa7, - 0x6b, 0xf6, 0xba, 0x9f, 0x99, 0x67, 0x3d, 0x52, 0xcd, 0xaf, 0x6a, 0x6b, 0xb3, 0x46, 0x88, 0x46, - 0xf7, 0x7f, 0x4e, 0x2e, 0xdc, 0x53, 0xdb, 0xea, 0x5d, 0x54, 0x67, 0x19, 0xc3, 0x2c, 0x25, 0x1c, - 0x5a, 0xbd, 0x0b, 0x76, 0x68, 0xf6, 0xd0, 0xf2, 0xf8, 0x6c, 0x81, 0xcd, 0x16, 0x18, 0x85, 0x4e, - 0xe3, 0x75, 0x28, 0xf8, 0xfa, 0xa3, 0x59, 0xc8, 0x1d, 0x1c, 0x1e, 0x34, 
0x2a, 0x53, 0x08, 0x60, - 0x66, 0xf3, 0x68, 0xbb, 0x71, 0x50, 0xaf, 0x68, 0xa8, 0x08, 0xf9, 0x7a, 0x83, 0x0f, 0x32, 0x78, - 0x0b, 0x20, 0xd0, 0x14, 0xe5, 0x21, 0xbb, 0xd7, 0xf8, 0x6e, 0x65, 0x8a, 0xf2, 0x9c, 0x34, 0x8c, - 0xa3, 0xdd, 0xc3, 0x83, 0x8a, 0x46, 0x17, 0x6f, 0x1b, 0x8d, 0xcd, 0xe3, 0x46, 0x25, 0x43, 0x39, - 0x3e, 0x3c, 0xac, 0x57, 0xb2, 0xa8, 0x00, 0xd3, 0x27, 0x9b, 0xfb, 0xcf, 0x1a, 0x95, 0x1c, 0xfe, - 0x52, 0x83, 0x39, 0xb1, 0x77, 0x7e, 0xbf, 0xd0, 0x7b, 0x30, 0xd3, 0x61, 0x77, 0x8c, 0x1d, 0x6b, - 0x71, 0x63, 0x39, 0x62, 0xa8, 0xd0, 0x3d, 0x34, 0x04, 0x2f, 0xc2, 0x90, 0x3d, 0x1f, 0xb9, 0xd5, - 0xcc, 0x6a, 0x76, 0xad, 0xb8, 0x51, 0x59, 0xe7, 0x97, 0x7f, 0x7d, 0x8f, 0x5c, 0x9c, 0x98, 0xbd, - 0x21, 0x31, 0xe8, 0x24, 0x42, 0x90, 0xeb, 0xdb, 0x0e, 0x61, 0xa7, 0x3f, 0x6b, 0xb0, 0x67, 0x7a, - 0x25, 0x98, 0x01, 0xc4, 0xc9, 0xf3, 0x01, 0xfe, 0x00, 0xe0, 0xe9, 0xd0, 0x4b, 0xbe, 0x65, 0x8b, - 0x30, 0x3d, 0xa2, 0x72, 0xc5, 0x0d, 0xe3, 0x03, 0x76, 0xbd, 0x88, 0xe9, 0x12, 0xff, 0x7a, 0xd1, - 0x01, 0xde, 0x86, 0x22, 0x93, 0x95, 0x66, 0x7b, 0x78, 0x1b, 0x50, 0x9d, 0xf4, 0x88, 0x47, 0x52, - 0x5c, 0x7f, 0x4c, 0x60, 0x21, 0x24, 0x24, 0x95, 0xc1, 0xab, 0x90, 0x6f, 0x31, 0x61, 0x1c, 0x27, - 0x6b, 0xc8, 0x21, 0xfe, 0xbb, 0x06, 0x05, 0xa1, 0xe1, 0xe1, 0x00, 0x6d, 0xc2, 0x9c, 0xc3, 0x07, - 0xa7, 0x4c, 0x11, 0x01, 0xa2, 0x27, 0x5f, 0xff, 0x9d, 0x29, 0xa3, 0x24, 0x96, 0x30, 0x32, 0xfa, - 0x16, 0x14, 0xa5, 0x88, 0xc1, 0xd0, 0x63, 0x70, 0xc5, 0x8d, 0x6a, 0x58, 0x40, 0x70, 0x5c, 0x3b, - 0x53, 0x06, 0x08, 0xf6, 0xa7, 0x43, 0x0f, 0x1d, 0xc3, 0xa2, 0x5c, 0xcc, 0x15, 0x14, 0x6a, 0x64, - 0x99, 0x94, 0xd5, 0xb0, 0x94, 0x71, 0x1b, 0xef, 0x4c, 0x19, 0x48, 0xac, 0x57, 0x26, 0xb7, 0x0a, - 0x90, 0x17, 0x54, 0xfc, 0x0f, 0x0d, 0x40, 0xda, 0xe8, 0x70, 0x80, 0xea, 0x50, 0x76, 0xc4, 0x28, - 0xb4, 0xe1, 0x9b, 0xb1, 0x1b, 0x16, 0xa6, 0x9d, 0x32, 0xe6, 0xe4, 0x22, 0xbe, 0xe5, 0xf7, 0xa1, - 0xe4, 0x4b, 0x09, 0xf6, 0xbc, 0x14, 0xb3, 0x67, 0x5f, 0x42, 0x51, 0x2e, 0xa0, 0xbb, 0xfe, 0x18, - 0xae, 0xfb, 
0xeb, 0x63, 0xb6, 0x7d, 0x67, 0xc2, 0xb6, 0x7d, 0x81, 0x0b, 0x52, 0x82, 0xba, 0x71, - 0xa0, 0xc1, 0x92, 0x93, 0xf1, 0xaf, 0xb2, 0x90, 0xdf, 0xb6, 0xfb, 0x03, 0xd3, 0xa1, 0x67, 0x34, - 0xe3, 0x10, 0x77, 0xd8, 0xf3, 0xd8, 0x76, 0xcb, 0x1b, 0x77, 0xc3, 0x08, 0x82, 0x4d, 0xfe, 0x37, - 0x18, 0xab, 0x21, 0x96, 0xd0, 0xc5, 0x22, 0x36, 0x66, 0xae, 0xb0, 0x58, 0x44, 0x46, 0xb1, 0x44, - 0x3a, 0x41, 0x36, 0x70, 0x02, 0x1d, 0xf2, 0x23, 0xe2, 0x04, 0xf1, 0x7c, 0x67, 0xca, 0x90, 0x04, - 0xf4, 0x3a, 0xcc, 0x37, 0x1d, 0x62, 0x52, 0x7b, 0xc8, 0x98, 0x3f, 0x2d, 0x78, 0xca, 0x7c, 0xc2, - 0x90, 0xb1, 0xff, 0x2e, 0x94, 0xfa, 0x76, 0x2b, 0xe0, 0x9b, 0x11, 0x7c, 0xc5, 0xbe, 0xdd, 0xf2, - 0x99, 0x6e, 0xc8, 0x48, 0x40, 0x83, 0x71, 0x69, 0x67, 0x4a, 0xc4, 0x02, 0xfc, 0x0e, 0xcc, 0x85, - 0xf6, 0x4a, 0x63, 0x5e, 0xe3, 0xa3, 0x67, 0x9b, 0xfb, 0x3c, 0x40, 0x3e, 0x61, 0x31, 0xd1, 0xa8, - 0x68, 0x34, 0xce, 0xee, 0x37, 0x8e, 0x8e, 0x2a, 0x19, 0xfc, 0x6d, 0x7f, 0x89, 0x88, 0xa8, 0x4a, - 0x20, 0x9d, 0x52, 0x02, 0xa9, 0x26, 0x03, 0x69, 0x26, 0x08, 0xa4, 0xd9, 0xad, 0x32, 0x94, 0xb8, - 0x41, 0x4e, 0x87, 0x56, 0xd7, 0xb6, 0xf0, 0x6f, 0x34, 0x80, 0xe3, 0x97, 0x96, 0x0c, 0x15, 0x35, - 0xc8, 0x37, 0xb9, 0xf0, 0xaa, 0xc6, 0x62, 0xe4, 0xf5, 0x58, 0x1b, 0x1b, 0x92, 0x0b, 0xbd, 0x03, - 0x79, 0x77, 0xd8, 0x6c, 0x12, 0x57, 0x06, 0xd5, 0xd7, 0xa2, 0x61, 0x41, 0x78, 0xb8, 0x21, 0xf9, - 0xe8, 0x92, 0xe7, 0x66, 0xb7, 0x37, 0x64, 0x21, 0x76, 0xf2, 0x12, 0xc1, 0x87, 0x7f, 0xa9, 0x41, - 0x91, 0x69, 0x99, 0x2a, 0x16, 0x2d, 0x43, 0x81, 0xe9, 0x40, 0x5a, 0x22, 0x1a, 0xcd, 0x1a, 0x01, - 0x01, 0xfd, 0x3f, 0x14, 0xe4, 0x95, 0x75, 0x85, 0x62, 0xd5, 0x78, 0xb1, 0x87, 0x03, 0x23, 0x60, - 0xc5, 0x7b, 0x70, 0x8d, 0x59, 0xa5, 0x49, 0x53, 0x21, 0x69, 0x47, 0x35, 0x59, 0xd0, 0x22, 0xc9, - 0x82, 0x0e, 0xb3, 0x83, 0xce, 0x85, 0xdb, 0x6d, 0x9a, 0x3d, 0xa1, 0x85, 0x3f, 0xc6, 0x1f, 0x00, - 0x52, 0x85, 0xa5, 0x7a, 0x19, 0xcc, 0x41, 0x71, 0xc7, 0x74, 0x3b, 0x42, 0x25, 0xfc, 0x09, 0x94, - 0xf8, 0x30, 0x95, 0x0d, 0x11, 0xe4, 0x3a, 0xa6, 
0xdb, 0x61, 0x8a, 0xcf, 0x19, 0xec, 0x19, 0x5f, - 0x83, 0xf9, 0x23, 0xcb, 0x1c, 0xb8, 0x1d, 0x5b, 0x06, 0x57, 0x9a, 0x0a, 0x56, 0x02, 0x5a, 0x2a, - 0xc4, 0x87, 0x30, 0xef, 0x90, 0xbe, 0xd9, 0xb5, 0xba, 0x56, 0xfb, 0xf4, 0xec, 0xc2, 0x23, 0xae, - 0xc8, 0x14, 0xcb, 0x3e, 0x79, 0x8b, 0x52, 0xa9, 0x6a, 0x67, 0x3d, 0xfb, 0x4c, 0xb8, 0x38, 0x7b, - 0xc6, 0xbf, 0xd5, 0xa0, 0xf4, 0xb1, 0xe9, 0x35, 0xa5, 0x15, 0xd0, 0x2e, 0x94, 0x7d, 0xc7, 0x66, - 0x14, 0xa1, 0x4b, 0x24, 0xc2, 0xb3, 0x35, 0xdb, 0xc2, 0xd1, 0x65, 0x84, 0x9f, 0x6b, 0xaa, 0x04, - 0x26, 0xca, 0xb4, 0x9a, 0xa4, 0xe7, 0x8b, 0xca, 0x24, 0x8b, 0x62, 0x8c, 0xaa, 0x28, 0x95, 0xb0, - 0x35, 0x1f, 0xbc, 0xfd, 0xb8, 0x5b, 0x7e, 0xa9, 0x01, 0x1a, 0xd7, 0xe1, 0xeb, 0x26, 0xb2, 0xf7, - 0xa1, 0xec, 0x7a, 0xa6, 0xe3, 0x9d, 0x46, 0xf2, 0xe8, 0x39, 0x46, 0xf5, 0x83, 0xd3, 0x43, 0x98, - 0x1f, 0x38, 0x76, 0xdb, 0x21, 0xae, 0x7b, 0x6a, 0xd9, 0x5e, 0xf7, 0xf9, 0x05, 0x0b, 0x88, 0xb3, - 0x46, 0x59, 0x92, 0x0f, 0x18, 0x15, 0xd7, 0xa4, 0x52, 0xaa, 0xf2, 0x68, 0x09, 0x66, 0x5f, 0x50, - 0xaa, 0xcc, 0xf0, 0xb3, 0x46, 0x9e, 0x8d, 0x77, 0x5b, 0xf8, 0x6f, 0x1a, 0xcc, 0x09, 0xf3, 0xa7, - 0xba, 0x03, 0x2a, 0x44, 0x26, 0x04, 0x41, 0x13, 0x0c, 0x7e, 0x2c, 0x2d, 0x91, 0xb0, 0xc9, 0x21, - 0xf5, 0x33, 0x6e, 0x65, 0xd2, 0x12, 0xfb, 0xf1, 0xc7, 0xe8, 0x75, 0xa8, 0x34, 0xb9, 0x9f, 0x45, - 0x02, 0xbc, 0x31, 0x2f, 0xe8, 0xbe, 0x75, 0xee, 0xc3, 0x0c, 0x19, 0x11, 0xcb, 0x73, 0xab, 0x45, - 0x16, 0x14, 0xe6, 0x64, 0xd6, 0xd8, 0xa0, 0x54, 0x43, 0x4c, 0xe2, 0xff, 0x83, 0x6b, 0xfb, 0x34, - 0x91, 0x7b, 0xe2, 0x98, 0x96, 0x9a, 0x12, 0x1e, 0x1f, 0xef, 0x0b, 0xab, 0x64, 0xbd, 0xe3, 0x7d, - 0x54, 0x86, 0xcc, 0x6e, 0x5d, 0xec, 0x21, 0xd3, 0xad, 0xe3, 0xcf, 0x35, 0x40, 0xea, 0xba, 0x54, - 0x66, 0x8a, 0x08, 0x97, 0xf0, 0xd9, 0x00, 0x7e, 0x11, 0xa6, 0x89, 0xe3, 0xd8, 0x0e, 0x33, 0x48, - 0xc1, 0xe0, 0x03, 0x7c, 0x4f, 0xe8, 0x60, 0x90, 0x91, 0x7d, 0xee, 0x5f, 0x36, 0x2e, 0x4d, 0xf3, - 0x55, 0xdd, 0x83, 0x85, 0x10, 0x57, 0xaa, 0xe0, 0xf4, 0x10, 0xae, 0x33, 0x61, 0x7b, 
0x84, 0x0c, - 0x36, 0x7b, 0xdd, 0x51, 0x22, 0xea, 0x00, 0x6e, 0x44, 0x19, 0xbf, 0x59, 0x1b, 0xe1, 0x0e, 0xcc, - 0x7c, 0xc8, 0x6a, 0x50, 0x45, 0x97, 0x1c, 0xe3, 0x45, 0x90, 0xb3, 0xcc, 0x3e, 0x4f, 0xe7, 0x0b, - 0x06, 0x7b, 0x66, 0xd1, 0x9c, 0x10, 0xe7, 0x99, 0xb1, 0xcf, 0xdf, 0x1a, 0x05, 0xc3, 0x1f, 0xa3, - 0x15, 0x5a, 0xfd, 0x76, 0x89, 0xe5, 0xb1, 0xd9, 0x1c, 0x9b, 0x55, 0x28, 0x78, 0x1d, 0x2a, 0x1c, - 0x69, 0xb3, 0xd5, 0x52, 0xde, 0x1c, 0xbe, 0x3c, 0x2d, 0x2c, 0x0f, 0xbf, 0x80, 0x6b, 0x0a, 0x7f, - 0x2a, 0x33, 0xbc, 0x09, 0x33, 0xbc, 0xd0, 0x16, 0x41, 0x6b, 0x31, 0xbc, 0x8a, 0xc3, 0x18, 0x82, - 0x07, 0xdf, 0x87, 0x05, 0x41, 0x21, 0x7d, 0x3b, 0xee, 0xac, 0x98, 0x7d, 0xf0, 0x3e, 0x2c, 0x86, - 0xd9, 0x52, 0x5d, 0x91, 0x4d, 0x09, 0xfa, 0x6c, 0xd0, 0x52, 0x62, 0x60, 0xf4, 0x50, 0x54, 0x83, - 0x65, 0x22, 0x06, 0xf3, 0x15, 0x92, 0x22, 0x52, 0x29, 0xb4, 0x20, 0xcd, 0xbf, 0xdf, 0x75, 0xfd, - 0x37, 0xdd, 0x67, 0x80, 0x54, 0x62, 0xaa, 0x43, 0x59, 0x87, 0x3c, 0x37, 0xb8, 0x4c, 0xa6, 0xe2, - 0x4f, 0x45, 0x32, 0x51, 0x85, 0xea, 0xe4, 0xb9, 0x63, 0xb6, 0xfb, 0xc4, 0x8f, 0x39, 0x34, 0x85, - 0x50, 0x89, 0xa9, 0x76, 0xfc, 0x47, 0x0d, 0x4a, 0x9b, 0x3d, 0xd3, 0xe9, 0x4b, 0xe3, 0xbf, 0x0f, - 0x33, 0x3c, 0x37, 0x11, 0xf9, 0xfb, 0x83, 0xb0, 0x18, 0x95, 0x97, 0x0f, 0x36, 0x79, 0x26, 0x23, - 0x56, 0xd1, 0xc3, 0x12, 0xfd, 0x9d, 0x7a, 0xa4, 0xdf, 0x53, 0x47, 0x6f, 0xc1, 0xb4, 0x49, 0x97, - 0x30, 0x5f, 0x2c, 0x47, 0xb3, 0x42, 0x26, 0xed, 0xf8, 0x62, 0x40, 0x0c, 0xce, 0x85, 0xdf, 0x83, - 0xa2, 0x82, 0x40, 0x93, 0xdd, 0x27, 0x8d, 0xe3, 0xca, 0x14, 0x2a, 0xc1, 0xec, 0xe6, 0xf6, 0xf1, - 0xee, 0x09, 0xcf, 0x81, 0xcb, 0x00, 0xf5, 0x86, 0x3f, 0xce, 0xe0, 0x4f, 0xc4, 0x2a, 0xe1, 0xe1, - 0xaa, 0x3e, 0x5a, 0x92, 0x3e, 0x99, 0x2b, 0xe9, 0xf3, 0x12, 0xe6, 0xc4, 0xf6, 0x53, 0xdd, 0x81, - 0x77, 0x60, 0x86, 0xc9, 0x93, 0x57, 0x60, 0x29, 0x06, 0x56, 0x7a, 0x27, 0x67, 0xc4, 0xf3, 0x30, - 0x77, 0xe4, 0x99, 0xde, 0xd0, 0x95, 0x57, 0xe0, 0x0f, 0x1a, 0x94, 0x25, 0x25, 0x6d, 0xf5, 0x2e, - 0x4b, 0x24, 0x1e, 0xf3, 
0xfc, 0x02, 0xe9, 0x06, 0xcc, 0xb4, 0xce, 0x8e, 0xba, 0x9f, 0xc9, 0x2e, - 0x86, 0x18, 0x51, 0x7a, 0x8f, 0xe3, 0xf0, 0xae, 0x9c, 0x18, 0xd1, 0xdc, 0xdb, 0x31, 0x9f, 0x7b, - 0xbb, 0x56, 0x8b, 0xbc, 0x64, 0x6f, 0xda, 0x9c, 0x11, 0x10, 0x58, 0xba, 0x2c, 0xba, 0x77, 0xac, - 0x7e, 0x52, 0xbb, 0x79, 0x0b, 0x70, 0x6d, 0x73, 0xe8, 0x75, 0x1a, 0x96, 0x79, 0xd6, 0x93, 0x41, - 0x00, 0x2f, 0x02, 0xa2, 0xc4, 0x7a, 0xd7, 0x55, 0xa9, 0x0d, 0x58, 0xa0, 0x54, 0x62, 0x79, 0xdd, - 0xa6, 0x12, 0x31, 0x64, 0xd8, 0xd6, 0x22, 0x61, 0xdb, 0x74, 0xdd, 0x17, 0xb6, 0xd3, 0x12, 0x5b, - 0xf3, 0xc7, 0xb8, 0xce, 0x85, 0x3f, 0x73, 0x43, 0x81, 0xf9, 0xeb, 0x4a, 0x59, 0x0b, 0xa4, 0x3c, - 0x21, 0xde, 0x04, 0x29, 0xf8, 0x0d, 0xb8, 0x2e, 0x39, 0x45, 0x0d, 0x3d, 0x81, 0xf9, 0x10, 0x6e, - 0x49, 0xe6, 0xed, 0x0e, 0x4d, 0xf4, 0x9e, 0x0a, 0xc0, 0xff, 0x56, 0xcf, 0x2d, 0xa8, 0xfa, 0x7a, - 0xb2, 0x1c, 0xc4, 0xee, 0xa9, 0x0a, 0x0c, 0x5d, 0x71, 0x67, 0x0a, 0x06, 0x7b, 0xa6, 0x34, 0xc7, - 0xee, 0xf9, 0x2f, 0x41, 0xfa, 0x8c, 0xb7, 0x61, 0x49, 0xca, 0x10, 0xd9, 0x41, 0x58, 0xc8, 0x98, - 0x42, 0x71, 0x42, 0x84, 0xc1, 0xe8, 0xd2, 0xc9, 0x66, 0x57, 0x39, 0xc3, 0xa6, 0x65, 0x32, 0x35, - 0x45, 0xe6, 0x75, 0x7e, 0x23, 0xa8, 0x62, 0x6a, 0xd0, 0x16, 0x64, 0x2a, 0x40, 0x25, 0x8b, 0x83, - 0xa0, 0xe4, 0xb1, 0x83, 0x18, 0x13, 0xfd, 0x3d, 0x58, 0xf1, 0x95, 0xa0, 0x76, 0x7b, 0x4a, 0x9c, - 0x7e, 0xd7, 0x75, 0x95, 0x22, 0x30, 0x6e, 0xe3, 0x0f, 0x20, 0x37, 0x20, 0x22, 0xa6, 0x14, 0x37, - 0xd0, 0x3a, 0xef, 0xb1, 0xaf, 0x2b, 0x8b, 0xd9, 0x3c, 0x6e, 0xc1, 0x6d, 0x29, 0x9d, 0x5b, 0x34, - 0x56, 0x7c, 0x54, 0x29, 0x59, 0x20, 0x70, 0xb3, 0x8e, 0x17, 0x08, 0x59, 0x7e, 0xf6, 0x7e, 0xab, - 0xef, 0x03, 0x6e, 0x48, 0xe9, 0x5b, 0xa9, 0xde, 0x15, 0x7b, 0xdc, 0xa6, 0xbe, 0x4b, 0xa6, 0x12, - 0x76, 0x06, 0x8b, 0x61, 0x4f, 0x4e, 0x15, 0xc6, 0x16, 0x61, 0xda, 0xb3, 0xcf, 0x89, 0x0c, 0x62, - 0x7c, 0x20, 0x15, 0xf6, 0xdd, 0x3c, 0x95, 0xc2, 0x66, 0x20, 0x8c, 0x5d, 0xc9, 0xb4, 0xfa, 0xd2, - 0xd3, 0x94, 0xf9, 0x0c, 0x1f, 0xe0, 0x03, 0xb8, 0x11, 0x0d, 
0x13, 0xa9, 0x54, 0x3e, 0xe1, 0x17, - 0x38, 0x2e, 0x92, 0xa4, 0x92, 0xfb, 0x51, 0x10, 0x0c, 0x94, 0x80, 0x92, 0x4a, 0xa4, 0x01, 0x7a, - 0x5c, 0x7c, 0xf9, 0x5f, 0xdc, 0x57, 0x3f, 0xdc, 0xa4, 0x12, 0xe6, 0x06, 0xc2, 0xd2, 0x1f, 0x7f, - 0x10, 0x23, 0xb2, 0x13, 0x63, 0x84, 0x70, 0x92, 0x20, 0x8a, 0x7d, 0x03, 0x97, 0x4e, 0x60, 0x04, - 0x01, 0x34, 0x2d, 0x06, 0x7d, 0x87, 0xf8, 0x18, 0x6c, 0x20, 0x2f, 0xb6, 0x1a, 0x76, 0x53, 0x1d, - 0xc6, 0xc7, 0x41, 0xec, 0x1c, 0x8b, 0xcc, 0xa9, 0x04, 0x7f, 0x02, 0xab, 0xc9, 0x41, 0x39, 0x8d, - 0xe4, 0x47, 0x18, 0x0a, 0x7e, 0x42, 0xa9, 0x7c, 0x53, 0x2b, 0x42, 0xfe, 0xe0, 0xf0, 0xe8, 0xe9, - 0xe6, 0x76, 0xa3, 0xa2, 0x6d, 0xfc, 0x2b, 0x0b, 0x99, 0xbd, 0x13, 0xf4, 0x7d, 0x98, 0xe6, 0xcd, - 0xff, 0x09, 0xdf, 0x46, 0xf4, 0x49, 0x9f, 0x11, 0xf0, 0xf2, 0xe7, 0x7f, 0xfa, 0xeb, 0x97, 0x99, - 0x1b, 0xf8, 0x5a, 0x6d, 0xf4, 0xae, 0xd9, 0x1b, 0x74, 0xcc, 0xda, 0xf9, 0xa8, 0xc6, 0xde, 0x09, - 0x8f, 0xb5, 0x47, 0xe8, 0x04, 0xb2, 0x4f, 0x87, 0x1e, 0x4a, 0xfc, 0x70, 0xa2, 0x27, 0x7f, 0x5e, - 0xc0, 0x3a, 0x93, 0xbc, 0x88, 0xe7, 0x55, 0xc9, 0x83, 0xa1, 0x47, 0xe5, 0x8e, 0xa0, 0xa8, 0x7c, - 0x21, 0x40, 0x97, 0x7e, 0x52, 0xd1, 0x2f, 0xff, 0xfa, 0x80, 0x31, 0xc3, 0x5b, 0xc6, 0xaf, 0xa9, - 0x78, 0xfc, 0x43, 0x86, 0xba, 0x9f, 0xe3, 0x97, 0x56, 0x74, 0x3f, 0x41, 0xcf, 0x3b, 0xba, 0x1f, - 0xa5, 0xcf, 0x1c, 0xbf, 0x1f, 0xef, 0xa5, 0x45, 0xe5, 0xda, 0xe2, 0xab, 0x46, 0xd3, 0x43, 0xb7, - 0x63, 0x9a, 0xe4, 0x6a, 0x3b, 0x58, 0x5f, 0x4d, 0x66, 0x10, 0x48, 0x77, 0x18, 0xd2, 0x4d, 0x7c, - 0x43, 0x45, 0x6a, 0xfa, 0x7c, 0x8f, 0xb5, 0x47, 0x1b, 0x1d, 0x98, 0x66, 0xbd, 0x34, 0x74, 0x2a, - 0x1f, 0xf4, 0x98, 0x4e, 0x63, 0xc2, 0x0d, 0x08, 0x75, 0xe1, 0xf0, 0x12, 0x43, 0x5b, 0xc0, 0x65, - 0x1f, 0x8d, 0xb5, 0xd3, 0x1e, 0x6b, 0x8f, 0xd6, 0xb4, 0xb7, 0xb5, 0x8d, 0x7f, 0x66, 0x60, 0x9a, - 0x35, 0x5d, 0xd0, 0x00, 0x20, 0xe8, 0x4e, 0x45, 0xf7, 0x39, 0xd6, 0xef, 0x8a, 0xee, 0x73, 0xbc, - 0xb1, 0x85, 0x6f, 0x33, 0xe4, 0x25, 0xbc, 0xe8, 0x23, 0xb3, 0xcf, 0x9f, 0xb5, 0x36, 0xe5, 0xa2, - 
0x66, 0x7d, 0x01, 0x45, 0xa5, 0xcb, 0x84, 0xe2, 0x24, 0x86, 0xda, 0x54, 0xd1, 0x6b, 0x12, 0xd3, - 0xa2, 0xc2, 0x77, 0x19, 0xe8, 0x2d, 0x5c, 0x55, 0x8d, 0xcb, 0x71, 0x1d, 0xc6, 0x49, 0x81, 0x7f, - 0xa2, 0x41, 0x39, 0xdc, 0x69, 0x42, 0x77, 0x63, 0x44, 0x47, 0x1b, 0x56, 0xfa, 0xbd, 0xc9, 0x4c, - 0x89, 0x2a, 0x70, 0xfc, 0x73, 0x42, 0x06, 0x26, 0xe5, 0x94, 0xb6, 0xff, 0x77, 0x16, 0xf2, 0xdb, - 0xfc, 0x07, 0x12, 0xc8, 0x83, 0x82, 0xdf, 0xef, 0x41, 0x2b, 0x71, 0xbd, 0x80, 0x20, 0x51, 0xd6, - 0x6f, 0x27, 0xce, 0x0b, 0x15, 0x1e, 0x30, 0x15, 0x56, 0xf1, 0x4d, 0x5f, 0x05, 0xf1, 0x43, 0x8c, - 0x1a, 0x2f, 0x79, 0x6b, 0x66, 0xab, 0x45, 0x0d, 0xf1, 0x63, 0x0d, 0x4a, 0x6a, 0x1b, 0x07, 0xdd, - 0x89, 0xed, 0x42, 0xa8, 0x9d, 0x20, 0x1d, 0x4f, 0x62, 0x11, 0xf8, 0xaf, 0x33, 0xfc, 0xbb, 0x78, - 0x25, 0x09, 0xdf, 0x61, 0xfc, 0x61, 0x15, 0x78, 0xe3, 0x26, 0x5e, 0x85, 0x50, 0x5f, 0x28, 0x5e, - 0x85, 0x70, 0xdf, 0xe7, 0x72, 0x15, 0x86, 0x8c, 0x9f, 0xaa, 0xf0, 0x12, 0x20, 0xe8, 0xeb, 0xa0, - 0x58, 0xe3, 0x2a, 0xa5, 0x43, 0xf4, 0xe6, 0x8f, 0xb7, 0x84, 0xf0, 0x43, 0x86, 0x7d, 0x07, 0x2f, - 0x27, 0x61, 0xf7, 0xba, 0x2e, 0xf5, 0x80, 0x8d, 0xdf, 0xe5, 0xa0, 0xf8, 0xa1, 0xd9, 0xb5, 0x3c, - 0x62, 0x99, 0x56, 0x93, 0xa0, 0x36, 0x4c, 0xb3, 0x77, 0x43, 0xd4, 0xdd, 0xd5, 0x66, 0x4b, 0xd4, - 0xdd, 0x43, 0x9d, 0x08, 0x7c, 0x9f, 0x41, 0xdf, 0xc6, 0xba, 0x0f, 0xdd, 0x0f, 0xe4, 0xd7, 0x58, - 0x17, 0x81, 0x6e, 0xf9, 0x1c, 0x66, 0x78, 0xd7, 0x00, 0x45, 0xa4, 0x85, 0xba, 0x0b, 0xfa, 0x72, - 0xfc, 0x64, 0xe2, 0x2d, 0x53, 0xb1, 0x5c, 0xc6, 0x4c, 0xc1, 0x7e, 0x00, 0x10, 0xb4, 0xa9, 0xa2, - 0xf6, 0x1d, 0xeb, 0x6a, 0xe9, 0xab, 0xc9, 0x0c, 0x02, 0xf8, 0x11, 0x03, 0xbe, 0x87, 0x6f, 0xc7, - 0x02, 0xb7, 0xfc, 0x05, 0x14, 0xbc, 0x09, 0xb9, 0x1d, 0xd3, 0xed, 0xa0, 0x48, 0xe8, 0x57, 0x3e, - 0x97, 0xe9, 0x7a, 0xdc, 0x94, 0x80, 0xba, 0xc7, 0xa0, 0x56, 0xf0, 0x52, 0x2c, 0x54, 0xc7, 0x74, - 0x69, 0x24, 0x45, 0x43, 0x98, 0x95, 0x9f, 0xc0, 0xd0, 0xad, 0x88, 0xcd, 0xc2, 0x9f, 0xcb, 0xf4, - 0x95, 0xa4, 0x69, 0x01, 0xb8, 0xc6, 
0x00, 0x31, 0xbe, 0x15, 0x6f, 0x54, 0xc1, 0xfe, 0x58, 0x7b, - 0xf4, 0xb6, 0xb6, 0xf1, 0xb3, 0x0a, 0xe4, 0x68, 0x96, 0x42, 0x63, 0x77, 0x50, 0xdc, 0x45, 0x2d, - 0x3c, 0xd6, 0x52, 0x89, 0x5a, 0x78, 0xbc, 0x2e, 0x8c, 0x89, 0xdd, 0xec, 0x67, 0x62, 0x84, 0x71, - 0xd1, 0x1d, 0x7b, 0x50, 0x54, 0x4a, 0x40, 0x14, 0x23, 0x31, 0xdc, 0xb0, 0x89, 0xc6, 0xee, 0x98, - 0xfa, 0x11, 0xaf, 0x32, 0x50, 0x1d, 0x5f, 0x0f, 0x83, 0xb6, 0x38, 0x1b, 0x45, 0xfd, 0x21, 0x94, - 0xd4, 0x5a, 0x11, 0xc5, 0x08, 0x8d, 0x74, 0x84, 0xa2, 0xb1, 0x22, 0xae, 0xd4, 0x8c, 0x71, 0x1a, - 0xff, 0x47, 0x71, 0x92, 0x97, 0xa2, 0x7f, 0x0a, 0x79, 0x51, 0x41, 0xc6, 0xed, 0x37, 0xdc, 0x43, - 0x8a, 0xdb, 0x6f, 0xa4, 0xfc, 0x8c, 0x49, 0x04, 0x18, 0x2c, 0xcd, 0x94, 0x65, 0x80, 0x16, 0x90, - 0x4f, 0x88, 0x97, 0x04, 0x19, 0x74, 0x45, 0x92, 0x20, 0x95, 0x2a, 0x65, 0x22, 0x64, 0x9b, 0x78, - 0xe2, 0x2e, 0xcb, 0x12, 0x00, 0x25, 0x48, 0x54, 0xa3, 0x21, 0x9e, 0xc4, 0x92, 0x98, 0xbb, 0x05, - 0xa8, 0x22, 0x14, 0xa2, 0x1f, 0x01, 0x04, 0xe5, 0x6e, 0xf4, 0x75, 0x1c, 0xdb, 0x33, 0x8b, 0xbe, - 0x8e, 0xe3, 0x2b, 0xe6, 0x18, 0x0f, 0x0e, 0xc0, 0x79, 0xfe, 0x48, 0xe1, 0x7f, 0xae, 0x01, 0x1a, - 0x2f, 0x8f, 0xd1, 0x1b, 0xf1, 0x10, 0xb1, 0xed, 0x38, 0xfd, 0xcd, 0xab, 0x31, 0x27, 0x46, 0xcf, - 0x40, 0xaf, 0x26, 0x5b, 0x32, 0x78, 0x41, 0x35, 0xfb, 0x42, 0x83, 0xb9, 0x50, 0x81, 0x8d, 0x1e, - 0x24, 0x9c, 0x73, 0xa4, 0xa5, 0xa7, 0x3f, 0xbc, 0x94, 0x2f, 0x31, 0x63, 0x51, 0x6e, 0x85, 0xcc, - 0xd6, 0x7e, 0xaa, 0x41, 0x39, 0x5c, 0x95, 0xa3, 0x04, 0x80, 0xb1, 0xbe, 0xa0, 0xbe, 0x76, 0x39, - 0xe3, 0x15, 0x4e, 0x2b, 0x48, 0xe0, 0x3e, 0x85, 0xbc, 0x28, 0xe6, 0xe3, 0xdc, 0x22, 0xdc, 0x56, - 0x8c, 0x73, 0x8b, 0x48, 0x27, 0x20, 0xc9, 0x2d, 0x68, 0x5d, 0xac, 0x78, 0xa2, 0x28, 0xf9, 0x93, - 0x20, 0x27, 0x7b, 0x62, 0xa4, 0x5f, 0x30, 0x11, 0x32, 0xf0, 0x44, 0x59, 0xf0, 0xa3, 0x04, 0x89, - 0x97, 0x78, 0x62, 0xb4, 0x5f, 0x90, 0xe4, 0x89, 0x0c, 0x55, 0xf1, 0xc4, 0xa0, 0x3e, 0x8f, 0xf3, - 0xc4, 0xb1, 0xa6, 0x69, 0x9c, 0x27, 0x8e, 0x97, 0xf8, 0x49, 0x67, 0xcb, 
0xc0, 0x43, 0x9e, 0xb8, - 0x10, 0x53, 0xcf, 0xa3, 0x37, 0x13, 0x6c, 0x1a, 0xdb, 0x90, 0xd5, 0xdf, 0xba, 0x22, 0xf7, 0x64, - 0x0f, 0xe0, 0xa7, 0x21, 0x3d, 0xe0, 0xd7, 0x1a, 0x2c, 0xc6, 0x35, 0x04, 0x50, 0x02, 0x58, 0x42, - 0x37, 0x57, 0x5f, 0xbf, 0x2a, 0xfb, 0x15, 0xec, 0xe6, 0xfb, 0xc4, 0x56, 0xe5, 0xf7, 0xaf, 0x56, - 0xb4, 0xaf, 0x5e, 0xad, 0x68, 0x7f, 0x7e, 0xb5, 0xa2, 0xfd, 0xe2, 0x2f, 0x2b, 0x53, 0x67, 0x33, - 0xec, 0xb7, 0xda, 0xef, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x45, 0x26, 0x54, 0x69, 0x32, 0x2e, - 0x00, 0x00, + // 3391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 0x92, 0x14, 0x29, 0x3e, 0x52, 0x14, 0x3d, 0x92, 0x6d, 0x6a, 0x6d, 0xcb, 0xf2, 0xf8, + 0x97, 0x6c, 0x27, 0x52, 0xa2, 0xe4, 0xfb, 0x3d, 0xb8, 0x41, 0x00, 0x59, 0x62, 0x2c, 0x55, 0xb2, + 0xe4, 0xac, 0x64, 0x27, 0x05, 0x82, 0x0a, 0x2b, 0x72, 0x2c, 0x2e, 0x44, 0xee, 0x32, 0xbb, 0x4b, + 0x5a, 0x4a, 0x5b, 0xa0, 0x48, 0x13, 0x14, 0xed, 0xb1, 0x39, 0xb4, 0x4d, 0x8f, 0x45, 0xff, 0x84, + 0xde, 0xfa, 0x07, 0x14, 0xbd, 0xb4, 0x40, 0xff, 0x81, 0x22, 0xed, 0xa1, 0x87, 0xde, 0x7b, 0x2a, + 0x5a, 0xcc, 0xaf, 0xdd, 0xd9, 0xe5, 0x2e, 0xe5, 0x74, 0x9b, 0x8b, 0xb5, 0x33, 0xf3, 0xe6, 0x7d, + 0xde, 0x7b, 0x33, 0xef, 0xcd, 0x9b, 0x37, 0x34, 0x94, 0xdd, 0x7e, 0x6b, 0xb9, 0xef, 0x3a, 0xbe, + 0x83, 0xaa, 0xc4, 0x6f, 0xb5, 0x3d, 0xe2, 0x0e, 0x89, 0xdb, 0x3f, 0xd2, 0xe7, 0x8e, 0x9d, 0x63, + 0x87, 0x0d, 0xac, 0xd0, 0x2f, 0x4e, 0xa3, 0xcf, 0x53, 0x9a, 0x95, 0xde, 0xb0, 0xd5, 0x62, 0xff, + 0xf4, 0x8f, 0x56, 0x4e, 0x86, 0x62, 0xe8, 0x0a, 0x1b, 0x32, 0x07, 0x7e, 0x87, 0xfd, 0xd3, 0x3f, + 0x62, 0x7f, 0xc4, 0xe0, 0xd5, 0x63, 0xc7, 0x39, 0xee, 0x92, 0x15, 0xb3, 0x6f, 0xad, 0x98, 0xb6, + 0xed, 0xf8, 0xa6, 0x6f, 0x39, 0xb6, 0xc7, 0x47, 0xf1, 0xe7, 0x1a, 0xd4, 0x0c, 0xe2, 0xf5, 0x1d, + 0xdb, 0x23, 0x9b, 0xc4, 0x6c, 0x13, 0x17, 0x5d, 0x03, 0x68, 0x75, 0x07, 0x9e, 0x4f, 0xdc, 0x43, + 0xab, 0xdd, 0xd0, 0x16, 0xb5, 0xa5, 0x82, 0x51, 
0x16, 0x3d, 0x5b, 0x6d, 0x74, 0x05, 0xca, 0x3d, + 0xd2, 0x3b, 0xe2, 0xa3, 0x39, 0x36, 0x3a, 0xc5, 0x3b, 0xb6, 0xda, 0x48, 0x87, 0x29, 0x97, 0x0c, + 0x2d, 0xcf, 0x72, 0xec, 0x46, 0x7e, 0x51, 0x5b, 0xca, 0x1b, 0x41, 0x9b, 0x4e, 0x74, 0xcd, 0x17, + 0xfe, 0xa1, 0x4f, 0xdc, 0x5e, 0xa3, 0xc0, 0x27, 0xd2, 0x8e, 0x03, 0xe2, 0xf6, 0xf0, 0x67, 0x93, + 0x50, 0x35, 0x4c, 0xfb, 0x98, 0x18, 0xe4, 0xe3, 0x01, 0xf1, 0x7c, 0x54, 0x87, 0xfc, 0x09, 0x39, + 0x63, 0xf0, 0x55, 0x83, 0x7e, 0xf2, 0xf9, 0xf6, 0x31, 0x39, 0x24, 0x36, 0x07, 0xae, 0xd2, 0xf9, + 0xf6, 0x31, 0x69, 0xda, 0x6d, 0x34, 0x07, 0x93, 0x5d, 0xab, 0x67, 0xf9, 0x02, 0x95, 0x37, 0x22, + 0xe2, 0x14, 0x62, 0xe2, 0xac, 0x03, 0x78, 0x8e, 0xeb, 0x1f, 0x3a, 0x6e, 0x9b, 0xb8, 0x8d, 0xc9, + 0x45, 0x6d, 0xa9, 0xb6, 0x7a, 0x6b, 0x59, 0x5d, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xc7, 0xf5, + 0xf7, 0x28, 0xad, 0x51, 0xf6, 0xe4, 0x27, 0x7a, 0x0f, 0x2a, 0x8c, 0x89, 0x6f, 0xba, 0xc7, 0xc4, + 0x6f, 0x14, 0x19, 0x97, 0xdb, 0xe7, 0x70, 0x39, 0x60, 0xc4, 0x06, 0x83, 0xe7, 0xdf, 0x08, 0x43, + 0xd5, 0x23, 0xae, 0x65, 0x76, 0xad, 0x4f, 0xcc, 0xa3, 0x2e, 0x69, 0x94, 0x16, 0xb5, 0xa5, 0x29, + 0x23, 0xd2, 0x47, 0xf5, 0x3f, 0x21, 0x67, 0xde, 0xa1, 0x63, 0x77, 0xcf, 0x1a, 0x53, 0x8c, 0x60, + 0x8a, 0x76, 0xec, 0xd9, 0xdd, 0x33, 0xb6, 0x68, 0xce, 0xc0, 0xf6, 0xf9, 0x68, 0x99, 0x8d, 0x96, + 0x59, 0x0f, 0x1b, 0x5e, 0x82, 0x7a, 0xcf, 0xb2, 0x0f, 0x7b, 0x4e, 0xfb, 0x30, 0x30, 0x08, 0x30, + 0x83, 0xd4, 0x7a, 0x96, 0xfd, 0xc4, 0x69, 0x1b, 0xd2, 0x2c, 0x94, 0xd2, 0x3c, 0x8d, 0x52, 0x56, + 0x04, 0xa5, 0x79, 0xaa, 0x52, 0x2e, 0xc3, 0x2c, 0xe5, 0xd9, 0x72, 0x89, 0xe9, 0x93, 0x90, 0xb8, + 0xca, 0x88, 0x2f, 0xf4, 0x2c, 0x7b, 0x9d, 0x8d, 0x44, 0xe8, 0xcd, 0xd3, 0x11, 0xfa, 0x69, 0x41, + 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x03, 0x9b, 0xa3, 0x29, 0x28, 0xec, 0xee, 0xed, 0x36, + 0xeb, 0x13, 0x08, 0xa0, 0xb8, 0xb6, 0xbf, 0xde, 0xdc, 0xdd, 0xa8, 0x6b, 0xa8, 0x02, 0xa5, 0x8d, + 0x26, 0x6f, 0xe4, 0xf0, 0x23, 0x80, 0xd0, 0xba, 0xa8, 0x04, 0xf9, 0xed, 0xe6, 0x77, 
0xea, 0x13, + 0x94, 0xe6, 0x79, 0xd3, 0xd8, 0xdf, 0xda, 0xdb, 0xad, 0x6b, 0x74, 0xf2, 0xba, 0xd1, 0x5c, 0x3b, + 0x68, 0xd6, 0x73, 0x94, 0xe2, 0xc9, 0xde, 0x46, 0x3d, 0x8f, 0xca, 0x30, 0xf9, 0x7c, 0x6d, 0xe7, + 0x59, 0xb3, 0x5e, 0xc0, 0x5f, 0x68, 0x30, 0x2d, 0xd6, 0x8b, 0xfb, 0x04, 0x7a, 0x1b, 0x8a, 0x1d, + 0xe6, 0x17, 0x6c, 0x2b, 0x56, 0x56, 0xaf, 0xc6, 0x16, 0x37, 0xe2, 0x3b, 0x86, 0xa0, 0x45, 0x18, + 0xf2, 0x27, 0x43, 0xaf, 0x91, 0x5b, 0xcc, 0x2f, 0x55, 0x56, 0xeb, 0xcb, 0xdc, 0x61, 0x97, 0xb7, + 0xc9, 0xd9, 0x73, 0xb3, 0x3b, 0x20, 0x06, 0x1d, 0x44, 0x08, 0x0a, 0x3d, 0xc7, 0x25, 0x6c, 0xc7, + 0x4e, 0x19, 0xec, 0x9b, 0x6e, 0x63, 0xb6, 0x68, 0x62, 0xb7, 0xf2, 0x06, 0x6e, 0x01, 0x3c, 0x1d, + 0xf8, 0xe9, 0x9e, 0x31, 0x07, 0x93, 0x43, 0xca, 0x57, 0x78, 0x05, 0x6f, 0x30, 0x97, 0x20, 0xa6, + 0x47, 0x02, 0x97, 0xa0, 0x0d, 0x74, 0x19, 0x4a, 0x7d, 0x97, 0x0c, 0x0f, 0x4f, 0x86, 0x0c, 0x63, + 0xca, 0x28, 0xd2, 0xe6, 0xf6, 0x10, 0xdb, 0x50, 0x61, 0x20, 0x99, 0xf4, 0xbe, 0x17, 0x72, 0xcf, + 0xb1, 0x69, 0xa3, 0xba, 0x4b, 0xbc, 0x8f, 0x00, 0x6d, 0x90, 0x2e, 0xf1, 0x49, 0x16, 0xb7, 0x57, + 0xb4, 0xc9, 0x47, 0xb4, 0xf9, 0x99, 0x06, 0xb3, 0x11, 0xf6, 0x99, 0xd4, 0x6a, 0x40, 0xa9, 0xcd, + 0x98, 0x71, 0x09, 0xf2, 0x86, 0x6c, 0xa2, 0x07, 0x30, 0x25, 0x04, 0xf0, 0x1a, 0xf9, 0x94, 0xd5, + 0x2e, 0x71, 0x99, 0x3c, 0xfc, 0x0f, 0x0d, 0xca, 0x42, 0xd1, 0xbd, 0x3e, 0x5a, 0x83, 0x69, 0x97, + 0x37, 0x0e, 0x99, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x55, 0x31, 0x85, 0x75, + 0xa3, 0x6f, 0x41, 0x45, 0xb2, 0xe8, 0x0f, 0x7c, 0x61, 0xf2, 0x46, 0x94, 0x41, 0xb8, 0x73, 0x36, + 0x27, 0x0c, 0x10, 0xe4, 0x4f, 0x07, 0x3e, 0x3a, 0x80, 0x39, 0x39, 0x99, 0x6b, 0x23, 0xc4, 0xc8, + 0x33, 0x2e, 0x8b, 0x51, 0x2e, 0xa3, 0x4b, 0xb5, 0x39, 0x61, 0x20, 0x31, 0x5f, 0x19, 0x7c, 0x54, + 0x86, 0x92, 0xe8, 0xc5, 0xff, 0xd4, 0x00, 0xa4, 0x41, 0xf7, 0xfa, 0x68, 0x03, 0x6a, 0xae, 0x68, + 0x45, 0x14, 0xbe, 0x92, 0xa8, 0xb0, 0x58, 0x87, 0x09, 0x63, 0x5a, 0x4e, 0xe2, 0x2a, 0xbf, 0x0b, + 0xd5, 0x80, 0x4b, 0xa8, 
0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0x8a, 0x9c, 0x40, 0xb5, 0xfe, 0x00, + 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, 0x0e, + 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xcb, 0x3c, 0x94, 0xd6, 0x9d, 0x5e, 0xdf, 0x74, 0xe9, 0x1a, 0x15, + 0x5d, 0xe2, 0x0d, 0xba, 0x3e, 0x53, 0xb7, 0xb6, 0x7a, 0x33, 0x8a, 0x20, 0xc8, 0xe4, 0x5f, 0x83, + 0x91, 0x1a, 0x62, 0x0a, 0x9d, 0x2c, 0x8e, 0x96, 0xdc, 0x2b, 0x4c, 0x16, 0x07, 0x8b, 0x98, 0x22, + 0x7d, 0x29, 0x1f, 0xfa, 0x92, 0x0e, 0xa5, 0x21, 0x71, 0xc3, 0xe3, 0x70, 0x73, 0xc2, 0x90, 0x1d, + 0xe8, 0x1e, 0xcc, 0xc4, 0x43, 0xf3, 0xa4, 0xa0, 0xa9, 0xb5, 0xa2, 0x91, 0xfc, 0x26, 0x54, 0x23, + 0xe7, 0x43, 0x51, 0xd0, 0x55, 0x7a, 0xca, 0xf1, 0x70, 0x49, 0x06, 0x25, 0x7a, 0x96, 0x55, 0x37, + 0x27, 0x44, 0x58, 0xc2, 0x6f, 0xc2, 0x74, 0x44, 0x57, 0x1a, 0x7e, 0x9b, 0xef, 0x3f, 0x5b, 0xdb, + 0xe1, 0xb1, 0xfa, 0x31, 0x0b, 0xcf, 0x46, 0x5d, 0xa3, 0x21, 0x7f, 0xa7, 0xb9, 0xbf, 0x5f, 0xcf, + 0xe1, 0x77, 0x82, 0x29, 0x22, 0xb8, 0x2b, 0x31, 0x7d, 0x42, 0x89, 0xe9, 0x9a, 0x8c, 0xe9, 0xb9, + 0x30, 0xa6, 0xe7, 0x1f, 0xd5, 0xa0, 0xca, 0x0d, 0x72, 0x38, 0xb0, 0xe9, 0xb9, 0xf2, 0x6b, 0x0d, + 0xe0, 0xe0, 0xd4, 0x96, 0x11, 0x67, 0x05, 0x4a, 0x2d, 0xce, 0xbc, 0xa1, 0x31, 0x07, 0xbe, 0x98, + 0x68, 0x63, 0x43, 0x52, 0xa1, 0x37, 0xa1, 0xe4, 0x0d, 0x5a, 0x2d, 0xe2, 0xc9, 0xf8, 0x7e, 0x39, + 0x1e, 0x43, 0x84, 0x87, 0x1b, 0x92, 0x8e, 0x4e, 0x79, 0x61, 0x5a, 0xdd, 0x01, 0x8b, 0xf6, 0xe3, + 0xa7, 0x08, 0x3a, 0xfc, 0x4b, 0x0d, 0x2a, 0x4c, 0xca, 0x4c, 0x81, 0xeb, 0x2a, 0x94, 0x99, 0x0c, + 0xa4, 0x2d, 0x42, 0xd7, 0x94, 0x11, 0x76, 0xa0, 0xff, 0x87, 0xb2, 0xdc, 0xb2, 0x32, 0x7a, 0x35, + 0x92, 0xd9, 0xee, 0xf5, 0x8d, 0x90, 0x14, 0x6f, 0xc3, 0x05, 0x66, 0x95, 0x16, 0xcd, 0x24, 0xa5, + 0x1d, 0xd5, 0x5c, 0x4b, 0x8b, 0xe5, 0x5a, 0x3a, 0x4c, 0xf5, 0x3b, 0x67, 0x9e, 0xd5, 0x32, 0xbb, + 0x42, 0x8a, 0xa0, 0x8d, 0xbf, 0x0d, 0x48, 0x65, 0x96, 0x45, 0x5d, 0x3c, 0x0d, 0x95, 0x4d, 0xd3, + 0xeb, 0x08, 0x91, 0xf0, 0x87, 0x50, 0xe5, 0xcd, 0x4c, 0x36, 
0x44, 0x50, 0xe8, 0x98, 0x5e, 0x87, + 0x09, 0x3e, 0x6d, 0xb0, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x6d, 0xb3, 0xef, 0x75, 0x1c, 0x19, 0x5c, + 0x69, 0x26, 0x5d, 0x0f, 0xfb, 0x32, 0x21, 0xde, 0x85, 0x19, 0x97, 0xf4, 0x4c, 0xcb, 0xb6, 0xec, + 0xe3, 0xc3, 0xa3, 0x33, 0x9f, 0x78, 0x22, 0xd1, 0xae, 0x05, 0xdd, 0x8f, 0x68, 0x2f, 0x15, 0xed, + 0xa8, 0xeb, 0x1c, 0x09, 0x17, 0x67, 0xdf, 0xf8, 0xb7, 0x1a, 0x54, 0x3f, 0x30, 0xfd, 0x96, 0xb4, + 0x02, 0xda, 0x82, 0x5a, 0xe0, 0xd8, 0xac, 0x47, 0xc8, 0x12, 0x8b, 0xf0, 0x6c, 0x8e, 0x4c, 0xc1, + 0x64, 0x84, 0x9f, 0x6e, 0xa9, 0x1d, 0x8c, 0x95, 0x69, 0xb7, 0x48, 0x37, 0x60, 0x95, 0x4b, 0x67, + 0xc5, 0x08, 0x55, 0x56, 0x6a, 0xc7, 0xa3, 0x99, 0xf0, 0xf4, 0xe3, 0x6e, 0xf9, 0x65, 0x0e, 0xd0, + 0xa8, 0x0c, 0x5f, 0x37, 0x21, 0xb8, 0x0d, 0x35, 0xcf, 0x37, 0x5d, 0xff, 0x30, 0x76, 0x0d, 0x99, + 0x66, 0xbd, 0x41, 0x70, 0xba, 0x0b, 0x33, 0x7d, 0xd7, 0x39, 0x76, 0x89, 0xe7, 0x1d, 0xda, 0x8e, + 0x6f, 0xbd, 0x38, 0x13, 0xd9, 0x50, 0x4d, 0x76, 0xef, 0xb2, 0x5e, 0xd4, 0x84, 0xd2, 0x0b, 0xab, + 0xeb, 0x13, 0xd7, 0x6b, 0x4c, 0x2e, 0xe6, 0x97, 0x6a, 0xab, 0x0f, 0xce, 0xb3, 0xda, 0xf2, 0x7b, + 0x8c, 0xfe, 0xe0, 0xac, 0x4f, 0x0c, 0x39, 0x57, 0xcd, 0x53, 0x8a, 0x91, 0x3c, 0xe5, 0x36, 0x40, + 0x48, 0x4f, 0xa3, 0xd6, 0xee, 0xde, 0xd3, 0x67, 0x07, 0xf5, 0x09, 0x54, 0x85, 0xa9, 0xdd, 0xbd, + 0x8d, 0xe6, 0x4e, 0x93, 0xc6, 0x35, 0xbc, 0x22, 0x6d, 0xa3, 0xda, 0x10, 0xcd, 0xc3, 0xd4, 0x4b, + 0xda, 0x2b, 0xef, 0x69, 0x79, 0xa3, 0xc4, 0xda, 0x5b, 0x6d, 0xfc, 0x77, 0x0d, 0xa6, 0xc5, 0x2e, + 0xc8, 0xb4, 0x15, 0x55, 0x88, 0x5c, 0x04, 0x82, 0x26, 0x45, 0x7c, 0x77, 0xb4, 0x45, 0xee, 0x25, + 0x9b, 0xd4, 0xdd, 0xf9, 0x62, 0x93, 0xb6, 0x30, 0x6b, 0xd0, 0x46, 0xf7, 0xa0, 0xde, 0xe2, 0xee, + 0x1e, 0x3b, 0x67, 0x8c, 0x19, 0xd1, 0x1f, 0x2c, 0xd2, 0x6d, 0x28, 0x92, 0x21, 0xb1, 0x7d, 0xaf, + 0x51, 0x61, 0xb1, 0x69, 0x5a, 0x66, 0x56, 0x4d, 0xda, 0x6b, 0x88, 0x41, 0xfc, 0x7f, 0x70, 0x61, + 0x87, 0xa6, 0xb6, 0x8f, 0x5d, 0xd3, 0x56, 0x93, 0xe4, 0x83, 0x83, 0x1d, 0x61, 0x95, 0xbc, 0x7f, + 
0xb0, 0x83, 0x6a, 0x90, 0xdb, 0xda, 0x10, 0x3a, 0xe4, 0xac, 0x0d, 0xfc, 0xa9, 0x06, 0x48, 0x9d, + 0x97, 0xc9, 0x4c, 0x31, 0xe6, 0x12, 0x3e, 0x1f, 0xc2, 0xcf, 0xc1, 0x24, 0x71, 0x5d, 0xc7, 0x65, + 0x06, 0x29, 0x1b, 0xbc, 0x81, 0x6f, 0x09, 0x19, 0x0c, 0x32, 0x74, 0x4e, 0x82, 0x3d, 0xcf, 0xb9, + 0x69, 0x81, 0xa8, 0xdb, 0x30, 0x1b, 0xa1, 0xca, 0x14, 0x23, 0xef, 0xc2, 0x45, 0xc6, 0x6c, 0x9b, + 0x90, 0xfe, 0x5a, 0xd7, 0x1a, 0xa6, 0xa2, 0xf6, 0xe1, 0x52, 0x9c, 0xf0, 0x9b, 0xb5, 0x11, 0x7e, + 0x47, 0x20, 0x1e, 0x58, 0x3d, 0x72, 0xe0, 0xec, 0xa4, 0xcb, 0x46, 0x03, 0x1f, 0xbd, 0xfa, 0x8a, + 0xc3, 0x84, 0x7d, 0xe3, 0xdf, 0x68, 0x70, 0x79, 0x64, 0xfa, 0x37, 0xbc, 0xaa, 0x0b, 0x00, 0xc7, + 0x74, 0xfb, 0x90, 0x36, 0x1d, 0xe0, 0x97, 0x36, 0xa5, 0x27, 0x90, 0x93, 0xc6, 0x8e, 0xaa, 0x90, + 0xb3, 0x03, 0xc5, 0x27, 0xac, 0x5e, 0xa2, 0x68, 0x55, 0x90, 0x5a, 0xd9, 0x66, 0x8f, 0x5f, 0xe3, + 0xca, 0x06, 0xfb, 0x66, 0x47, 0x27, 0x21, 0xee, 0x33, 0x63, 0x87, 0x1f, 0xd1, 0x65, 0x23, 0x68, + 0x53, 0xf4, 0x56, 0xd7, 0x22, 0xb6, 0xcf, 0x46, 0x0b, 0x6c, 0x54, 0xe9, 0xc1, 0xcb, 0x50, 0xe7, + 0x48, 0x6b, 0xed, 0xb6, 0x72, 0x4c, 0x07, 0xfc, 0xb4, 0x28, 0x3f, 0xfc, 0x12, 0x2e, 0x28, 0xf4, + 0x99, 0x4c, 0xf7, 0x1a, 0x14, 0x79, 0x51, 0x48, 0x9c, 0x10, 0x73, 0xd1, 0x59, 0x1c, 0xc6, 0x10, + 0x34, 0xf8, 0x36, 0xcc, 0x8a, 0x1e, 0xd2, 0x73, 0x92, 0x56, 0x9d, 0xd9, 0x07, 0xef, 0xc0, 0x5c, + 0x94, 0x2c, 0x93, 0x23, 0xac, 0x49, 0xd0, 0x67, 0xfd, 0xb6, 0x72, 0xe0, 0xc4, 0x17, 0x45, 0x35, + 0x58, 0x2e, 0x66, 0xb0, 0x40, 0x20, 0xc9, 0x22, 0x93, 0x40, 0xb3, 0xd2, 0xfc, 0x3b, 0x96, 0x17, + 0xa4, 0x15, 0x9f, 0x00, 0x52, 0x3b, 0x33, 0x2d, 0xca, 0x32, 0x94, 0xb8, 0xc1, 0x65, 0xe6, 0x9a, + 0xbc, 0x2a, 0x92, 0x88, 0x0a, 0xb4, 0x41, 0x5e, 0xb8, 0xe6, 0x71, 0x8f, 0x04, 0x91, 0x95, 0xe6, + 0x6b, 0x6a, 0x67, 0x26, 0x8d, 0xff, 0xa8, 0x41, 0x75, 0xad, 0x6b, 0xba, 0x3d, 0x69, 0xfc, 0x77, + 0xa1, 0xc8, 0x13, 0x41, 0x71, 0x59, 0xba, 0x13, 0x65, 0xa3, 0xd2, 0xf2, 0xc6, 0x1a, 0x4f, 0x1b, + 0xc5, 0x2c, 0xba, 0x58, 0xa2, 0x16, 
0xb9, 0x11, 0xab, 0x4d, 0x6e, 0xa0, 0xd7, 0x61, 0xd2, 0xa4, + 0x53, 0x98, 0xff, 0xd6, 0xe2, 0x29, 0x38, 0xe3, 0xc6, 0x0e, 0x6d, 0x4e, 0x85, 0xdf, 0x86, 0x8a, + 0x82, 0x40, 0x6f, 0x16, 0x8f, 0x9b, 0xe2, 0x60, 0x5e, 0x5b, 0x3f, 0xd8, 0x7a, 0xce, 0x2f, 0x1c, + 0x35, 0x80, 0x8d, 0x66, 0xd0, 0xce, 0xe1, 0x0f, 0xc5, 0x2c, 0xe1, 0xe1, 0xaa, 0x3c, 0x5a, 0x9a, + 0x3c, 0xb9, 0x57, 0x92, 0xe7, 0x14, 0xa6, 0x85, 0xfa, 0x99, 0xf6, 0xc0, 0x9b, 0x50, 0x64, 0xfc, + 0xe4, 0x16, 0x98, 0x4f, 0x80, 0x95, 0xde, 0xc9, 0x09, 0xf1, 0x0c, 0x4c, 0xef, 0xfb, 0xa6, 0x3f, + 0xf0, 0xe4, 0x16, 0xf8, 0x83, 0x06, 0x35, 0xd9, 0x93, 0xb5, 0xae, 0x22, 0xef, 0xa3, 0x3c, 0xe6, + 0x05, 0xb7, 0xd1, 0x4b, 0x50, 0x6c, 0x1f, 0xed, 0x5b, 0x9f, 0xc8, 0xea, 0x95, 0x68, 0xd1, 0xfe, + 0x2e, 0xc7, 0xe1, 0x15, 0x64, 0xd1, 0xa2, 0x17, 0x1d, 0xd7, 0x7c, 0xe1, 0x6f, 0xd9, 0x6d, 0x72, + 0xca, 0xf2, 0x89, 0x82, 0x11, 0x76, 0xb0, 0xbb, 0x89, 0xa8, 0x34, 0xb3, 0xfc, 0x4b, 0xad, 0x3c, + 0xcf, 0xc2, 0x85, 0xb5, 0x81, 0xdf, 0x69, 0xda, 0xe6, 0x51, 0x57, 0x06, 0x01, 0x3c, 0x07, 0x88, + 0x76, 0x6e, 0x58, 0x9e, 0xda, 0xdb, 0x84, 0x59, 0xda, 0x4b, 0x6c, 0xdf, 0x6a, 0x29, 0x11, 0x43, + 0x86, 0x6d, 0x2d, 0x16, 0xb6, 0x4d, 0xcf, 0x7b, 0xe9, 0xb8, 0x6d, 0xa1, 0x5a, 0xd0, 0xc6, 0x1b, + 0x9c, 0xf9, 0x33, 0x2f, 0x12, 0x98, 0xbf, 0x2e, 0x97, 0xa5, 0x90, 0xcb, 0x63, 0xe2, 0x8f, 0xe1, + 0x82, 0x1f, 0xc0, 0x45, 0x49, 0x29, 0x0a, 0x16, 0x63, 0x88, 0xf7, 0xe0, 0x9a, 0x24, 0x5e, 0xef, + 0xd0, 0xac, 0xfa, 0xa9, 0x00, 0xfc, 0x6f, 0xe5, 0x7c, 0x04, 0x8d, 0x40, 0x4e, 0x96, 0x69, 0x39, + 0x5d, 0x55, 0x80, 0x81, 0x27, 0xf6, 0x4c, 0xd9, 0x60, 0xdf, 0xb4, 0xcf, 0x75, 0xba, 0xc1, 0x21, + 0x48, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1e, 0x22, 0x07, 0x8a, 0x32, 0x19, 0x11, 0x28, 0x89, 0x89, + 0x30, 0x18, 0x9d, 0x3a, 0xde, 0xec, 0x2a, 0x65, 0xd4, 0xb4, 0x8c, 0xa7, 0xa6, 0xf0, 0xbc, 0xc8, + 0x77, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0xa2, 0x9b, 0x32, 0x50, 0xbb, 0xc5, 0x42, 0xd0, 0xee, 0x91, + 0x85, 0x18, 0x61, 0xfd, 0x11, 0x2c, 0x04, 0x42, 0x50, 0xbb, 0x3d, 0x25, 
0x6e, 0xcf, 0xf2, 0x3c, + 0xe5, 0xc6, 0x9d, 0xa4, 0xf8, 0x1d, 0x28, 0xf4, 0x89, 0x88, 0x29, 0x95, 0x55, 0xb4, 0xcc, 0xdf, + 0x83, 0x96, 0x95, 0xc9, 0x6c, 0x1c, 0xb7, 0xe1, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, + 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, 0xc6, 0xf2, 0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 0x15, + 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x08, 0xe6, 0xa2, + 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0xa4, 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, + 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, + 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, + 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0x7f, + 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, + 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, 0x0d, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, + 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, + 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, + 0x83, 0x84, 0x52, 0x79, 0x4b, 0xad, 0x40, 0x69, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, + 0xad, 0xfe, 0x2b, 0x0f, 0xb9, 0xed, 0xe7, 0xe8, 0xbb, 0x30, 0xc9, 0x5f, 0x5a, 0xc6, 0x3c, 0x44, + 0xe9, 0xe3, 0xde, 0x6c, 0xf0, 0xd5, 0x4f, 0xff, 0xfc, 0xb7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 0x95, + 0xe1, 0x5b, 0x66, 0xb7, 0xdf, 0x31, 0x57, 0x4e, 0x86, 0x2b, 0xec, 0x4c, 0x78, 0xa8, 0xdd, 0x47, + 0xcf, 0x21, 0xff, 0x74, 0xe0, 0xa3, 0xd4, 0x57, 0x2a, 0x3d, 0xfd, 0x2d, 0x07, 0xeb, 0x8c, 0xf3, + 0x1c, 0x9e, 0x51, 0x39, 0xf7, 0x07, 0x3e, 0xe5, 0x3b, 0x84, 0x8a, 0xf2, 0x1c, 0x83, 0xce, 0x7d, + 0xbf, 0xd2, 0xcf, 0x7f, 0xea, 0xc1, 0x98, 0xe1, 0x5d, 0xc5, 0x97, 0x55, 0x3c, 0xfe, 0x6a, 0xa4, + 0xea, 0x73, 0x70, 0x6a, 0xc7, 0xf5, 0x09, 0x1f, 0x18, 0xe2, 0xfa, 0x28, 0x45, 0xfd, 0x64, 0x7d, + 0xfc, 0x53, 
0x9b, 0xf2, 0x75, 0xc4, 0x13, 0x52, 0xcb, 0x47, 0xd7, 0x13, 0x5e, 0x24, 0xd4, 0xda, + 0xbb, 0xbe, 0x98, 0x4e, 0x20, 0x90, 0x6e, 0x30, 0xa4, 0x2b, 0xf8, 0x92, 0x8a, 0xd4, 0x0a, 0xe8, + 0x1e, 0x6a, 0xf7, 0x57, 0x3b, 0x30, 0xc9, 0x2a, 0x86, 0xe8, 0x50, 0x7e, 0xe8, 0x09, 0xb5, 0xce, + 0x94, 0x1d, 0x10, 0xa9, 0x35, 0xe2, 0x79, 0x86, 0x36, 0x8b, 0x6b, 0x01, 0x1a, 0x2b, 0x1a, 0x3e, + 0xd4, 0xee, 0x2f, 0x69, 0x6f, 0x68, 0xab, 0x3f, 0x2a, 0xc0, 0x24, 0xab, 0xd4, 0xa0, 0x3e, 0x40, + 0x58, 0x83, 0x8b, 0xeb, 0x39, 0x52, 0xd5, 0x8b, 0xeb, 0x39, 0x5a, 0xbe, 0xc3, 0xd7, 0x19, 0xf2, + 0x3c, 0x9e, 0x0b, 0x90, 0xd9, 0xb3, 0xf7, 0x0a, 0xab, 0xc9, 0x50, 0xb3, 0xbe, 0x84, 0x8a, 0x52, + 0x4b, 0x43, 0x49, 0x1c, 0x23, 0xc5, 0xb8, 0xf8, 0x36, 0x49, 0x28, 0xc4, 0xe1, 0x9b, 0x0c, 0xf4, + 0x1a, 0x6e, 0xa8, 0xc6, 0xe5, 0xb8, 0x2e, 0xa3, 0xa4, 0xc0, 0x9f, 0x69, 0x50, 0x8b, 0xd6, 0xd3, + 0xd0, 0xcd, 0x04, 0xd6, 0xf1, 0xb2, 0x9c, 0x7e, 0x6b, 0x3c, 0x51, 0xaa, 0x08, 0x1c, 0xff, 0x84, + 0x90, 0xbe, 0x49, 0x29, 0x85, 0xed, 0xd1, 0x8f, 0x35, 0x98, 0x89, 0x55, 0xc9, 0x50, 0x12, 0xc4, + 0x48, 0x0d, 0x4e, 0xbf, 0x7d, 0x0e, 0x95, 0x90, 0xe4, 0x2e, 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, + 0x0c, 0xdf, 0xea, 0x11, 0xdf, 0x11, 0xd2, 0xac, 0xfe, 0x3b, 0x0f, 0xa5, 0x75, 0xfe, 0xb3, 0x22, + 0xe4, 0x43, 0x39, 0xa8, 0x3c, 0xa1, 0x85, 0xa4, 0xaa, 0x44, 0x98, 0xb2, 0xeb, 0xd7, 0x53, 0xc7, + 0x85, 0x08, 0x77, 0x98, 0x08, 0x8b, 0xf8, 0x4a, 0x20, 0x82, 0xf8, 0xf9, 0xd2, 0x0a, 0xbf, 0x7c, + 0xaf, 0x98, 0xed, 0x36, 0x5d, 0x92, 0x1f, 0x6a, 0x50, 0x55, 0x0b, 0x4a, 0xe8, 0x46, 0x62, 0x3d, + 0x44, 0xad, 0x49, 0xe9, 0x78, 0x1c, 0x89, 0xc0, 0xbf, 0xc7, 0xf0, 0x6f, 0xe2, 0x85, 0x34, 0x7c, + 0x97, 0xd1, 0x47, 0x45, 0xe0, 0x25, 0xa4, 0x64, 0x11, 0x22, 0x15, 0xaa, 0x64, 0x11, 0xa2, 0x15, + 0xa8, 0xf3, 0x45, 0x18, 0x30, 0x7a, 0x2a, 0xc2, 0x29, 0x40, 0x58, 0x61, 0x42, 0x89, 0xc6, 0x55, + 0x2e, 0x31, 0x71, 0x1f, 0x1c, 0x2d, 0x4e, 0x25, 0xec, 0x80, 0x18, 0x76, 0xd7, 0xf2, 0xa8, 0x2f, + 0xae, 0xfe, 0xae, 0x00, 0x95, 0x27, 0xa6, 0x65, 
0xfb, 0xc4, 0x36, 0xed, 0x16, 0x41, 0xc7, 0x30, + 0xc9, 0x4e, 0xa9, 0x78, 0xe0, 0x51, 0xcb, 0x3e, 0xf1, 0xc0, 0x13, 0xa9, 0x89, 0xe0, 0xdb, 0x0c, + 0xfa, 0x3a, 0xd6, 0x03, 0xe8, 0x5e, 0xc8, 0x7f, 0x85, 0xd5, 0x33, 0xa8, 0xca, 0x27, 0x50, 0xe4, + 0xf5, 0x0b, 0x14, 0xe3, 0x16, 0xa9, 0x73, 0xe8, 0x57, 0x93, 0x07, 0x53, 0x77, 0x99, 0x8a, 0xe5, + 0x31, 0x62, 0x0a, 0xf6, 0x3d, 0x80, 0xb0, 0x60, 0x16, 0xb7, 0xef, 0x48, 0x7d, 0x4d, 0x5f, 0x4c, + 0x27, 0x10, 0xc0, 0xf7, 0x19, 0xf0, 0x2d, 0x7c, 0x3d, 0x11, 0xb8, 0x1d, 0x4c, 0xa0, 0xe0, 0x2d, + 0x28, 0x6c, 0x9a, 0x5e, 0x07, 0xc5, 0x0e, 0x21, 0xe5, 0x95, 0x54, 0xd7, 0x93, 0x86, 0x04, 0xd4, + 0x2d, 0x06, 0xb5, 0x80, 0xe7, 0x13, 0xa1, 0x3a, 0xa6, 0x47, 0x63, 0x3a, 0x1a, 0xc0, 0x94, 0x7c, + 0xf9, 0x44, 0xd7, 0x62, 0x36, 0x8b, 0xbe, 0x92, 0xea, 0x0b, 0x69, 0xc3, 0x02, 0x70, 0x89, 0x01, + 0x62, 0x7c, 0x2d, 0xd9, 0xa8, 0x82, 0xfc, 0xa1, 0x76, 0xff, 0x0d, 0x6d, 0xf5, 0xa7, 0x75, 0x28, + 0xd0, 0x7c, 0x89, 0x9e, 0x22, 0xe1, 0x35, 0x33, 0x6e, 0xe1, 0x91, 0xe2, 0x4e, 0xdc, 0xc2, 0xa3, + 0x37, 0xd4, 0x84, 0x53, 0x84, 0xfd, 0xb8, 0x92, 0x30, 0x2a, 0xaa, 0xb1, 0x0f, 0x15, 0xe5, 0x32, + 0x8a, 0x12, 0x38, 0x46, 0x4b, 0x47, 0xf1, 0x53, 0x24, 0xe1, 0x26, 0x8b, 0x17, 0x19, 0xa8, 0x8e, + 0x2f, 0x46, 0x41, 0xdb, 0x9c, 0x8c, 0xa2, 0x7e, 0x1f, 0xaa, 0xea, 0xad, 0x15, 0x25, 0x30, 0x8d, + 0xd5, 0xa6, 0xe2, 0xb1, 0x22, 0xe9, 0xd2, 0x9b, 0xe0, 0x34, 0xc1, 0x4f, 0x49, 0x25, 0x2d, 0x45, + 0xff, 0x18, 0x4a, 0xe2, 0x2e, 0x9b, 0xa4, 0x6f, 0xb4, 0x9a, 0x95, 0xa4, 0x6f, 0xec, 0x22, 0x9c, + 0x90, 0x92, 0x30, 0x58, 0x9a, 0xb3, 0xcb, 0x00, 0x2d, 0x20, 0x1f, 0x13, 0x3f, 0x0d, 0x32, 0xac, + 0xcf, 0xa4, 0x41, 0x2a, 0xf7, 0xa5, 0xb1, 0x90, 0xc7, 0xc4, 0x17, 0x7b, 0x59, 0x5e, 0x46, 0x50, + 0x0a, 0x47, 0x35, 0x1a, 0xe2, 0x71, 0x24, 0xa9, 0x59, 0x64, 0x88, 0x2a, 0x42, 0x21, 0xfa, 0x01, + 0x40, 0x78, 0xf1, 0x8e, 0x27, 0x06, 0x89, 0xd5, 0xbb, 0x78, 0x62, 0x90, 0x7c, 0x77, 0x4f, 0xf0, + 0xe0, 0x10, 0x9c, 0x67, 0xb2, 0x14, 0xfe, 0xe7, 0x1a, 0xa0, 0xd1, 0x8b, 0x3a, 0x7a, 
0x90, 0x0c, + 0x91, 0x58, 0x18, 0xd4, 0x5f, 0x7b, 0x35, 0xe2, 0xd4, 0xe8, 0x19, 0xca, 0xd5, 0x62, 0x53, 0xfa, + 0x2f, 0xa9, 0x64, 0x9f, 0x6b, 0x30, 0x1d, 0xb9, 0xea, 0xa3, 0x3b, 0x29, 0xeb, 0x1c, 0x2b, 0x2e, + 0xea, 0x77, 0xcf, 0xa5, 0x4b, 0xcd, 0x9d, 0x94, 0x5d, 0x21, 0xf3, 0xc6, 0x9f, 0x68, 0x50, 0x8b, + 0xd6, 0x07, 0x50, 0x0a, 0xc0, 0x48, 0x85, 0x52, 0x5f, 0x3a, 0x9f, 0xf0, 0x15, 0x56, 0x2b, 0x4c, + 0x25, 0x3f, 0x86, 0x92, 0x28, 0x2b, 0x24, 0xb9, 0x45, 0xb4, 0xc0, 0x99, 0xe4, 0x16, 0xb1, 0x9a, + 0x44, 0x9a, 0x5b, 0xd0, 0x1b, 0xba, 0xe2, 0x89, 0xa2, 0xf8, 0x90, 0x06, 0x39, 0xde, 0x13, 0x63, + 0x95, 0x8b, 0xb1, 0x90, 0xa1, 0x27, 0xca, 0xd2, 0x03, 0x4a, 0xe1, 0x78, 0x8e, 0x27, 0xc6, 0x2b, + 0x17, 0x69, 0x9e, 0xc8, 0x50, 0x15, 0x4f, 0x0c, 0x2b, 0x05, 0x49, 0x9e, 0x38, 0x52, 0xbe, 0x4d, + 0xf2, 0xc4, 0xd1, 0x62, 0x43, 0xda, 0xda, 0x32, 0xf0, 0x88, 0x27, 0xce, 0x26, 0x54, 0x16, 0xd0, + 0x6b, 0x29, 0x36, 0x4d, 0x2c, 0x0d, 0xeb, 0xaf, 0xbf, 0x22, 0xf5, 0x78, 0x0f, 0xe0, 0xab, 0x21, + 0x3d, 0xe0, 0x57, 0x1a, 0xcc, 0x25, 0x95, 0x26, 0x50, 0x0a, 0x58, 0x4a, 0x5d, 0x59, 0x5f, 0x7e, + 0x55, 0xf2, 0x57, 0xb0, 0x5b, 0xe0, 0x13, 0x8f, 0xea, 0xbf, 0xff, 0x6a, 0x41, 0xfb, 0xd3, 0x57, + 0x0b, 0xda, 0x5f, 0xbe, 0x5a, 0xd0, 0x7e, 0xf1, 0xd7, 0x85, 0x89, 0xa3, 0x22, 0xfb, 0x1f, 0x0e, + 0x6f, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x17, 0x62, 0x2c, 0x4e, 0x68, 0x31, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go index a2efbcd98..473ad582e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go @@ -222,6 +222,19 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh return stream, metadata, nil } +func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq LeaseTimeToLiveRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq MemberAddRequest var metadata runtime.ServerMetadata @@ -935,6 +948,34 @@ func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc }) + mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) + if err != nil { + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + } + resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -944,6 +985,8 @@ var ( pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, "")) pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, "")) + + pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, "")) ) var ( @@ -952,6 +995,8 @@ var ( forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream + + forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage ) // RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto deleted file mode 100644 index 04f08cb56..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ /dev/null @@ -1,894 +0,0 @@ -syntax = "proto3"; -package etcdserverpb; - -import "gogoproto/gogo.proto"; -import "etcd/mvcc/mvccpb/kv.proto"; -import "etcd/auth/authpb/auth.proto"; - -// for grpc-gateway -import "google/api/annotations.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service KV { - // Range gets the keys in the range from the key-value store. - rpc Range(RangeRequest) returns (RangeResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/range" - body: "*" - }; - } - - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. 
- rpc Put(PutRequest) returns (PutResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/put" - body: "*" - }; - } - - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/deleterange" - body: "*" - }; - } - - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - rpc Txn(TxnRequest) returns (TxnResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/txn" - body: "*" - }; - } - - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - rpc Compact(CompactionRequest) returns (CompactionResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/compaction" - body: "*" - }; - } -} - -service Watch { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - rpc Watch(stream WatchRequest) returns (stream WatchResponse) { - option (google.api.http) = { - post: "/v3alpha/watch" - body: "*" - }; - } -} - -service Lease { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. 
All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { - option (google.api.http) = { - post: "/v3alpha/lease/grant" - body: "*" - }; - } - - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { - option (google.api.http) = { - post: "/v3alpha/kv/lease/revoke" - body: "*" - }; - } - - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { - option (google.api.http) = { - post: "/v3alpha/lease/keepalive" - body: "*" - }; - } - - // TODO(xiangli) List all existing Leases? - // TODO(xiangli) Get details information (expirations, leased keys, etc.) of a lease? -} - -service Cluster { - // MemberAdd adds a member into the cluster. - rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/add" - body: "*" - }; - } - - // MemberRemove removes an existing member from the cluster. - rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/remove" - body: "*" - }; - } - - // MemberUpdate updates the member configuration. - rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/update" - body: "*" - }; - } - - // MemberList lists all the members in the cluster. 
- rpc MemberList(MemberListRequest) returns (MemberListResponse) { - option (google.api.http) = { - post: "/v3alpha/cluster/member/list" - body: "*" - }; - } -} - -service Maintenance { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - rpc Alarm(AlarmRequest) returns (AlarmResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/alarm" - body: "*" - }; - } - - // Status gets the status of the member. - rpc Status(StatusRequest) returns (StatusResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/status" - body: "*" - }; - } - - // Defragment defragments a member's backend database to recover storage space. - rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/defragment" - body: "*" - }; - } - - // Hash returns the hash of the local KV state for consistency checking purpose. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - rpc Hash(HashRequest) returns (HashResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/hash" - body: "*" - }; - } - - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { - option (google.api.http) = { - post: "/v3alpha/maintenance/snapshot" - body: "*" - }; - } -} - -service Auth { - // AuthEnable enables authentication. - rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/enable" - body: "*" - }; - } - - // AuthDisable disables authentication. - rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/disable" - body: "*" - }; - } - - // Authenticate processes an authenticate request. 
- rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/authenticate" - body: "*" - }; - } - - // UserAdd adds a new user. - rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/add" - body: "*" - }; - } - - // UserGet gets detailed user information. - rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/get" - body: "*" - }; - } - - // UserList gets a list of all users. - rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/list" - body: "*" - }; - } - - // UserDelete deletes a specified user. - rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/delete" - body: "*" - }; - } - - // UserChangePassword changes the password of a specified user. - rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/changepw" - body: "*" - }; - } - - // UserGrant grants a role to a specified user. - rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/grant" - body: "*" - }; - } - - // UserRevokeRole revokes a role of specified user. - rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/user/revoke" - body: "*" - }; - } - - // RoleAdd adds a new role. - rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/add" - body: "*" - }; - } - - // RoleGet gets detailed role information. 
- rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/get" - body: "*" - }; - } - - // RoleList gets lists of all roles. - rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/list" - body: "*" - }; - } - - // RoleDelete deletes a specified role. - rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/delete" - body: "*" - }; - } - - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/grant" - body: "*" - }; - } - - // RoleRevokePermission revokes a key or range permission of a specified role. - rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { - option (google.api.http) = { - post: "/v3alpha/auth/role/revoke" - body: "*" - }; - } -} - -message ResponseHeader { - // cluster_id is the ID of the cluster which sent the response. - uint64 cluster_id = 1; - // member_id is the ID of the member which sent the response. - uint64 member_id = 2; - // revision is the key-value store revision when the request was applied. - int64 revision = 3; - // raft_term is the raft term when the request was applied. - uint64 raft_term = 4; -} - -message RangeRequest { - enum SortOrder { - NONE = 0; // default, no sorting - ASCEND = 1; // lowest target value first - DESCEND = 2; // highest target value first - } - enum SortTarget { - KEY = 0; - VERSION = 1; - CREATE = 2; - MOD = 3; - VALUE = 4; - } - - // key is the first key for the range. If range_end is not given, the request only looks up key. - bytes key = 1; - // range_end is the upper bound on the requested range [key, range_end). 
- // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. - bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. - int64 limit = 3; - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - int64 revision = 4; - - // sort_order is the order for returned sorted results. - SortOrder sort_order = 5; - - // sort_target is the key-value field to use for sorting. - SortTarget sort_target = 6; - - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. - bool serializable = 7; - - // keys_only when set returns only the keys and not the values. - bool keys_only = 8; - - // count_only when set returns only the count of the keys in the range. - bool count_only = 9; -} - -message RangeResponse { - ResponseHeader header = 1; - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - repeated mvccpb.KeyValue kvs = 2; - // more indicates if there are more keys to return in the requested range. - bool more = 3; - // count is set to the number of keys within the range when requested. 
- int64 count = 4; -} - -message PutRequest { - // key is the key, in bytes, to put into the key-value store. - bytes key = 1; - // value is the value, in bytes, to associate with the key in the key-value store. - bytes value = 2; - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - int64 lease = 3; -} - -message PutResponse { - ResponseHeader header = 1; -} - -message DeleteRangeRequest { - // key is the first key to delete in the range. - bytes key = 1; - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - bytes range_end = 2; -} - -message DeleteRangeResponse { - ResponseHeader header = 1; - // deleted is the number of keys deleted by the delete range request. - int64 deleted = 2; -} - -message RequestOp { - // request is a union of request types accepted by a transaction. - oneof request { - RangeRequest request_range = 1; - PutRequest request_put = 2; - DeleteRangeRequest request_delete_range = 3; - } -} - -message ResponseOp { - // response is a union of response types returned by a transaction. - oneof response { - RangeResponse response_range = 1; - PutResponse response_put = 2; - DeleteRangeResponse response_delete_range = 3; - } -} - -message Compare { - enum CompareResult { - EQUAL = 0; - GREATER = 1; - LESS = 2; - } - enum CompareTarget { - VERSION = 0; - CREATE = 1; - MOD = 2; - VALUE= 3; - } - // result is logical comparison operation for this comparison. - CompareResult result = 1; - // target is the key-value field to inspect for the comparison. - CompareTarget target = 2; - // key is the subject key for the comparison operation. 
- bytes key = 3; - oneof target_union { - // version is the version of the given key - int64 version = 4; - // create_revision is the creation revision of the given key - int64 create_revision = 5; - // mod_revision is the last modified revision of the given key. - int64 mod_revision = 6; - // value is the value of the given key, in bytes. - bytes value = 7; - } -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -message TxnRequest { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. - // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. 
- repeated Compare compare = 1; - // success is a list of requests which will be applied when compare evaluates to true. - repeated RequestOp success = 2; - // failure is a list of requests which will be applied when compare evaluates to false. - repeated RequestOp failure = 3; -} - -message TxnResponse { - ResponseHeader header = 1; - // succeeded is set to true if the compare evaluated to true or false otherwise. - bool succeeded = 2; - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - repeated ResponseOp responses = 3; -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -message CompactionRequest { - // revision is the key-value store revision for the compaction operation. - int64 revision = 1; - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - bool physical = 2; -} - -message CompactionResponse { - ResponseHeader header = 1; -} - -message HashRequest { -} - -message HashResponse { - ResponseHeader header = 1; - // hash is the hash value computed from the responding member's key-value store. - uint32 hash = 2; -} - -message SnapshotRequest { -} - -message SnapshotResponse { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - ResponseHeader header = 1; - - // remaining_bytes is the number of blob bytes to be sent after this message - uint64 remaining_bytes = 2; - - // blob contains the next chunk of the snapshot in the snapshot stream. - bytes blob = 3; -} - -message WatchRequest { - // request_union is a request to either create a new watcher or cancel an existing watcher. 
- oneof request_union { - WatchCreateRequest create_request = 1; - WatchCancelRequest cancel_request = 2; - } -} - -message WatchCreateRequest { - // key is the key to register for watching. - bytes key = 1; - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - bytes range_end = 2; - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - int64 start_revision = 3; - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. - bool progress_notify = 4; -} - -message WatchCancelRequest { - // watch_id is the watcher id to cancel so that no more events are transmitted. - int64 watch_id = 1; -} - -message WatchResponse { - ResponseHeader header = 1; - // watch_id is the ID of the watcher that corresponds to the response. - int64 watch_id = 2; - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - bool created = 3; - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - bool canceled = 4; - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. 
- // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - int64 compact_revision = 5; - - repeated mvccpb.Event events = 11; -} - -message LeaseGrantRequest { - // TTL is the advisory time-to-live in seconds. - int64 TTL = 1; - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. - int64 ID = 2; -} - -message LeaseGrantResponse { - ResponseHeader header = 1; - // ID is the lease ID for the granted lease. - int64 ID = 2; - // TTL is the server chosen lease time-to-live in seconds. - int64 TTL = 3; - string error = 4; -} - -message LeaseRevokeRequest { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - int64 ID = 1; -} - -message LeaseRevokeResponse { - ResponseHeader header = 1; -} - -message LeaseKeepAliveRequest { - // ID is the lease ID for the lease to keep alive. - int64 ID = 1; -} - -message LeaseKeepAliveResponse { - ResponseHeader header = 1; - // ID is the lease ID from the keep alive request. - int64 ID = 2; - // TTL is the new time-to-live for the lease. - int64 TTL = 3; -} - -message Member { - // ID is the member ID for this member. - uint64 ID = 1; - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - string name = 2; - // peerURLs is the list of URLs the member exposes to the cluster for communication. - repeated string peerURLs = 3; - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - repeated string clientURLs = 4; -} - -message MemberAddRequest { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. - repeated string peerURLs = 1; -} - -message MemberAddResponse { - ResponseHeader header = 1; - // member is the member information for the added member. 
- Member member = 2; -} - -message MemberRemoveRequest { - // ID is the member ID of the member to remove. - uint64 ID = 1; -} - -message MemberRemoveResponse { - ResponseHeader header = 1; -} - -message MemberUpdateRequest { - // ID is the member ID of the member to update. - uint64 ID = 1; - // peerURLs is the new list of URLs the member will use to communicate with the cluster. - repeated string peerURLs = 2; -} - -message MemberUpdateResponse{ - ResponseHeader header = 1; -} - -message MemberListRequest { -} - -message MemberListResponse { - ResponseHeader header = 1; - // members is a list of all members associated with the cluster. - repeated Member members = 2; -} - -message DefragmentRequest { -} - -message DefragmentResponse { - ResponseHeader header = 1; -} - -enum AlarmType { - NONE = 0; // default, used to query if any alarm is active - NOSPACE = 1; // space quota is exhausted -} - -message AlarmRequest { - enum AlarmAction { - GET = 0; - ACTIVATE = 1; - DEACTIVATE = 2; - } - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - AlarmAction action = 1; - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - uint64 memberID = 2; - // alarm is the type of alarm to consider for this request. - AlarmType alarm = 3; -} - -message AlarmMember { - // memberID is the ID of the member associated with the raised alarm. - uint64 memberID = 1; - // alarm is the type of alarm which has been raised. - AlarmType alarm = 2; -} - -message AlarmResponse { - ResponseHeader header = 1; - // alarms is a list of alarms associated with the alarm request. - repeated AlarmMember alarms = 2; -} - -message StatusRequest { -} - -message StatusResponse { - ResponseHeader header = 1; - // version is the cluster protocol version used by the responding member. 
- string version = 2; - // dbSize is the size of the backend database, in bytes, of the responding member. - int64 dbSize = 3; - // leader is the member ID which the responding member believes is the current leader. - uint64 leader = 4; - // raftIndex is the current raft index of the responding member. - uint64 raftIndex = 5; - // raftTerm is the current raft term of the responding member. - uint64 raftTerm = 6; -} - -message AuthEnableRequest { -} - -message AuthDisableRequest { -} - -message AuthenticateRequest { - string name = 1; - string password = 2; -} - -message AuthUserAddRequest { - string name = 1; - string password = 2; -} - -message AuthUserGetRequest { - string name = 1; -} - -message AuthUserDeleteRequest { - // name is the name of the user to delete. - string name = 1; -} - -message AuthUserChangePasswordRequest { - // name is the name of the user whose password is being changed. - string name = 1; - // password is the new password for the user. - string password = 2; -} - -message AuthUserGrantRoleRequest { - // user is the name of the user which should be granted a given role. - string user = 1; - // role is the name of the role to grant to the user. - string role = 2; -} - -message AuthUserRevokeRoleRequest { - string name = 1; - string role = 2; -} - -message AuthRoleAddRequest { - // name is the name of the role to add to the authentication system. - string name = 1; -} - -message AuthRoleGetRequest { - string role = 1; -} - -message AuthUserListRequest { -} - -message AuthRoleListRequest { -} - -message AuthRoleDeleteRequest { - string role = 1; -} - -message AuthRoleGrantPermissionRequest { - // name is the name of the role which will be granted the permission. - string name = 1; - // perm is the permission to grant to the role. 
- authpb.Permission perm = 2; -} - -message AuthRoleRevokePermissionRequest { - string role = 1; - string key = 2; - string range_end = 3; -} - -message AuthEnableResponse { - ResponseHeader header = 1; -} - -message AuthDisableResponse { - ResponseHeader header = 1; -} - -message AuthenticateResponse { - ResponseHeader header = 1; - // token is an authorized token that can be used in succeeding RPCs - string token = 2; -} - -message AuthUserAddResponse { - ResponseHeader header = 1; -} - -message AuthUserGetResponse { - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserDeleteResponse { - ResponseHeader header = 1; -} - -message AuthUserChangePasswordResponse { - ResponseHeader header = 1; -} - -message AuthUserGrantRoleResponse { - ResponseHeader header = 1; -} - -message AuthUserRevokeRoleResponse { - ResponseHeader header = 1; -} - -message AuthRoleAddResponse { - ResponseHeader header = 1; -} - -message AuthRoleGetResponse { - ResponseHeader header = 1; - - repeated authpb.Permission perm = 2; -} - -message AuthRoleListResponse { - ResponseHeader header = 1; - - repeated string roles = 2; -} - -message AuthUserListResponse { - ResponseHeader header = 1; - - repeated string users = 2; -} - -message AuthRoleDeleteResponse { - ResponseHeader header = 1; -} - -message AuthRoleGrantPermissionResponse { - ResponseHeader header = 1; -} - -message AuthRoleRevokePermissionResponse { - ResponseHeader header = 1; -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go index 2ab97270c..12c038328 100644 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -20,9 +20,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -89,6 +89,8 @@ type Event struct { // A DELETE/EXPIRE event contains the deleted key with // its modification revision set to the revision of deletion. Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` + // prev_kv holds the key-value pair before the event happens. + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` } func (m *Event) Reset() { *m = Event{} } @@ -181,6 +183,16 @@ func (m *Event) MarshalTo(data []byte) (int, error) { } i += n1 } + if m.PrevKv != nil { + data[i] = 0x1a + i++ + i = encodeVarintKv(data, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -247,6 +259,10 @@ func (m *Event) Size() (n int) { l = m.Kv.Size() n += 1 + l + sovKv(uint64(l)) } + if m.PrevKv != nil { + l = m.PrevKv.Size() + n += 1 + l + sovKv(uint64(l)) + } return n } @@ -532,6 +548,39 @@ func (m *Event) Unmarshal(data []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthKv + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrevKv == nil { + m.PrevKv = &KeyValue{} + } + if err := m.PrevKv.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipKv(data[iNdEx:]) @@ -659,23 +708,24 @@ var ( ) var fileDescriptorKv = []byte{ - // 285 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x90, 0x41, 0x4a, 0xc3, 0x40, - 0x14, 0x86, 0x33, 0x4d, 0x9b, 0xd6, 
0xd7, 0x52, 0xc3, 0x50, 0x70, 0x70, 0x31, 0xc4, 0x6e, 0x2c, - 0x08, 0x11, 0xea, 0x0d, 0xc4, 0xac, 0x74, 0x21, 0x21, 0xba, 0x95, 0x34, 0x7d, 0x94, 0x92, 0xa6, - 0x13, 0xd2, 0x38, 0x98, 0x9b, 0x78, 0x0a, 0xcf, 0xd1, 0x65, 0x8f, 0x60, 0xe3, 0x45, 0x24, 0x6f, - 0x4c, 0xdd, 0x0c, 0xef, 0xff, 0xff, 0x6f, 0x98, 0xff, 0x0d, 0x0c, 0x52, 0xed, 0xe7, 0x85, 0x2a, - 0x15, 0x77, 0x32, 0x9d, 0x24, 0xf9, 0xe2, 0x72, 0xb2, 0x52, 0x2b, 0x45, 0xd6, 0x6d, 0x33, 0x99, - 0x74, 0xfa, 0xc5, 0x60, 0xf0, 0x88, 0xd5, 0x6b, 0xbc, 0x79, 0x47, 0xee, 0x82, 0x9d, 0x62, 0x25, - 0x98, 0xc7, 0x66, 0xa3, 0xb0, 0x19, 0xf9, 0x35, 0x9c, 0x27, 0x05, 0xc6, 0x25, 0xbe, 0x15, 0xa8, - 0xd7, 0xbb, 0xb5, 0xda, 0x8a, 0x8e, 0xc7, 0x66, 0x76, 0x38, 0x36, 0x76, 0xf8, 0xe7, 0xf2, 0x2b, - 0x18, 0x65, 0x6a, 0xf9, 0x4f, 0xd9, 0x44, 0x0d, 0x33, 0xb5, 0x3c, 0x21, 0x02, 0xfa, 0x1a, 0x0b, - 0x4a, 0xbb, 0x94, 0xb6, 0x92, 0x4f, 0xa0, 0xa7, 0x9b, 0x02, 0xa2, 0x47, 0x2f, 0x1b, 0xd1, 0xb8, - 0x1b, 0x8c, 0x77, 0x28, 0x1c, 0xa2, 0x8d, 0x98, 0x7e, 0x40, 0x2f, 0xd0, 0xb8, 0x2d, 0xf9, 0x0d, - 0x74, 0xcb, 0x2a, 0x47, 0x6a, 0x3b, 0x9e, 0x5f, 0xf8, 0x66, 0x4d, 0x9f, 0x42, 0x73, 0x46, 0x55, - 0x8e, 0x21, 0x41, 0xdc, 0x83, 0x4e, 0xaa, 0xa9, 0xfa, 0x70, 0xee, 0xb6, 0x68, 0xbb, 0x77, 0xd8, - 0x49, 0xf5, 0xd4, 0x83, 0xb3, 0xd3, 0x25, 0xde, 0x07, 0xfb, 0xf9, 0x25, 0x72, 0x2d, 0x0e, 0xe0, - 0x3c, 0x04, 0x4f, 0x41, 0x14, 0xb8, 0xec, 0x5e, 0xec, 0x8f, 0xd2, 0x3a, 0x1c, 0xa5, 0xb5, 0xaf, - 0x25, 0x3b, 0xd4, 0x92, 0x7d, 0xd7, 0x92, 0x7d, 0xfe, 0x48, 0x6b, 0xe1, 0xd0, 0x5f, 0xde, 0xfd, - 0x06, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x21, 0x8f, 0x2c, 0x75, 0x01, 0x00, 0x00, + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 
0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto deleted file mode 100644 index f0c82b57c..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; -package mvccpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option 
(gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -message KeyValue { - // key is the key in bytes. An empty key is not allowed. - bytes key = 1; - // create_revision is the revision of last creation on this key. - int64 create_revision = 2; - // mod_revision is the revision of last modification on this key. - int64 mod_revision = 3; - // version is the version of the key. A deletion resets - // the version to zero and any modification of the key - // increases its version. - int64 version = 4; - // value is the value held by the key, in bytes. - bytes value = 5; - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. - int64 lease = 6; -} - -message Event { - enum EventType { - PUT = 0; - DELETE = 1; - } - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - EventType type = 1; - // kv holds the KeyValue for the event. - // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - KeyValue kv = 2; -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go new file mode 100644 index 000000000..58a77dfc1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go @@ -0,0 +1,22 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package fileutil + +import "os" + +// OpenDir opens a directory for syncing. +func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go new file mode 100644 index 000000000..c123395c0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go @@ -0,0 +1,46 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package fileutil + +import ( + "os" + "syscall" +) + +// OpenDir opens a directory in windows with write access for syncing. 
+func OpenDir(path string) (*os.File, error) { + fd, err := openDir(path) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), nil +} + +func openDir(path string) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + createmode := uint32(syscall.OPEN_EXISTING) + fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go index c963a7903..9585ed5e0 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -33,7 +33,7 @@ const ( ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil") + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil") ) // IsDirWriteable checks if dir is writable by writing and removing a file @@ -96,3 +96,26 @@ func Exist(name string) bool { _, err := os.Stat(name) return err == nil } + +// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily +// shorten the length of the file. 
+func ZeroToEnd(f *os.File) error { + // TODO: support FALLOC_FL_ZERO_RANGE + off, err := f.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + lenf, lerr := f.Seek(0, os.SEEK_END) + if lerr != nil { + return lerr + } + if err = f.Truncate(off); err != nil { + return err + } + // make sure blocks remain allocated + if err = Preallocate(f, lenf, true); err != nil { + return err + } + _, err = f.Seek(off, os.SEEK_SET) + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 46fe12009..3ae1b21d4 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -34,27 +34,30 @@ import ( "github.com/coreos/etcd/pkg/tlsutil" ) -func NewListener(addr string, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { +func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(addr, scheme, tlscfg, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { if scheme == "unix" || scheme == "unixs" { // unix sockets via unix://laddr - l, err = NewUnixListener(addr) - } else { - l, err = net.Listen("tcp", addr) + return NewUnixListener(addr) } + return net.Listen("tcp", addr) +} - if err != nil { - return nil, err +func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil } - - if scheme == "https" || scheme == "unixs" { - if tlscfg == nil { - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) - } - - l = tls.NewListener(l, tlscfg) + if tlscfg == nil { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) } - - return l, nil + 
return tls.NewListener(l, tlscfg), nil } type TLSInfo struct { @@ -205,6 +208,9 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) { cfg.ClientCAs = cp } + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + return cfg, nil } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go index f176c43b9..0f4df5fbe 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -24,15 +24,19 @@ import ( // If read/write on the accepted connection blocks longer than its time limit, // it will return timeout error. func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { - ln, err := NewListener(addr, scheme, tlscfg) + ln, err := newListener(addr, scheme) if err != nil { return nil, err } - return &rwTimeoutListener{ + ln = &rwTimeoutListener{ Listener: ln, rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, - }, nil + } + if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { + return nil, err + } + return ln, nil } type rwTimeoutListener struct { diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go index 742cc5cbf..ea16b4c0f 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go @@ -35,7 +35,7 @@ func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time. // it should not be put back to http transport as an idle connection for future usage. tr.MaxIdleConnsPerHost = -1 } else { - // allow more idle connections between peers to avoid unncessary port allocation. 
+ // allow more idle connections between peers to avoid unnecessary port allocation. tr.MaxIdleConnsPerHost = 1024 } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go index ca9ccfd80..4a7fe69d2 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/transport.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/transport.go @@ -64,7 +64,8 @@ func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, er } func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := *req - req2.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) return urt.Transport.RoundTrip(req) } diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md deleted file mode 100644 index 81efb1fb6..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# capnslog, the CoreOS logging package - -There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). -capnslog provides a simple but consistent logging interface suitable for all kinds of projects. - -### Design Principles - -##### `package main` is the place where logging gets turned on and routed - -A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. - -##### All log options are runtime-configurable. - -Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. - -##### There is one log object per package. It is registered under its repository and package name. 
- -`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. - -##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. - -Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. - -Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependant. These are, at best, provided as options, but more likely, provided by your application. - -##### Log objects are an interface - -An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. - -##### Log levels have specific meanings: - - * Critical: Unrecoverable. Must fail. - * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost - * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. - * Notice: Normal, but important (uncommon) log information. - * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. - * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. 
- * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. - diff --git a/vendor/github.com/coreos/pkg/health/README.md b/vendor/github.com/coreos/pkg/health/README.md deleted file mode 100644 index 5ec34c21e..000000000 --- a/vendor/github.com/coreos/pkg/health/README.md +++ /dev/null @@ -1,11 +0,0 @@ -health -==== - -A simple framework for implementing an HTTP health check endpoint on servers. - -Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`. - -### Documentation - -For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health) - diff --git a/vendor/github.com/coreos/pkg/httputil/README.md b/vendor/github.com/coreos/pkg/httputil/README.md deleted file mode 100644 index 44fa751c4..000000000 --- a/vendor/github.com/coreos/pkg/httputil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -httputil -==== - -Common code for dealing with HTTP. - -Includes: - -* Code for returning JSON responses. 
- -### Documentation - -Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil) - diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed650e..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index bde823d8a..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index fd62e9490..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,96 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. 
- -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. 
- -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. 
- -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index f48365faf..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,85 +0,0 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) - -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. - - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. 
- -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. 
- -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. 
- -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. 
-* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index b605b4509..000000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,105 +0,0 @@ -## `jwt-go` Version History - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. 
- * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. 
- * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. 
- -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. 
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore deleted file mode 100644 index 1c3ae0a77..000000000 --- a/vendor/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. -*.sublime-project -*.sublime-workspace diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index d99106019..000000000 --- a/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,18 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 9e80e062b..000000000 --- a/vendor/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,147 +0,0 @@ 
-Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Chris Dillon -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Eric Yang -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frederick F. Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -Jason Freidman -Jeff Nickoloff -Jessie Frazelle -jhaohai -Jianqing Wang -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Louis Kottmann -Luke Carpenter -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Michael Prokop -Michal Minar -Miquel Sabaté -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -Spencer Rinehart -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Vincent Batts -Vincent Demeester -Vincent Giersch -W. 
Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index d9577022b..000000000 --- a/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,119 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. 
- -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/tools/godep github.com/golang/lint/golint - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. 
- -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. 
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index 3445c090c..000000000 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,35 +0,0 @@ -# Changelog - -## 2.5.0 (2016-06-14) - -### Storage -- Ensure uploads directory is cleaned after upload is commited -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -### API -- Support returning HTTP 429 (Too Many Requests) - -### Documentation -- Update auth documentation examples to show "expires in" as int - -### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 7cc7aedff..000000000 --- a/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,140 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... 
- - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry -version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. 
indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. 
- -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. 
If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index bc3c78577..000000000 --- a/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.6-alpine - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs - -RUN set -ex \ - && apk add --no-cache make git - -WORKDIR $DISTRIBUTION_DIR -COPY . 
$DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries - -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/Jenkinsfile b/vendor/github.com/docker/distribution/Jenkinsfile deleted file mode 100644 index fa29520b5..000000000 --- a/vendor/github.com/docker/distribution/Jenkinsfile +++ /dev/null @@ -1,8 +0,0 @@ -// Only run on Linux atm -wrappedNode(label: 'docker') { - deleteDir() - stage "checkout" - checkout scm - - documentationChecker("docs") -} diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index bda400150..000000000 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,58 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "dmcgowan", - "dmp42", - "richardscothern", - "shykes", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index a0602d0b2..000000000 --- a/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,106 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -.PHONY: clean all fmt vet lint build test binaries -.DEFAULT: all -all: fmt vet lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 - -# Package list -PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) - -# Resolving binary dependencies for specific targets -GOLINT := $(shell which golint || echo '') -GODEP := $(shell which godep || echo '') - -${PREFIX}/bin/registry: $(wildcard **/*.go) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry - -${PREFIX}/bin/digest: $(wildcard **/*.go) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest - -${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 
2>&1 | grep -v ^vendor/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) - -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-save: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) save $(PKGS) - -dep-restore: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) restore -v - -dep-validate: dep-restore - @echo "+ $@" - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @rm -Rf Godeps - @$(GODEP) save ./... - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) - @rm -Rf .vendor.bak diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index 99fd59457..000000000 --- a/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. 
- - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation to a V2 API for use in the [Docker -core project](https://github.com/docker/docker). The API should be embeddable -and simplify the process of securely pulling and pushing content from `docker` -daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long term goal of providing a -secure tool chain for distributing content. The specifications, APIs and tools -should be as useful with Docker as they are without. 
- -Our goal is to design a professional grade and extensible content distribution -system that allow users to: - -* Enjoy an efficient, secured and reliable way to store, manage, package and - exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own home made solution through good specs, and solid - extensions mechanism. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](https://docs.docker.com/engine/installation/) gives users this -ability. Users can also push images to a repository on Docker's public registry, -if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For -others, it is not. - -For example, users with their own software products may want to maintain a -registry for private, company images. Also, you may wish to deploy your own -image repository for images used to test or in continuous integration. For these -use cases and others, [deploying your own registry instance](docs/deploying.md) -may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry -1.0 implementation and wish to deploy a Registry 2.0 while retaining images, -data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator] -(https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. 
If you are contributing code, see -the instructions for [building a development environment](BUILDING.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several -avenues are available for support: - - - - - - - - - - - - - - - - - - -
- IRC - - #docker-distribution on FreeNode -
- Issue Tracker - - github.com/docker/distribution/issues -
- Google Groups - - https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution -
- Mailing List - - docker@dockerproject.org -
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afe..000000000 --- a/vendor/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. 
This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. - -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. 
It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service. - -#### Extensibility - -The registry should provide extension points to add functionality. By keeping -the scope narrow, but providing the ability to add functionality. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. - -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigid design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. 
Support has been elided from V2 since we'd like to both -decouple search functionality from the registry. The makes the registry -simpler to deploy, especially in use cases where search is not needed, and -let's us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding on -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone attempts to address the problem with the existing tools -and propose it as a standard search API or uses it to inform a standardization -process. Once this has been explored, we integrate with the docker client. - -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deleting seems simple, there are a number -mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraph discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. 
Removing -unintended data is worse than _not_ removing data that was requested for -removal but ideally, both are supported. Generally, according to this rule, we -err on holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. - -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read- after-write consistency, since this is the lower common denominator -among the storage drivers. This is mitigated by writing values in reverse- -dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we also can try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. 
When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation. The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: 1. maintaining a consistent consensus - of reference counts across a set of Registries and 2. Building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case and a - necessary but simple scan in the second.. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. 
The main drawbacks of this approach are complexity - and coordination. -- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination offset the extra work it might take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly effect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. 
In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. - diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 1f91ae21e..d12533011 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -192,18 +192,6 @@ type BlobCreateOption interface { Apply(interface{}) error } -// CreateOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type CreateOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - // Stat allows to pass precalculated descriptor to link and return. - // Blob access check will be skipped if set. - Stat *Descriptor - } -} - // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml deleted file mode 100644 index 52348a4bc..000000000 --- a/vendor/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,93 +0,0 @@ -# Pony-up! 
-machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.6 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/tools/godep: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies. - # Ensure validation of dependencies - # - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: - # pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install $(go list ./... 
| grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - - # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go index 7fe9b8ab0..2cb1d0417 100644 --- a/vendor/github.com/docker/distribution/context/http.go +++ b/vendor/github.com/docker/distribution/context/http.go @@ -103,22 +103,20 @@ func GetRequestID(ctx Context) string { // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. 
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + if closeNotifier, ok := w.(http.CloseNotifier); ok { irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - }, - CloseNotifier: closeNotifier, + instrumentedResponseWriter: irw, + CloseNotifier: closeNotifier, } return irwCN, irwCN } - irw := instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - } return &irw, &irw } diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae8..000000000 --- a/vendor/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go index ec0bf858d..44b94eaae 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/builder.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -46,9 +46,6 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m.Config, err = mb.bs.Stat(ctx, configDigest) switch err { case nil: - // Override MediaType, since Put always replaces the specified media - // type with application/octet-stream in the descriptor it returns. 
- m.Config.MediaType = MediaTypeConfig return FromStruct(m) case distribution.ErrBlobUnknown: // nop diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go index dd2ed114c..355b5ad4e 100644 --- a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -17,9 +17,6 @@ const ( // MediaTypeConfig specifies the mediaType for the image configuration. MediaTypeConfig = "application/vnd.docker.container.image.v1+json" - // MediaTypePluginConfig specifies the mediaType for plugin configuration. - MediaTypePluginConfig = "application/vnd.docker.plugin.image.v0+json" - // MediaTypeLayer is the mediaType used for layers referenced by the // manifest. MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go index caa6b14e8..c57398bde 100644 --- a/vendor/github.com/docker/distribution/manifest/versioned.go +++ b/vendor/github.com/docker/distribution/manifest/versioned.go @@ -1,8 +1,8 @@ package manifest -// Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. +// Versioned provides a struct with the manifest schemaVersion and . Incoming +// content with unknown schema version can be decoded against this struct to +// check the version. 
type Versioned struct { // SchemaVersion is the image manifest schema that this image follows SchemaVersion int `json:"schemaVersion"` diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 5b3e08ee4..bb09fa25d 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -24,7 +24,6 @@ package reference import ( "errors" "fmt" - "strings" "github.com/docker/distribution/digest" ) @@ -44,9 +43,6 @@ var ( // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. ErrDigestInvalidFormat = errors.New("invalid digest format") - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") @@ -153,9 +149,7 @@ func Parse(s string) (Reference, error) { if s == "" { return nil, ErrNameEmpty } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } + // TODO(dmcgowan): Provide more specific and helpful error return nil, ErrReferenceInvalidFormat } diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index cdac50a53..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. 
For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go deleted file mode 100644 index 75c2d0743..000000000 --- a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build experimental - -package bundlefile - -import ( - "encoding/json" - "fmt" - "io" -) - -// Bundlefile stores the contents of a bundlefile -type Bundlefile struct { - Version string - Services map[string]Service -} - -// Service is a service from a bundlefile -type Service struct { - Image string - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Ports []Port `json:",omitempty"` - WorkingDir *string `json:",omitempty"` - User *string `json:",omitempty"` - Networks []string `json:",omitempty"` -} - -// Port is a port as defined in a bundlefile -type Port struct { - Protocol string - Port uint32 -} - -// LoadFile loads a bundlefile from a 
path to the file -func LoadFile(reader io.Reader) (*Bundlefile, error) { - bundlefile := &Bundlefile{} - - decoder := json.NewDecoder(reader) - if err := decoder.Decode(bundlefile); err != nil { - switch jsonErr := err.(type) { - case *json.SyntaxError: - return nil, fmt.Errorf( - "JSON syntax error at byte %v: %s", - jsonErr.Offset, - jsonErr.Error()) - case *json.UnmarshalTypeError: - return nil, fmt.Errorf( - "Unexpected type at byte %v. Expected %s but received %s.", - jsonErr.Offset, - jsonErr.Type, - jsonErr.Value) - } - return nil, err - } - - return bundlefile, nil -} - -// Print writes the contents of the bundlefile to the output writer -// as human readable json -func Print(out io.Writer, bundle *Bundlefile) error { - bytes, err := json.MarshalIndent(*bundle, "", " ") - if err != nil { - return err - } - - _, err = out.Write(bytes) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md deleted file mode 100644 index 2b237a594..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index 7637f12e1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,52 +0,0 @@ -package system - -import ( - "os" - "syscall" - "time" - "unsafe" -) - -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - -// Chtimes changes the access time and modified time of a file at the given path -func 
Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - if err := setCTime(name, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go deleted file mode 100644 index 09d58bcbf..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -import ( - "time" -) - -//setCTime will set the create time on a file. On Unix, the create -//time is updated as a side effect of setting the modified time, so -//no action is required. -func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go deleted file mode 100644 index 294586846..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build windows - -package system - -import ( - "syscall" - "time" -) - -//setCTime will set the create time on a file. On Windows, this requires -//calling SetFileTime and explicitly including the create time. 
-func setCTime(path string, ctime time.Time) error { - ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) - pathp, e := syscall.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer syscall.Close(h) - c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) - return syscall.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 288318985..000000000 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 04e2de787..000000000 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
-func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. 
-func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index c14feb849..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "os" - "path/filepath" -) - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 16823d551..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. 
- - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. 
-func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go deleted file mode 100644 index bd23c4d50..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb40..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. 
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e6..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index 385f1d5e7..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,65 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. 
-// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. - if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go deleted file mode 100644 index 313c601b1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build solaris,cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo LDFLAGS: -lkstat -// #include -// #include -// #include -// #include -// #include -// #include -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t 
*ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("Error getting system memory info %v\n", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 3ce019dff..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows,!solaris - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. 
-func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index d46642598..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 73958182b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. 
-func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c021..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go deleted file mode 100644 index c607c4db0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. 
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go deleted file mode 100644 index cbfe2c157..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with -// a Windows long-path which doesn't support drive-letters. 
Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go deleted file mode 100644 index 087034c5e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// GetLastModification returns file's last modification time. 
-func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go deleted file mode 100644 index f0742f59e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ /dev/null @@ -1,32 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// FromStatT loads a system.StatT from a syscall.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go deleted file mode 100644 index d0fb6f151..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 8b1eded13..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.StatT from a -// syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go deleted file mode 100644 index 3c3b71fb2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go deleted file mode 100644 index 0216985a2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build solaris - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} - -// FromStatT loads a system.StatT from a syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go deleted file mode 100644 index 5d85f523c..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 39490c625..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. 
-func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 3ae912846..000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux freebsd - -package system - -import "syscall" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return syscall.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. -func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index f5f2d5694..000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,103 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" -) - -var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. 
Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = syscall.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -// IsWindowsClient returns true if the SKU is client -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. -func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := syscall.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. 
- return ntuserApiset.Load() == nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index 3d0146b01..000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de176..000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index e2eac3b55..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
-func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index fc8a1aba9..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - atFdCwd := -100 - atSymLinkNoFollow := 0x100 - - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index 139714544..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!freebsd - -package system - -import "syscall" - -// LUtimesNano is only supported on linux and freebsd. 
-func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index d2e2c0579..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,63 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. -func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. 
-func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0114f2227..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package system - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. 
-func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go deleted file mode 100644 index f5262bccf..000000000 --- a/vendor/github.com/docker/docker/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, byte(key[0])) - } - } - return codes, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. 
-// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go deleted file mode 100644 index 750d7c3f6..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows -// +build !linux !cgo -// +build !solaris !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0ca..000000000 --- 
a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned - Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It - needs to be explicitly set to 1. 
- */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go deleted file mode 100644 index fe59faa94..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ /dev/null @@ -1,123 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - "syscall" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stedrr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= syscall.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. 
-func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go deleted file mode 100644 index 112debbec..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include -#include -#include - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. 
-func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e5..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index 11a16fdea..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,233 +0,0 @@ -// +build windows - -package term - -import ( - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. 
-type Winsize struct { - Height uint16 - Width uint16 -} - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// vtInputSupported is true if enableVirtualTerminalInput is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stedrr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that enableVirtualTerminalInput is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) - } - - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStdout = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. 
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStderr = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - if os.Getenv("ConEmuANSI") == "ON" { - // The ConEmu terminal emulates ANSI on output streams well. - emulateStdin = true - emulateStdout = false - emulateStderr = false - } - - if emulateStdin { - stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. 
-func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since disableNewlineAutoReturn might not be supported on this - // version of Windows. - winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= enableVirtualTerminalInput - } - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900a..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. 
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough 
to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go deleted file mode 100644 index 22921b6ae..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - 
-// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. 
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 58452ad78..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,261 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm 
"github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. 
-func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. -func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. 
-func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index a3ce5697d..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. 
-func NewAnsiWriter(nFile int) io.Writer { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. -func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index ca5c3b2e5..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package windows - -import ( - "os" - - "github.com/Azure/go-ansiterm/winterm" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. 
-func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index ce4cb5990..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go index def3f0619..fa3b2cfb4 100644 --- a/vendor/github.com/docker/engine-api/types/client.go +++ b/vendor/github.com/docker/engine-api/types/client.go @@ -10,12 +10,6 @@ import ( "github.com/docker/go-units" ) -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - Exit bool -} - // ContainerAttachOptions holds parameters to attach to a container. type ContainerAttachOptions struct { Stream bool @@ -73,11 +67,6 @@ type ContainerRemoveOptions struct { Force bool } -// ContainerStartOptions holds parameters to start containers. 
-type ContainerStartOptions struct { - CheckpointID string -} - // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { @@ -218,7 +207,11 @@ type ImageSearchOptions struct { RegistryAuth string PrivilegeFunc RequestPrivilegeFunc Filters filters.Args - Limit int +} + +// ImageTagOptions holds parameters to tag an image +type ImageTagOptions struct { + Force bool } // ResizeOptions holds parameters to resize a tty. @@ -240,47 +233,3 @@ type VersionResponse struct { func (v VersionResponse) ServerOK() bool { return v.Server != nil } - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filter filters.Args -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string -} - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filter filters.Args -} - -// TaskListOptions holds parameters to list tasks with. 
-type TaskListOptions struct { - Filter filters.Args -} diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go index 707fc8c17..1dfc40834 100644 --- a/vendor/github.com/docker/engine-api/types/container/config.go +++ b/vendor/github.com/docker/engine-api/types/container/config.go @@ -1,32 +1,10 @@ package container import ( - "time" - "github.com/docker/engine-api/types/strslice" "github.com/docker/go-connections/nat" ) -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". @@ -46,7 +24,6 @@ type Config struct { StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
Env []string // List of environment variable to set in the container Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) Image string // Name of the image as it was passed by the operator (eg. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container @@ -57,6 +34,4 @@ type Config struct { OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile Labels map[string]string // List of labels set to this container StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT } diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go index a9ff755b0..2446c1904 100644 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ b/vendor/github.com/docker/engine-api/types/container/host_config.go @@ -195,7 +195,7 @@ type RestartPolicy struct { // IsNone indicates whether the container has the "no" restart policy. // This means the container will not automatically restart when exiting. func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" + return rp.Name == "no" } // IsAlways indicates whether the container has the "always" restart policy. 
@@ -257,10 +257,11 @@ type Resources struct { Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive + NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second } // UpdateConfig holds the mutable attributes of a Container. @@ -303,13 +304,12 @@ type HostConfig struct { PublishAllPorts bool // Should docker publish all exposed port for the container ReadonlyRootfs bool // Is the container root filesystem in read-only SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + StorageOpt map[string]string // Storage driver options per container. 
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container UTSMode UTSMode // UTS namespace to use for the container UsernsMode UsernsMode // The user namespace to use for the container ShmSize int64 // Total shm memory usage Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows ConsoleSize [2]int // Initial console size diff --git a/vendor/github.com/docker/engine-api/types/errors.go b/vendor/github.com/docker/engine-api/types/errors.go deleted file mode 100644 index 649ab9513..000000000 --- a/vendor/github.com/docker/engine-api/types/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// ErrorResponse is the response body of API errors. -type ErrorResponse struct { - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go index dc2c48b89..0e0d7e380 100644 --- a/vendor/github.com/docker/engine-api/types/filters/parse.go +++ b/vendor/github.com/docker/engine-api/types/filters/parse.go @@ -215,22 +215,10 @@ func (filters Args) ExactMatch(field, source string) bool { } // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. 
-func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { + if fieldValues[source] { return true } - if len(filters.fields[field]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] + return false } // FuzzyMatch returns true if the source matches exactly one of the filters, diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go index 47080b652..bce60f5ee 100644 --- a/vendor/github.com/docker/engine-api/types/network/network.go +++ b/vendor/github.com/docker/engine-api/types/network/network.go @@ -23,9 +23,8 @@ type IPAMConfig struct { // EndpointIPAMConfig represents IPAM configurations for the endpoint type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` } // EndpointSettings stores the network endpoint details diff --git a/vendor/github.com/docker/engine-api/types/plugin.go b/vendor/github.com/docker/engine-api/types/plugin.go deleted file mode 100644 index 05030ff3d..000000000 --- a/vendor/github.com/docker/engine-api/types/plugin.go +++ /dev/null @@ -1,169 +0,0 @@ -// +build experimental - -package types - -import ( - "encoding/json" - "fmt" -) - -// PluginInstallOptions holds parameters to install a plugin. 
-type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) -} - -// PluginConfig represents the values of settings potentially modifiable by a user -type PluginConfig struct { - Mounts []PluginMount - Env []string - Args []string - Devices []PluginDevice -} - -// Plugin represents a Docker plugin for the remote API -type Plugin struct { - ID string `json:"Id,omitempty"` - Name string - Tag string - Active bool - Config PluginConfig - Manifest PluginManifest -} - -// PluginsListResponse contains the response for the remote API -type PluginsListResponse []*Plugin - -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - -// PluginInterfaceType represents a type that a plugin implements. -type PluginInterfaceType struct { - Prefix string // This is always "docker" - Capability string // Capability should be validated against the above list. - Version string // Plugin API version. 
Depends on the capability -} - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginInterface describes the interface between Docker and plugin -type PluginInterface struct { - Types []PluginInterfaceType - Socket string -} - -// PluginSetting is to be embedded in other structs, if they are supposed to be -// modifiable by the user. 
-type PluginSetting struct { - Name string - Description string - Settable []string -} - -// PluginNetwork represents the network configuration for a plugin -type PluginNetwork struct { - Type string -} - -// PluginMount represents the mount configuration for a plugin -type PluginMount struct { - PluginSetting - Source *string - Destination string - Type string - Options []string -} - -// PluginEnv represents an environment variable for a plugin -type PluginEnv struct { - PluginSetting - Value *string -} - -// PluginArgs represents the command line arguments for a plugin -type PluginArgs struct { - PluginSetting - Value []string -} - -// PluginDevice represents a device for a plugin -type PluginDevice struct { - PluginSetting - Path *string -} - -// PluginUser represents the user for the plugin's process -type PluginUser struct { - UID uint32 `json:"Uid,omitempty"` - GID uint32 `json:"Gid,omitempty"` -} - -// PluginManifest represents the manifest of a plugin -type PluginManifest struct { - ManifestVersion string - Description string - Documentation string - Interface PluginInterface - Entrypoint []string - Workdir string - User PluginUser `json:",omitempty"` - Network PluginNetwork - Capabilities []string - Mounts []PluginMount - Devices []PluginDevice - Env []PluginEnv - Args PluginArgs -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go index 854f1c453..e0305a9e3 100644 --- a/vendor/github.com/docker/engine-api/types/seccomp.go +++ b/vendor/github.com/docker/engine-api/types/seccomp.go @@ -24,11 +24,6 @@ const ( ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" ) // Action taken upon Seccomp rule match diff --git a/vendor/github.com/docker/engine-api/types/swarm/common.go b/vendor/github.com/docker/engine-api/types/swarm/common.go deleted file mode 100644 index b87f54536..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/common.go +++ /dev/null @@ -1,21 +0,0 @@ -package swarm - -import "time" - -// Version represent the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. 
-type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/container.go b/vendor/github.com/docker/engine-api/types/swarm/container.go deleted file mode 100644 index 29f2e8a64..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/container.go +++ /dev/null @@ -1,67 +0,0 @@ -package swarm - -import "time" - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Mounts []Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` -} - -// MountType represents the type of a mount. -type MountType string - -const ( - // MountTypeBind BIND - MountTypeBind MountType = "bind" - // MountTypeVolume VOLUME - MountTypeVolume MountType = "volume" -) - -// Mount represents a mount (volume). -type Mount struct { - Type MountType `json:",omitempty"` - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` -} - -// MountPropagation represents the propagation of a mount. 
-type MountPropagation string - -const ( - // MountPropagationRPrivate RPRIVATE - MountPropagationRPrivate MountPropagation = "rprivate" - // MountPropagationPrivate PRIVATE - MountPropagationPrivate MountPropagation = "private" - // MountPropagationRShared RSHARED - MountPropagationRShared MountPropagation = "rshared" - // MountPropagationShared SHARED - MountPropagationShared MountPropagation = "shared" - // MountPropagationRSlave RSLAVE - MountPropagationRSlave MountPropagation = "rslave" - // MountPropagationSlave SLAVE - MountPropagationSlave MountPropagation = "slave" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation MountPropagation `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/network.go b/vendor/github.com/docker/engine-api/types/swarm/network.go deleted file mode 100644 index 84804da2f..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/network.go +++ /dev/null @@ -1,99 +0,0 @@ -package swarm - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. 
-type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - TargetPort uint32 `json:",omitempty"` - PublishedPort uint32 `json:",omitempty"` -} - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. -type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} - -// Driver represents a driver (network/volume). 
-type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/node.go b/vendor/github.com/docker/engine-api/types/swarm/node.go deleted file mode 100644 index 8421f67a2..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/node.go +++ /dev/null @@ -1,118 +0,0 @@ -package swarm - -// Node represents a node. -type Node struct { - ID string - Meta - - Spec NodeSpec `json:",omitempty"` - Description NodeDescription `json:",omitempty"` - Status NodeStatus `json:",omitempty"` - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Membership NodeMembership `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeMembership represents the membership of a node. -type NodeMembership string - -const ( - // NodeMembershipPending PENDING - NodeMembershipPending NodeMembership = "pending" - // NodeMembershipAccepted ACCEPTED - NodeMembershipAccepted NodeMembership = "accepted" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. 
-type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` -} - -// Platform represents the platfrom (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. 
-type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/github.com/docker/engine-api/types/swarm/service.go b/vendor/github.com/docker/engine-api/types/swarm/service.go deleted file mode 100644 index 6303c146f..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/service.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// UpdateConfig represents the update configuration. 
-type UpdateConfig struct { - Parallelism uint64 `json:",omitempty"` - Delay time.Duration `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/github.com/docker/engine-api/types/swarm/swarm.go deleted file mode 100644 index 0a0685d0f..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/swarm.go +++ /dev/null @@ -1,129 +0,0 @@ -package swarm - -import "time" - -// Swarm represents a swarm. -type Swarm struct { - ID string - Meta - Spec Spec -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - AcceptancePolicy AcceptancePolicy `json:",omitempty"` - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - - // DefaultLogDriver sets the log driver to use at task creation time if - // unspecified by a task. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - DefaultLogDriver *Driver `json:",omitempty"` -} - -// AcceptancePolicy represents the list of policies. -type AcceptancePolicy struct { - Policies []Policy `json:",omitempty"` -} - -// Policy represents a role, autoaccept and secret. -type Policy struct { - Role NodeRole - Autoaccept bool - Secret *string `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - TaskHistoryRetentionLimit int64 `json:",omitempty"` -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - SnapshotInterval uint64 `json:",omitempty"` - KeepOldSnapshots uint64 `json:",omitempty"` - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - HeartbeatTick uint32 `json:",omitempty"` - ElectionTick uint32 `json:",omitempty"` -} - -// DispatcherConfig represents dispatcher configuration. 
-type DispatcherConfig struct { - HeartbeatPeriod uint64 `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - NodeCertExpiry time.Duration `json:",omitempty"` - ExternalCAs []*ExternalCA `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - Protocol ExternalCAProtocol - URL string - Options map[string]string `json:",omitempty"` -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - ForceNewCluster bool - Spec Spec -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - RemoteAddrs []string - Secret string // accept by secret - CACertHash string - Manager bool -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int - Managers int - CACertHash string -} - -// Peer represents a peer. 
-type Peer struct { - NodeID string - Addr string -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/task.go b/vendor/github.com/docker/engine-api/types/swarm/task.go deleted file mode 100644 index fa8228a49..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/task.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm - -import "time" - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" -) - -// Task represents a task. -type Task struct { - ID string - Meta - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. 
-type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory). -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus ContainerStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. 
-type ContainerStatus struct { - ContainerID string `json:",omitempty"` - PID int `json:",omitempty"` - ExitCode int `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go index 3cc8db8c1..cb2dc9ac9 100644 --- a/vendor/github.com/docker/engine-api/types/types.go +++ b/vendor/github.com/docker/engine-api/types/types.go @@ -7,7 +7,6 @@ import ( "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/network" "github.com/docker/engine-api/types/registry" - "github.com/docker/engine-api/types/swarm" "github.com/docker/go-connections/nat" ) @@ -29,7 +28,7 @@ type ContainerExecCreateResponse struct { } // ContainerUpdateResponse contains response of Remote API: -// POST "/containers/{name:.*}/update" +// POST /containers/{name:.*}/update type ContainerUpdateResponse struct { // Warnings are any warnings encountered during the updating of the container. Warnings []string `json:"Warnings"` @@ -143,7 +142,7 @@ type Port struct { } // Container contains response of Remote API: -// GET "/containers/json" +// GET "/containers/json" type Container struct { ID string `json:"Id"` Names []string @@ -253,9 +252,6 @@ type Info struct { ClusterStore string ClusterAdvertise string SecurityOptions []string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info } // PluginsInfo is a temp struct holding Plugins name @@ -278,28 +274,6 @@ type ExecStartCheck struct { Tty bool } -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=starting, else=error running probe - Output string // Output from last check -} - -// Health states -const ( - Starting = "starting" // Starting indicates that the container is 
not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - // ContainerState stores container's running state // it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { @@ -314,7 +288,6 @@ type ContainerState struct { Error string StartedAt string FinishedAt string - Health *Health `json:",omitempty"` } // ContainerNode stores information about the node that a container @@ -325,7 +298,7 @@ type ContainerNode struct { Addr string Name string Cpus int - Memory int64 + Memory int Labels map[string]string } @@ -379,13 +352,13 @@ type SummaryNetworkSettings struct { // NetworkSettingsBase holds basic information about networks type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. 
`docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Ports nat.PortMap + SandboxKey string SecondaryIPAddresses []network.Address SecondaryIPv6Addresses []network.Address } @@ -394,14 +367,14 @@ type NetworkSettingsBase struct { // during the 2 release deprecation period. // It will be removed in Docker 1.11. type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network + EndpointID string + Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + IPAddress string + IPPrefixLen int + IPv6Gateway string + MacAddress string } // MountPoint represents a mount point configuration inside the container. 
@@ -443,16 +416,16 @@ type VolumeCreateRequest struct { // NetworkResource is the body of the "get network" http response message type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created + Name string + ID string `json:"Id"` + Scope string + Driver string + EnableIPv6 bool + IPAM network.IPAM + Internal bool + Containers map[string]EndpointResource + Options map[string]string + Labels map[string]string } // EndpointResource contains network resources allocated and used for a container in a network @@ -498,14 +471,3 @@ type NetworkDisconnect struct { Container string Force bool } - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md deleted file mode 100644 index cdac50a53..000000000 --- a/vendor/github.com/docker/engine-api/types/versions/README.md +++ /dev/null @@ 
-1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. 
- -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. 
- -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 477be8b21..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-connections maintainers file -# -# This file describes who runs the docker/go-connections project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 3ce4d79da..000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,18 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code -is released under the Apache 2.0 license. The README.md file, and files in the -"docs" folder are licensed under the Creative Commons Attribution 4.0 -International License under the terms and conditions set forth in the file -"LICENSE.docs". You may obtain a duplicate copy of the same license, titled -CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b3547..000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . 
| tee /dev/stderr)" diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8ab..000000000 --- a/vendor/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175fe..000000000 --- a/vendor/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db3818..000000000 --- a/vendor/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. 
Code released under the Apache 2.0 license. -Docs released under Creative commons. - diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore deleted file mode 100644 index cece7be66..000000000 --- a/vendor/github.com/emicklei/go-restful/.gitignore +++ /dev/null @@ -1,70 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -restful.html - -*.out - -tmp.prof - -go-restful.test - -examples/restful-basic-authentication - -examples/restful-encoding-filter - -examples/restful-filters - -examples/restful-hello-world - -examples/restful-resource-functions - -examples/restful-serve-static - -examples/restful-user-service - -*.DS_Store -examples/restful-user-resource - -examples/restful-multi-containers - -examples/restful-form-handling - -examples/restful-CORS-filter - -examples/restful-options-filter - -examples/restful-curly-router - -examples/restful-cpuprofiler-service - -examples/restful-pre-post-filters - -curly.prof - -examples/restful-NCSA-logging - -examples/restful-html-template - -s.html -restful-path-tail diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md deleted file mode 100644 index 070bca7cd..000000000 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ /dev/null @@ -1,163 +0,0 @@ -Change history of go-restful -= -2016-02-14 -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 -- rename new WriteStatusAnd... to WriteHeaderAnd... 
for consistency - -2015-09-25 -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 -- add configurable logging - -2015-03-18 -- if not specified, the Operation is derived from the Route function - -2015-03-17 -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 -- (api add) ApiVersion(.) for documentation in Swagger UI - -2014-11-10 -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 -- (api add) expose constructor of Request for testing. - -2014-06-27 -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). 
-- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 -- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - -2014-02-26 -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - - (api change) Write* methods in Response now return the error or nil. - - added example of serving HTML from a Go template. - - fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - - (api add) Response knows how many bytes are written to the response body. - -2013-10-29 - - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - - (api add) Response knows what HTTP status has been written - - (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - - (api change) Router interface simplified - - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - - fixed some reported issues (see github) - - (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. 
- - (api add) the swagger package has be extended to have a UI per container. - - if panic is detected then a small stack trace is printed (thanks to runner-mei) - - (api add) WriteErrorString to Response - -Important API changes: - - - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. - - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - - -2013-07-06 - - - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - -2013-06-19 - - - (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - - - (api change) removed Dispatcher interface, hide PathExpression - - changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - - - (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - - - (api add) Added support for request/response filter functions - -2013-05-18 - - - - (api add) Added feature to change the default Http Request Dispatch function (travis cline) - - (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 2013-05-18> - - - See https://github.com/emicklei/go-restful/commits - -2012-11-14 - - - Initial commit - - diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md deleted file mode 100644 index cfe6d0a91..000000000 --- a/vendor/github.com/emicklei/go-restful/README.md +++ /dev/null @@ -1,74 +0,0 @@ -go-restful -========== - -package for building REST-style Web Services using Google Go - -REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. 
This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: - -- GET = Retrieve a representation of a resource -- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. -- PUT = Create if you are sending the full content of the specified resource (URI). -- PUT = Update if you are updating the full content of the specified resource. -- DELETE = Delete if you are requesting the server to delete the resource -- PATCH = Update partial content of a resource -- OPTIONS = Get information about the communication options for the request URI - -### Example - -```Go -ws := new(restful.WebService) -ws. - Path("/users"). - Consumes(restful.MIME_XML, restful.MIME_JSON). - Produces(restful.MIME_JSON, restful.MIME_XML) - -ws.Route(ws.GET("/{user-id}").To(u.findUser). - Doc("get a user"). - Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). - Writes(User{})) -... - -func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... -} -``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) - -### Features - -- Routes for request → function mapping with path parameter (e.g. {id}) support -- Configurable router: - - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default) - - Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. 
/meetings/{id} or /static/{subpath:*}, See CurlyRouter) -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) -- Response API for writing structs to JSON/XML and setting headers -- Filters for intercepting the request → response flow on Service or Route level -- Request-scoped variables using attributes -- Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of request and response payloads -- Automatic responses on OPTIONS (using a filter) -- Automatic CORS request handling (using a filter) -- API declaration for Swagger UI (see swagger package) -- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) -- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) -- Configurable (trace) logging -- Customizable encoding using EntityReaderWriter registration -- Customizable gzip/deflate readers and writers using CompressorProvider registration - -### Resources - -- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful) -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) -- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) -- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) -- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) -- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1) -- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - -[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest) - -(c) 2012 - 2015, http://ernestmicklei.com. MIT License - -Type ```git shortlog -s``` for a full list of contributors. 
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile deleted file mode 100644 index 16fd18689..000000000 --- a/vendor/github.com/emicklei/go-restful/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh deleted file mode 100644 index 47ffbe4ac..000000000 --- a/vendor/github.com/emicklei/go-restful/bench_test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out - -go test -c -./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany -./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly - -#go tool pprof go-restful.test tmp.prof -go tool pprof go-restful.test curly.prof - - diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh deleted file mode 100644 index e27dbf1a9..000000000 --- a/vendor/github.com/emicklei/go-restful/coverage.sh +++ /dev/null @@ -1,2 +0,0 @@ -go test -coverprofile=coverage.out -go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/install.sh b/vendor/github.com/emicklei/go-restful/install.sh deleted file mode 100644 index 36cbf25f8..000000000 --- a/vendor/github.com/emicklei/go-restful/install.sh +++ /dev/null @@ -1,10 +0,0 @@ -go test -test.v ...restful && \ -go test -test.v ...swagger && \ -go vet ...restful && \ -go fmt ...swagger && \ -go install ...swagger && \ -go fmt ...restful && \ -go install ...restful -cd examples - ls *.go | xargs -I {} go build -o /tmp/ignore {} - cd .. 
\ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md b/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md deleted file mode 100644 index 736f6f37c..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/CHANGES.md +++ /dev/null @@ -1,43 +0,0 @@ -Change history of swagger -= -2015-10-16 -- add type override mechanism for swagger models (MR 254, nathanejohnson) -- replace uses of wildcard in generated apidocs (issue 251) - -2015-05-25 -- (api break) changed the type of Properties in Model -- (api break) changed the type of Models in ApiDeclaration -- (api break) changed the parameter type of PostBuildDeclarationMapFunc - -2015-04-09 -- add ModelBuildable interface for customization of Model - -2015-03-17 -- preserve order of Routes per WebService in Swagger listing -- fix use of $ref and type in Swagger models -- add api version to listing - -2014-11-14 -- operation parameters are now sorted using ordering path,query,form,header,body - -2014-11-12 -- respect omitempty tag value for embedded structs -- expose ApiVersion of WebService to Swagger ApiDeclaration - -2014-05-29 -- (api add) Ability to define custom http.Handler to serve swagger-ui static files - -2014-05-04 -- (fix) include model for array element type of response - -2014-01-03 -- (fix) do not add primitive type to the Api models - -2013-11-27 -- (fix) make Swagger work for WebServices with root ("/" or "") paths - -2013-10-29 -- (api add) package variable LogInfo to customize logging function - -2013-10-15 -- upgraded to spec version 1.2 (https://github.com/wordnik/swagger-core/wiki/1.2-transition) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/swagger/README.md b/vendor/github.com/emicklei/go-restful/swagger/README.md deleted file mode 100644 index 6c27c3070..000000000 --- a/vendor/github.com/emicklei/go-restful/swagger/README.md +++ /dev/null @@ -1,76 +0,0 @@ -How to use Swagger UI with 
go-restful -= - -Get the Swagger UI sources (version 1.2 only) - - git clone https://github.com/wordnik/swagger-ui.git - -The project contains a "dist" folder. -Its contents has all the Swagger UI files you need. - -The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`. -You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json` - -Now, you can install the Swagger WebService for serving the Swagger specification in JSON. - - config := swagger.Config{ - WebServices: restful.RegisteredWebServices(), - ApiPath: "/apidocs.json", - SwaggerPath: "/apidocs/", - SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"} - swagger.InstallSwaggerService(config) - - -Documenting Structs --- - -Currently there are 2 ways to document your structs in the go-restful Swagger. - -###### By using struct tags -- Use tag "description" to annotate a struct field with a description to show in the UI -- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line. - -###### By using the SwaggerDoc method -Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**. 
- - type Address struct { - Country string `json:"country,omitempty"` - PostCode int `json:"postcode,omitempty"` - } - - func (Address) SwaggerDoc() map[string]string { - return map[string]string{ - "": "Address doc", - "country": "Country doc", - "postcode": "PostCode doc", - } - } - -This example will generate a JSON like this - - { - "Address": { - "id": "Address", - "description": "Address doc", - "properties": { - "country": { - "type": "string", - "description": "Country doc" - }, - "postcode": { - "type": "integer", - "format": "int32", - "description": "PostCode doc" - } - } - } - } - -**Very Important Notes:** -- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address)) -- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`) - -Notes --- -- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..) -- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints. diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml deleted file mode 100644 index ed5cb244c..000000000 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover ./... 
- -notifications: - email: false diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index d0d826bac..000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,29 +0,0 @@ -## JSON-Patch - -Provides the ability to modify and test a JSON according to a -[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). - -*Version*: **1.0** - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) - -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) - -### API Usage - -* Given a `[]byte`, obtain a Patch object - - `obj, err := jsonpatch.DecodePatch(patch)` - -* Apply the patch and get a new document back - - `out, err := obj.Apply(doc)` - -* Create a JSON Merge Patch document based on two json documents (a to b): - - `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` - -* Bonus API: compare documents for structural equality - - `jsonpatch.Equal(doca, docb)` - diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/vendor/github.com/fatih/structs/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml deleted file mode 100644 index 845012b7a..000000000 --- a/vendor/github.com/fatih/structs/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go: - - 1.6 - - tip -sudo: 
false -before_install: -- go get github.com/axw/gocov/gocov -- go get github.com/mattn/goveralls -- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi -script: -- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md deleted file mode 100644 index 44e01006e..000000000 --- a/vendor/github.com/fatih/structs/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs) - -Structs contains various utilities to work with Go (Golang) structs. It was -initially used by me to convert a struct into a `map[string]interface{}`. With -time I've added other utilities for structs. It's basically a high level -package based on primitives from the reflect package. Feel free to add new -functions or improve the existing code. - -## Install - -```bash -go get github.com/fatih/structs -``` - -## Usage and Examples - -Just like the standard lib `strings`, `bytes` and co packages, `structs` has -many global functions to manipulate or organize your struct data. 
Lets define -and declare a struct: - -```go -type Server struct { - Name string `json:"name,omitempty"` - ID int - Enabled bool - users []string // not exported - http.Server // embedded -} - -server := &Server{ - Name: "gopher", - ID: 123456, - Enabled: true, -} -``` - -```go -// Convert a struct to a map[string]interface{} -// => {"Name":"gopher", "ID":123456, "Enabled":true} -m := structs.Map(server) - -// Convert the values of a struct to a []interface{} -// => ["gopher", 123456, true] -v := structs.Values(server) - -// Convert the names of a struct to a []string -// (see "Names methods" for more info about fields) -n := structs.Names(server) - -// Convert the values of a struct to a []*Field -// (see "Field methods" for more info about fields) -f := structs.Fields(server) - -// Return the struct name => "Server" -n := structs.Name(server) - -// Check if any field of a struct is initialized or not. -h := structs.HasZero(server) - -// Check if all fields of a struct is initialized or not. -z := structs.IsZero(server) - -// Check if server is a struct or a pointer to struct -i := structs.IsStruct(server) -``` - -### Struct methods - -The structs functions can be also used as independent methods by creating a new -`*structs.Struct`. This is handy if you want to have more control over the -structs (such as retrieving a single Field). - -```go -// Create a new struct type: -s := structs.New(server) - -m := s.Map() // Get a map[string]interface{} -v := s.Values() // Get a []interface{} -f := s.Fields() // Get a []*Field -n := s.Names() // Get a []string -f := s.Field(name) // Get a *Field based on the given field name -f, ok := s.FieldOk(name) // Get a *Field based on the given field name -n := s.Name() // Get the struct name -h := s.HasZero() // Check if any field is initialized -z := s.IsZero() // Check if all fields are initialized -``` - -### Field methods - -We can easily examine a single Field for more detail. 
Below you can see how we -get and interact with various field methods: - - -```go -s := structs.New(server) - -// Get the Field struct for the "Name" field -name := s.Field("Name") - -// Get the underlying value, value => "gopher" -value := name.Value().(string) - -// Set the field's value -name.Set("another gopher") - -// Get the field's kind, kind => "string" -name.Kind() - -// Check if the field is exported or not -if name.IsExported() { - fmt.Println("Name field is exported") -} - -// Check if the value is a zero value, such as "" for string, 0 for int -if !name.IsZero() { - fmt.Println("Name is initialized") -} - -// Check if the field is an anonymous (embedded) field -if !name.IsEmbedded() { - fmt.Println("Name is not an embedded field") -} - -// Get the Field's tag value for tag name "json", tag value => "name,omitempty" -tagValue := name.Tag("json") -``` - -Nested structs are supported too: - -```go -addrField := s.Field("Server").Field("Addr") - -// Get the value for addr -a := addrField.Value().(string) - -// Or get all fields -httpServer := s.Field("Server").Fields() -``` - -We can also get a slice of Fields from the Struct type to iterate over all -fields. 
This is handy if you wish to examine all fields: - -```go -s := structs.New(server) - -for _, f := range s.Fields() { - fmt.Printf("field name: %+v\n", f.Name()) - - if f.IsExported() { - fmt.Printf("value : %+v\n", f.Value()) - fmt.Printf("is zero : %+v\n", f.IsZero()) - } -} -``` - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * [Cihangir Savas](https://github.com/cihangir) - -## License - -The MIT License (MIT) - see LICENSE.md for more details diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go index 1c052751c..06da62094 100644 --- a/vendor/github.com/fatih/structs/structs.go +++ b/vendor/github.com/fatih/structs/structs.go @@ -431,7 +431,7 @@ func strctVal(s interface{}) reflect.Value { v := reflect.ValueOf(s) // if pointer get the underlying element≤ - if v.Kind() == reflect.Ptr { + for v.Kind() == reflect.Ptr { v = v.Elem() } @@ -558,7 +558,10 @@ func (s *Struct) nested(val reflect.Value) interface{} { // TODO(arslan): should this be optional? // do not iterate of non struct types, just pass the value. Ie: []int, // []string, co... We only iterate further if it's a struct. - if val.Type().Elem().Kind() != reflect.Struct { + // i.e []foo or []*foo + if val.Type().Elem().Kind() != reflect.Struct && + !(val.Type().Elem().Kind() == reflect.Ptr && + val.Type().Elem().Elem().Kind() == reflect.Struct) { finalVal = val.Interface() break } diff --git a/vendor/github.com/flynn/go-shlex/Makefile b/vendor/github.com/flynn/go-shlex/Makefile deleted file mode 100644 index 038d9a489..000000000 --- a/vendor/github.com/flynn/go-shlex/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2011 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include $(GOROOT)/src/Make.inc - -TARG=shlex -GOFILES=\ - shlex.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/flynn/go-shlex/README.md b/vendor/github.com/flynn/go-shlex/README.md deleted file mode 100644 index c86bcc066..000000000 --- a/vendor/github.com/flynn/go-shlex/README.md +++ /dev/null @@ -1,2 +0,0 @@ -go-shlex is a simple lexer for go that supports shell-style quoting, -commenting, and escaping. diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore deleted file mode 100644 index 5f6b48eae..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# temporary symlink for testing -testing/data/symlink diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml deleted file mode 100644 index 68b137ad2..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: go -sudo: required -go: - - 1.4.2 - - 1.5.3 - - 1.6 - - tip -os: - - linux - - osx -env: - - GOARCH=amd64 DOCKER_VERSION=1.8.3 - - GOARCH=386 DOCKER_VERSION=1.8.3 - - GOARCH=amd64 DOCKER_VERSION=1.9.1 - - GOARCH=386 DOCKER_VERSION=1.9.1 - - GOARCH=amd64 DOCKER_VERSION=1.10.3 - - GOARCH=386 DOCKER_VERSION=1.10.3 -install: - - travis_retry travis-scripts/install.bash -script: - - travis-scripts/run-tests.bash -services: - - docker -matrix: - fast_finish: true - allow_failures: - - go: tip diff --git 
a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS deleted file mode 100644 index bb71cc345..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ /dev/null @@ -1,132 +0,0 @@ -# This is the official list of go-dockerclient authors for copyright purposes. - -Abhishek Chanda -Adam Bell-Hanssen -Adrien Kohlbecker -Aldrin Leal -Andreas Jaekle -Andrews Medina -Andrey Sibiryov -Andy Goldstein -Antonio Murdaca -Artem Sidorenko -Ben Marini -Ben McCann -Ben Parees -Benno van den Berg -Bradley Cicenas -Brendan Fosberry -Brian Lalor -Brian P. Hamachek -Brian Palmer -Bryan Boreham -Burke Libbey -Carlos Diaz-Padron -Cesar Wong -Cezar Sa Espinola -Cheah Chu Yeow -cheneydeng -Chris Bednarski -CMGS -Colin Hebert -Craig Jellick -Dan Williams -Daniel, Dao Quang Minh -Daniel Garcia -Daniel Hiltgen -Darren Shepherd -Dave Choi -David Huie -Dawn Chen -Dinesh Subhraveti -Drew Wells -Ed -Elias G. Schneevoigt -Erez Horev -Eric Anderson -Ewout Prangsma -Fabio Rehm -Fatih Arslan -Flavia Missi -Francisco Souza -Frank Groeneveld -George Moura -Grégoire Delattre -Guillermo Álvarez Fernández -Harry Zhang -He Simei -Ivan Mikushin -James Bardin -James Nugent -Jari Kolehmainen -Jason Wilder -Jawher Moussa -Jean-Baptiste Dalido -Jeff Mitchell -Jeffrey Hulten -Jen Andre -Jérôme Laurens -Johan Euphrosine -John Hughes -Kamil Domanski -Karan Misra -Ken Herner -Kim, Hirokuni -Kyle Allan -Liron Levin -Lior Yankovich -Liu Peng -Lorenz Leutgeb -Lucas Clemente -Lucas Weiblen -Lyon Hill -Mantas Matelis -Martin Sweeney -Máximo Cuadros Ortiz -Michael Schmatz -Michal Fojtik -Mike Dillon -Mrunal Patel -Nate Jones -Nguyen Sy Thanh Son -Nicholas Van Wiggeren -Nick Ethier -Omeid Matten -Orivej Desh -Paul Bellamy -Paul Morie -Paul Weil -Peter Edge -Peter Jihoon Kim -Phil Lu -Philippe Lafoucrière -Rafe Colton -Rob Miller -Robert Williamson -Roman Khlystik -Salvador Gironès -Sam Rijs -Sami Wagiaalla -Samuel Archambault -Samuel Karp -Silas Sewell 
-Simon Eskildsen -Simon Menke -Skolos -Soulou -Sridhar Ratnakumar -Summer Mousa -Sunjin Lee -Tarsis Azevedo -Tim Schindler -Timothy St. Clair -Tobi Knaup -Tom Wilkie -Tonic -ttyh061 -Victor Marmol -Vincenzo Prignano -Wiliam Souza -Ye Yin -Yu, Zou -Yuriy Bogdanov diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE deleted file mode 100644 index 706634474..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE +++ /dev/null @@ -1,6 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -You can find the Docker license at the following link: -https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile deleted file mode 100644 index dd8c73b1b..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ /dev/null @@ -1,57 +0,0 @@ -.PHONY: \ - all \ - vendor \ - lint \ - vet \ - fmt \ - fmtcheck \ - pretest \ - test \ - integration \ - cov \ - clean - -PKGS = . 
./testing - -all: test - -vendor: - @ go get -v github.com/mjibson/party - party -d external -c -u - -lint: - @ go get -v github.com/golang/lint/golint - @for file in $$(git ls-files '*.go' | grep -v 'external/'); do \ - export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \ - [ -n "$${output}" ] && echo "$${output}" && export status=1; \ - done; \ - exit $${status:-0} - -vet: - $(foreach pkg,$(PKGS),go vet $(pkg);) - -fmt: - gofmt -s -w $(PKGS) - -fmtcheck: - @ export output=$$(gofmt -s -d $(PKGS)); \ - [ -n "$${output}" ] && echo "$${output}" && export status=1; \ - exit $${status:-0} - -pretest: lint vet fmtcheck - -gotest: - $(foreach pkg,$(PKGS),go test $(pkg) || exit;) - -test: pretest gotest - -integration: - go test -tags docker_integration -run TestIntegration -v - -cov: - @ go get -v github.com/axw/gocov/gocov - @ go get golang.org/x/tools/cmd/cover - gocov test | gocov report - -clean: - $(foreach pkg,$(PKGS),go clean $(pkg) || exit;) diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown deleted file mode 100644 index b915039f1..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/README.markdown +++ /dev/null @@ -1,105 +0,0 @@ -# go-dockerclient - -[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient) -[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) - -This package presents a client for the Docker remote API. It also provides -support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/). - -This package also provides support for docker's network API, which is a simple -passthrough to the libnetwork remote API. 
Note that docker's network API is -only available in docker 1.8 and above, and only enabled in docker if -DOCKER_EXPERIMENTAL is defined during the docker build process. - -For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/). - -## Vendoring - -If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored, -please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient -is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339) -for details. - -## Example - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "unix:///var/run/docker.sock" - client, _ := docker.NewClient(endpoint) - imgs, _ := client.ListImages(docker.ListImagesOptions{All: false}) - for _, img := range imgs { - fmt.Println("ID: ", img.ID) - fmt.Println("RepoTags: ", img.RepoTags) - fmt.Println("Created: ", img.Created) - fmt.Println("Size: ", img.Size) - fmt.Println("VirtualSize: ", img.VirtualSize) - fmt.Println("ParentId: ", img.ParentID) - } -} -``` - -## Using with TLS - -In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters. 
- -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "tcp://[ip]:[port]" - path := os.Getenv("DOCKER_CERT_PATH") - ca := fmt.Sprintf("%s/ca.pem", path) - cert := fmt.Sprintf("%s/cert.pem", path) - key := fmt.Sprintf("%s/key.pem", path) - client, _ := docker.NewTLSClient(endpoint, cert, key, ca) - // use client -} -``` - -If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables -`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv. - - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - client, _ := docker.NewClientFromEnv() - // use client -} -``` - -See the documentation for more details. - -## Developing - -All development commands can be seen in the [Makefile](Makefile). - -Commited code must pass: - -* [golint](https://github.com/golang/lint) -* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) -* [gofmt](https://golang.org/cmd/gofmt) -* [go test](https://golang.org/cmd/go/#hdr-Test_packages) - -Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index ecc843272..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,55 +0,0 @@ -# 0.9.0 (Unreleased) - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 55d3a8d5f..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md +++ 
/dev/null @@ -1,365 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" 
animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. 
We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. 
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| 
[Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. 
For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. - - ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) - ``` - -Third party logging formatters: - -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. 
- -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d9694..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). 
- -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) 
- -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 477be8b21..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-connections maintainers file -# -# This file describes who runs the docker/go-connections project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md deleted file mode 100644 index 3ce4d79da..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md +++ /dev/null @@ -1,18 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. 
- -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code -is released under the Apache 2.0 license. The README.md file, and files in the -"docs" folder are licensed under the Creative Commons Attribution 4.0 -International License under the terms and conditions set forth in the file -"LICENSE.docs". You may obtain a duplicate copy of the same license, titled -CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b3547..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e5313f..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. 
It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe20066..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go index 46629881b..dd138571f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go @@ -34,7 +34,7 @@ // // See http://blog.golang.org/context for example code for a server that uses // Contexts. -package context +package context // import "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" import ( "errors" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh deleted file mode 100755 index de95a4bbc..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# The unix package provides access to the raw system call -# interface of the underlying operating system. Porting Go to -# a new architecture/operating system combination requires -# some manual effort, though there are tools that automate -# much of the process. 
The auto-generated files have names -# beginning with z. -# -# This script runs or (given -n) prints suggested commands to generate z files -# for the current system. Running those commands is not automatic. -# This script is documentation more than anything else. -# -# * asm_${GOOS}_${GOARCH}.s -# -# This hand-written assembly file implements system call dispatch. -# There are three entry points: -# -# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); -# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr); -# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); -# -# The first and second are the standard ones; they differ only in -# how many arguments can be passed to the kernel. -# The third is for low-level use by the ForkExec wrapper; -# unlike the first two, it does not call into the scheduler to -# let it know that a system call is running. -# -# * syscall_${GOOS}.go -# -# This hand-written Go file implements system calls that need -# special handling and lists "//sys" comments giving prototypes -# for ones that can be auto-generated. Mksyscall reads those -# comments to generate the stubs. -# -# * syscall_${GOOS}_${GOARCH}.go -# -# Same as syscall_${GOOS}.go except that it contains code specific -# to ${GOOS} on one particular architecture. -# -# * types_${GOOS}.c -# -# This hand-written C file includes standard C headers and then -# creates typedef or enum names beginning with a dollar sign -# (use of $ in variable names is a gcc extension). The hardest -# part about preparing this file is figuring out which headers to -# include and which symbols need to be #defined to get the -# actual data structures that pass through to the kernel system calls. -# Some C libraries present alternate versions for binary compatibility -# and translate them on the way in and out of system calls, but -# there is almost always a #define that can get the real ones. -# See types_darwin.c and types_linux.c for examples. 
-# -# * zerror_${GOOS}_${GOARCH}.go -# -# This machine-generated file defines the system's error numbers, -# error strings, and signal numbers. The generator is "mkerrors.sh". -# Usually no arguments are needed, but mkerrors.sh will pass its -# arguments on to godefs. -# -# * zsyscall_${GOOS}_${GOARCH}.go -# -# Generated by mksyscall.pl; see syscall_${GOOS}.go above. -# -# * zsysnum_${GOOS}_${GOARCH}.go -# -# Generated by mksysnum_${GOOS}. -# -# * ztypes_${GOOS}_${GOARCH}.go -# -# Generated by godefs; see types_${GOOS}.c above. - -GOOSARCH="${GOOS}_${GOARCH}" - -# defaults -mksyscall="./mksyscall.pl" -mkerrors="./mkerrors.sh" -zerrors="zerrors_$GOOSARCH.go" -mksysctl="" -zsysctl="zsysctl_$GOOSARCH.go" -mksysnum= -mktypes= -run="sh" - -case "$1" in --syscalls) - for i in zsyscall*go - do - sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i - rm _$i - done - exit 0 - ;; --n) - run="cat" - shift -esac - -case "$#" in -0) - ;; -*) - echo 'usage: mkall.sh [-n]' 1>&2 - exit 2 -esac - -GOOSARCH_in=syscall_$GOOSARCH.go -case "$GOOSARCH" in -_* | *_ | _) - echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 - exit 1 - ;; -darwin_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_amd64) - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_arm) - mkerrors="$mkerrors" - mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_arm64) - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -dragonfly_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -dragonfly" - 
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -dragonfly_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -dragonfly" - mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_amd64) - mkerrors="$mkerrors -m64" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -arm" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - # Let the type of C char be singed for making the bare syscall - # API consistent across over platforms. 
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -linux_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_amd64) - unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1) - if [ "$unistd_h" = "" ]; then - echo >&2 cannot find unistd_64.h - exit 1 - fi - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -arm" - mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_arm64) - unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1) - if [ "$unistd_h" = "" ]; then - echo >&2 cannot find unistd_64.h - exit 1 - fi - mksysnum="./mksysnum_linux.pl $unistd_h" - # Let the type of C char be singed for making the bare syscall - # API consistent across over platforms. 
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -linux_ppc64) - GOOSARCH_in=syscall_linux_ppc64x.go - unistd_h=/usr/include/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_ppc64le) - GOOSARCH_in=syscall_linux_ppc64x.go - unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -netbsd" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -netbsd" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -openbsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -openbsd" - mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -openbsd_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -openbsd" - mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -solaris_amd64) - mksyscall="./mksyscall_solaris.pl" - mkerrors="$mkerrors -m64" - mksysnum= - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -*) - echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 - exit 1 - ;; -esac - -( - if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi - case "$GOOS" 
in - *) - syscall_goos="syscall_$GOOS.go" - case "$GOOS" in - darwin | dragonfly | freebsd | netbsd | openbsd) - syscall_goos="syscall_bsd.go $syscall_goos" - ;; - esac - if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi - ;; - esac - if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi - if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi - if [ -n "$mktypes" ]; then - echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go"; - echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go"; - fi -) | $run diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh deleted file mode 100755 index c40d788c4..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# Generate Go code listing errors and other #defined constant -# values (ENAMETOOLONG etc.), by asking the preprocessor -# about the definitions. - -unset LANG -export LC_ALL=C -export LC_CTYPE=C - -if test -z "$GOARCH" -o -z "$GOOS"; then - echo 1>&2 "GOARCH or GOOS not defined in environment" - exit 1 -fi - -CC=${CC:-cc} - -if [[ "$GOOS" -eq "solaris" ]]; then - # Assumes GNU versions of utilities in PATH. 
- export PATH=/usr/gnu/bin:$PATH -fi - -uname=$(uname) - -includes_Darwin=' -#define _DARWIN_C_SOURCE -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - -includes_DragonFly=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - -includes_FreeBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if __FreeBSD__ >= 10 -#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10 -#undef SIOCAIFADDR -#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data -#undef SIOCSIFPHYADDR -#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data -#endif -' - -includes_Linux=' -#define _LARGEFILE_SOURCE -#define _LARGEFILE64_SOURCE -#ifndef __LP64__ -#define _FILE_OFFSET_BITS 64 -#endif -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - -#ifndef PTRACE_GETREGS -#define PTRACE_GETREGS 0xc -#endif - -#ifndef PTRACE_SETREGS -#define PTRACE_SETREGS 0xd -#endif -' - -includes_NetBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// Needed since refers to it... 
-#define schedppq 1 -' - -includes_OpenBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// We keep some constants not supported in OpenBSD 5.5 and beyond for -// the promise of compatibility. -#define EMUL_ENABLED 0x1 -#define EMUL_NATIVE 0x2 -#define IPV6_FAITH 0x1d -#define IPV6_OPTIONS 0x1 -#define IPV6_RTHDR_STRICT 0x1 -#define IPV6_SOCKOPT_RESERVED1 0x3 -#define SIOCGIFGENERIC 0xc020693a -#define SIOCSIFGENERIC 0x80206939 -#define WALTSIG 0x4 -' - -includes_SunOS=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - - -includes=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' -ccflags="$@" - -# Write go tool cgo -godefs input. -( - echo package unix - echo - echo '/*' - indirect="includes_$(uname)" - echo "${!indirect} $includes" - echo '*/' - echo 'import "C"' - echo 'import "syscall"' - echo - echo 'const (' - - # The gcc command line prints all the #defines - # it encounters while processing the input - echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | - awk ' - $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} - - $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers - $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} - $2 ~ /^(SCM_SRCRT)$/ {next} - $2 ~ /^(MAP_FAILED)$/ {next} - $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc. 
- - $2 ~ /^EXTATTR_NAMESPACE_NAMES/ || - $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next} - - $2 !~ /^ETH_/ && - $2 !~ /^EPROC_/ && - $2 !~ /^EQUIV_/ && - $2 !~ /^EXPR_/ && - $2 ~ /^E[A-Z0-9_]+$/ || - $2 ~ /^B[0-9_]+$/ || - $2 == "BOTHER" || - $2 ~ /^CI?BAUD(EX)?$/ || - $2 == "IBSHIFT" || - $2 ~ /^V[A-Z0-9]+$/ || - $2 ~ /^CS[A-Z0-9]/ || - $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ || - $2 ~ /^IGN/ || - $2 ~ /^IX(ON|ANY|OFF)$/ || - $2 ~ /^IN(LCR|PCK)$/ || - $2 ~ /(^FLU?SH)|(FLU?SH$)/ || - $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ || - $2 == "BRKINT" || - $2 == "HUPCL" || - $2 == "PENDIN" || - $2 == "TOSTOP" || - $2 == "XCASE" || - $2 == "ALTWERASE" || - $2 == "NOKERNINFO" || - $2 ~ /^PAR/ || - $2 ~ /^SIG[^_]/ || - $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || - $2 ~ /^O?XTABS$/ || - $2 ~ /^TC[IO](ON|OFF)$/ || - $2 ~ /^IN_/ || - $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || - $2 == "ICMPV6_FILTER" || - $2 == "SOMAXCONN" || - $2 == "NAME_MAX" || - $2 == "IFNAMSIZ" || - $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || - $2 ~ /^SYSCTL_VERS/ || - $2 ~ /^(MS|MNT)_/ || - $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ || - $2 ~ /^LINUX_REBOOT_CMD_/ || - $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || - $2 !~ "NLA_TYPE_MASK" && - $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || - $2 ~ /^SIOC/ || - $2 ~ /^TIOC/ || - $2 ~ /^TCGET/ || - $2 ~ /^TCSET/ || - $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || - $2 !~ "RTF_BITS" && - $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || - $2 ~ /^BIOC/ || - $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || - $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ || - $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || - $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL)$/ && - $2 ~ /^(BPF|DLT)_/ || - $2 ~ /^CLOCK_/ || - $2 !~ "WMESGLEN" && - $2 ~ /^W[A-Z0-9]+$/ 
{printf("\t%s = C.%s\n", $2, $2)} - $2 ~ /^__WCOREFLAG$/ {next} - $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} - - {next} - ' | sort - - echo ')' -) >_const.go - -# Pull out the error names for later. -errors=$( - echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | - sort -) - -# Pull out the signal names for later. -signals=$( - echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | - sort -) - -# Again, writing regexps to a file. -echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | - sort >_error.grep -echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | - sort >_signal.grep - -echo '// mkerrors.sh' "$@" -echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT' -echo -echo "// +build ${GOARCH},${GOOS}" -echo -go tool cgo -godefs -- "$@" _const.go >_error.out -cat _error.out | grep -vf _error.grep | grep -vf _signal.grep -echo -echo '// Errors' -echo 'const (' -cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/' -echo ')' - -echo -echo '// Signals' -echo 'const (' -cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/' -echo ')' - -# Run C program to print error and syscall strings. -( - echo -E " -#include -#include -#include -#include -#include -#include - -#define nelem(x) (sizeof(x)/sizeof((x)[0])) - -enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below - -int errors[] = { -" - for i in $errors - do - echo -E ' '$i, - done - - echo -E " -}; - -int signals[] = { -" - for i in $signals - do - echo -E ' '$i, - done - - # Use -E because on some systems bash builtin interprets \n itself. 
- echo -E ' -}; - -static int -intcmp(const void *a, const void *b) -{ - return *(int*)a - *(int*)b; -} - -int -main(void) -{ - int i, j, e; - char buf[1024], *p; - - printf("\n\n// Error table\n"); - printf("var errors = [...]string {\n"); - qsort(errors, nelem(errors), sizeof errors[0], intcmp); - for(i=0; i 0 && errors[i-1] == e) - continue; - strcpy(buf, strerror(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - printf("\t%d: \"%s\",\n", e, buf); - } - printf("}\n\n"); - - printf("\n\n// Signal table\n"); - printf("var signals = [...]string {\n"); - qsort(signals, nelem(signals), sizeof signals[0], intcmp); - for(i=0; i 0 && signals[i-1] == e) - continue; - strcpy(buf, strsignal(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - // cut trailing : number. - p = strrchr(buf, ":"[0]); - if(p) - *p = '\0'; - printf("\t%d: \"%s\",\n", e, buf); - } - printf("}\n\n"); - - return 0; -} - -' -) >_errors.c - -$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl deleted file mode 100755 index b1e7766da..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This program reads a file containing function prototypes -# (like syscall_darwin.go) and generates system call bodies. 
-# The prototypes are marked by lines beginning with "//sys" -# and read like func declarations if //sys is replaced by func, but: -# * The parameter lists must give a name for each argument. -# This includes return parameters. -# * The parameter lists must give a type for each argument: -# the (x, y, z int) shorthand is not allowed. -# * If the return parameter is an error number, it must be named errno. - -# A line beginning with //sysnb is like //sys, except that the -# goroutine will not be suspended during the execution of the system -# call. This must only be used for system calls which can never -# block, as otherwise the system call could cause all goroutines to -# hang. - -use strict; - -my $cmdline = "mksyscall.pl " . join(' ', @ARGV); -my $errors = 0; -my $_32bit = ""; -my $plan9 = 0; -my $openbsd = 0; -my $netbsd = 0; -my $dragonfly = 0; -my $arm = 0; # 64-bit value should use (even, odd)-pair - -if($ARGV[0] eq "-b32") { - $_32bit = "big-endian"; - shift; -} elsif($ARGV[0] eq "-l32") { - $_32bit = "little-endian"; - shift; -} -if($ARGV[0] eq "-plan9") { - $plan9 = 1; - shift; -} -if($ARGV[0] eq "-openbsd") { - $openbsd = 1; - shift; -} -if($ARGV[0] eq "-netbsd") { - $netbsd = 1; - shift; -} -if($ARGV[0] eq "-dragonfly") { - $dragonfly = 1; - shift; -} -if($ARGV[0] eq "-arm") { - $arm = 1; - shift; -} - -if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n"; - exit 1; -} - -if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") { - print STDERR "GOARCH or GOOS not defined in environment\n"; - exit 1; -} - -sub parseparamlist($) { - my ($list) = @_; - $list =~ s/^\s*//; - $list =~ s/\s*$//; - if($list eq "") { - return (); - } - return split(/\s*,\s*/, $list); -} - -sub parseparam($) { - my ($p) = @_; - if($p !~ /^(\S*) (\S*)$/) { - print STDERR "$ARGV:$.: malformed parameter: $p\n"; - $errors = 1; - return ("xx", "int"); - } - return ($1, $2); -} - -my $text = ""; -while(<>) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - my 
$nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, errno error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # Try in vain to keep people from editing this file. - # The theory is that they jump into the middle of the file - # without reading the header. - $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - - # Go function header. - my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
- my @args = (); - my @uses = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass dummy pointer in that case. - # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- $text .= "\tvar _p$n unsafe.Pointer\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; - $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; - $text .= "\n"; - push @args, "uintptr(_p$n)", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && ($openbsd || $netbsd)) { - push @args, "0"; - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $dragonfly) { - if ($func !~ /^extp(read|write)/i) { - push @args, "0"; - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $_32bit ne "") { - if(@args % 2 && $arm) { - # arm abi specifies 64-bit argument uses - # (even, odd) pair - push @args, "0" - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } - } else { - push @args, "uintptr($name)"; - } - } - - # Determine which form to use; pad args with zeros. - my $asm = "Syscall"; - if ($nonblock) { - $asm = "RawSyscall"; - } - if(@args <= 3) { - while(@args < 3) { - push @args, "0"; - } - } elsif(@args <= 6) { - $asm .= "6"; - while(@args < 6) { - push @args, "0"; - } - } elsif(@args <= 9) { - $asm .= "9"; - while(@args < 9) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # System call number. - if($sysname eq "") { - $sysname = "SYS_$func"; - $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar - $sysname =~ y/a-z/A-Z/; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm($sysname, $args)"; - - # Assign return values. 
- my $body = ""; - my @ret = ("_", "_", "_"); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err" && !$plan9) { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } elsif($name eq "err" && $plan9) { - $ret[0] = "r0"; - $ret[2] = "e1"; - next; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1" || $plan9) { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - foreach my $use (@uses) { - $text .= "\t$use\n"; - } - $text .= $body; - - if ($plan9 && $ret[2] eq "e1") { - $text .= "\tif int32(r0) == -1 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } elsif ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = errnoErr(e1)\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n\n"; -} - -chomp $text; -chomp $text; - -if($errors) { - exit 1; -} - -print <) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - $package = $1 if !$package && /^package (\S+)$/; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, err error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # So file name. - if($modname eq "") { - $modname = "libc"; - } - - # System call name. - if($sysname eq "") { - $sysname = "$func"; - } - - # System call pointer variable name. - my $sysvarname = "proc$sysname"; - - my $strconvfunc = "BytePtrFromString"; - my $strconvtype = "*byte"; - - $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. - - # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; - # Link symbol to proc address variable. - $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; - # Library proc address variable. - push @vars, $sysvarname; - - # Go function header. - $out = join(', ', @out); - if($out ne "") { - $out = " ($out)"; - } - if($text ne "") { - $text .= "\n" - } - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
- my @args = (); - my @uses = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, _ = $strconvfunc($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass nil in that case. - $text .= "\tvar _p$n *$1\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && $_32bit ne "") { - if($_32bit eq "big-endian") { - push @args, "uintptr($name >> 32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name >> 32)"; - } - } elsif($type eq "bool") { - $text .= "\tvar _p$n uint32\n"; - $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; - push @args, "uintptr(_p$n)"; - $n++; - } else { - push @args, "uintptr($name)"; - } - } - my $nargs = @args; - - # Determine which form to use; pad args with zeros. - my $asm = "sysvicall6"; - if ($nonblock) { - $asm = "rawSysvicall6"; - } - if(@args <= 6) { - while(@args < 6) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; - - # Assign return values. 
- my $body = ""; - my $failexpr = ""; - my @ret = ("_", "_", "_"); - my @pout= (); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err") { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1") { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - foreach my $use (@uses) { - $text .= "\t$use\n"; - } - $text .= $body; - - if ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n"; -} - -if($errors) { - exit 1; -} - -print < "net.inet", - "net.inet.ipproto" => "net.inet", - "net.inet6.ipv6proto" => "net.inet6", - "net.inet6.ipv6" => "net.inet6.ip6", - "net.inet.icmpv6" => "net.inet6.icmp6", - "net.inet6.divert6" => "net.inet6.divert", - "net.inet6.tcp6" => "net.inet.tcp", - "net.inet6.udp6" => "net.inet.udp", - "mpls" => "net.mpls", - "swpenc" => "vm.swapencrypt" -); - -# Node mappings -my %node_map = ( - "net.inet.ip.ifq" => "net.ifq", - "net.inet.pfsync" => "net.pfsync", - "net.mpls.ifq" => "net.ifq" -); - -my $ctlname; -my %mib = (); -my %sysctl = (); -my $node; - -sub debug() { - print STDERR "$_[0]\n" if $debug; -} - -# Walk the MIB and build a sysctl name to OID mapping. 
-sub build_sysctl() { - my ($node, $name, $oid) = @_; - my %node = %{$node}; - my @oid = @{$oid}; - - foreach my $key (sort keys %node) { - my @node = @{$node{$key}}; - my $nodename = $name.($name ne '' ? '.' : '').$key; - my @nodeoid = (@oid, $node[0]); - if ($node[1] eq 'CTLTYPE_NODE') { - if (exists $node_map{$nodename}) { - $node = \%mib; - $ctlname = $node_map{$nodename}; - foreach my $part (split /\./, $ctlname) { - $node = \%{@{$$node{$part}}[2]}; - } - } else { - $node = $node[2]; - } - &build_sysctl($node, $nodename, \@nodeoid); - } elsif ($node[1] ne '') { - $sysctl{$nodename} = \@nodeoid; - } - } -} - -foreach my $ctl (@ctls) { - $ctls{$ctl} = $ctl; -} - -# Build MIB -foreach my $header (@headers) { - &debug("Processing $header..."); - open HEADER, "/usr/include/$header" || - print STDERR "Failed to open $header\n"; - while (
) { - if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || - $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || - $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { - if ($1 eq 'CTL_NAMES') { - # Top level. - $node = \%mib; - } else { - # Node. - my $nodename = lc($2); - if ($header =~ /^netinet\//) { - $ctlname = "net.inet.$nodename"; - } elsif ($header =~ /^netinet6\//) { - $ctlname = "net.inet6.$nodename"; - } elsif ($header =~ /^net\//) { - $ctlname = "net.$nodename"; - } else { - $ctlname = "$nodename"; - $ctlname =~ s/^(fs|net|kern)_/$1\./; - } - if (exists $ctl_map{$ctlname}) { - $ctlname = $ctl_map{$ctlname}; - } - if (not exists $ctls{$ctlname}) { - &debug("Ignoring $ctlname..."); - next; - } - - # Walk down from the top of the MIB. - $node = \%mib; - foreach my $part (split /\./, $ctlname) { - if (not exists $$node{$part}) { - &debug("Missing node $part"); - $$node{$part} = [ 0, '', {} ]; - } - $node = \%{@{$$node{$part}}[2]}; - } - } - - # Populate current node with entries. - my $i = -1; - while (defined($_) && $_ !~ /^}/) { - $_ =
; - $i++ if $_ =~ /{.*}/; - next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; - $$node{$1} = [ $i, $2, {} ]; - } - } - } - close HEADER; -} - -&build_sysctl(\%mib, "", []); - -print <){ - if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ - my $name = $1; - my $num = $2; - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;" - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print <){ - if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){ - next - } - - print " $name = $num; // $proto\n"; - - # We keep Capsicum syscall numbers for FreeBSD - # 9-STABLE here because we are not sure whether they - # are mature and stable. 
- if($num == 513){ - print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n"; - print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n"; - print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n"; - print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n"; - } - } -} - -print < 999){ - # ignore deprecated syscalls that are no longer implemented - # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716 - return; - } - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;\n"; -} - -my $prev; -open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc"; -while(){ - if(/^#define __NR_syscalls\s+/) { - # ignore redefinitions of __NR_syscalls - } - elsif(/^#define __NR_(\w+)\s+([0-9]+)/){ - $prev = $2; - fmt($1, $2); - } - elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){ - $prev = $2; - fmt($1, $2); - } - elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){ - fmt($1, $prev+$2) - } -} - -print <){ - if($line =~ /^(.*)\\$/) { - # Handle continuation - $line = $1; - $_ =~ s/^\s+//; - $line .= $_; - } else { - # New line - $line = $_; - } - next if $line =~ /\\$/; - if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) { - my $num = $1; - my $proto = $6; - my $compat = $8; - my $name = "$7_$9"; - - $name = "$7_$11" if $11 ne ''; - $name =~ y/a-z/A-Z/; - - if($compat eq '' || $compat eq '30' || $compat eq '50') { - print " $name = $num; // $proto\n"; - } - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){ - my $num = $1; - my $proto = $3; - my $name = $4; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. 
- if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print < parBlocks { - buf = parBlocks - } - - sendChan := make(chan subMul, buf) - - // Launch workers. A worker receives an {i, j} submatrix of c, and computes - // A_ik B_ki (or the transposed version) storing the result in c_ij. When the - // channel is finally closed, it signals to the waitgroup that it has finished - // computing. - var wg sync.WaitGroup - for i := 0; i < nWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // Make local copies of otherwise global variables to reduce shared memory. - // This has a noticable effect on benchmarks in some cases. - alpha := alpha - aTrans := aTrans - bTrans := bTrans - crows := c.rows - ccols := c.cols - for sub := range sendChan { - i := sub.i - j := sub.j - leni := blockSize - if i+leni > crows { - leni = crows - i - } - lenj := blockSize - if j+lenj > ccols { - lenj = ccols - j - } - cSub := c.view(i, j, leni, lenj) - - // Compute A_ik B_kj for all k - for k := 0; k < maxKLen; k += blockSize { - lenk := blockSize - if k+lenk > maxKLen { - lenk = maxKLen - k - } - var aSub, bSub general64 - if aTrans { - aSub = a.view(k, i, lenk, leni) - } else { - aSub = a.view(i, k, leni, lenk) - } - if bTrans { - bSub = b.view(j, k, lenj, lenk) - } else { - bSub = b.view(k, j, lenk, lenj) - } - - dgemmSerial(tA, tB, aSub, bSub, cSub, alpha) - } - } - }() - } - - // Send out all of the {i, j} subblocks for computation. - for i := 0; i < c.rows; i += blockSize { - for j := 0; j < c.cols; j += blockSize { - sendChan <- subMul{ - i: i, - j: j, - } - } - } - close(sendChan) - wg.Wait() -} - -// computeNumBlocks says how many blocks there are to compute. maxKLen says the length of the -// k dimension, parBlocks is the number of blocks that could be computed in parallel -// (the submatrices in i and j). expect is the full number of blocks that will be computed. 
-func computeNumBlocks64(a, b general64, aTrans, bTrans bool) (maxKLen, parBlocks int) { - aRowBlocks := a.rows / blockSize - if a.rows%blockSize != 0 { - aRowBlocks++ - } - aColBlocks := a.cols / blockSize - if a.cols%blockSize != 0 { - aColBlocks++ - } - bRowBlocks := b.rows / blockSize - if b.rows%blockSize != 0 { - bRowBlocks++ - } - bColBlocks := b.cols / blockSize - if b.cols%blockSize != 0 { - bColBlocks++ - } - - switch { - case !aTrans && !bTrans: - // Cij = \sum_k Aik Bki - maxKLen = a.cols - parBlocks = aRowBlocks * bColBlocks - case aTrans && !bTrans: - // Cij = \sum_k Aki Bkj - maxKLen = a.rows - parBlocks = aColBlocks * bColBlocks - case !aTrans && bTrans: - // Cij = \sum_k Aik Bjk - maxKLen = a.cols - parBlocks = aRowBlocks * bRowBlocks - case aTrans && bTrans: - // Cij = \sum_k Aki Bjk - maxKLen = a.rows - parBlocks = aColBlocks * bRowBlocks - } - return -} - -// dgemmSerial is serial matrix multiply -func dgemmSerial(tA, tB blas.Transpose, a, b, c general64, alpha float64) { - switch { - case tA == blas.NoTrans && tB == blas.NoTrans: - dgemmSerialNotNot(a, b, c, alpha) - return - case tA != blas.NoTrans && tB == blas.NoTrans: - dgemmSerialTransNot(a, b, c, alpha) - return - case tA == blas.NoTrans && tB != blas.NoTrans: - dgemmSerialNotTrans(a, b, c, alpha) - return - case tA != blas.NoTrans && tB != blas.NoTrans: - dgemmSerialTransTrans(a, b, c, alpha) - return - default: - panic("unreachable") - } -} - -// dgemmSerial where neither a nor b are transposed -func dgemmSerialNotNot(a, b, c general64, alpha float64) { - if debug { - if a.cols != b.rows { - panic("inner dimension mismatch") - } - if a.rows != c.rows { - panic("outer dimension mismatch") - } - if b.cols != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. 
- for i := 0; i < a.rows; i++ { - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - for l, v := range a.data[i*a.stride : i*a.stride+a.cols] { - tmp := alpha * v - if tmp != 0 { - asm.DaxpyUnitary(tmp, b.data[l*b.stride:l*b.stride+b.cols], ctmp, ctmp) - } - } - } -} - -// dgemmSerial where neither a is transposed and b is not -func dgemmSerialTransNot(a, b, c general64, alpha float64) { - if debug { - if a.rows != b.rows { - fmt.Println(a.rows, b.rows) - panic("inner dimension mismatch") - } - if a.cols != c.rows { - panic("outer dimension mismatch") - } - if b.cols != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. - for l := 0; l < a.rows; l++ { - btmp := b.data[l*b.stride : l*b.stride+b.cols] - for i, v := range a.data[l*a.stride : l*a.stride+a.cols] { - tmp := alpha * v - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - if tmp != 0 { - asm.DaxpyUnitary(tmp, btmp, ctmp, ctmp) - } - } - } -} - -// dgemmSerial where neither a is not transposed and b is -func dgemmSerialNotTrans(a, b, c general64, alpha float64) { - if debug { - if a.cols != b.cols { - panic("inner dimension mismatch") - } - if a.rows != c.rows { - panic("outer dimension mismatch") - } - if b.rows != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. 
- for i := 0; i < a.rows; i++ { - atmp := a.data[i*a.stride : i*a.stride+a.cols] - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - for j := 0; j < b.rows; j++ { - ctmp[j] += alpha * asm.DdotUnitary(atmp, b.data[j*b.stride:j*b.stride+b.cols]) - } - } - -} - -// dgemmSerial where both are transposed -func dgemmSerialTransTrans(a, b, c general64, alpha float64) { - if debug { - if a.rows != b.cols { - panic("inner dimension mismatch") - } - if a.cols != c.rows { - panic("outer dimension mismatch") - } - if b.rows != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. - for l := 0; l < a.rows; l++ { - for i, v := range a.data[l*a.stride : l*a.stride+a.cols] { - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - if v != 0 { - tmp := alpha * v - if tmp != 0 { - asm.DaxpyInc(tmp, b.data[l:], ctmp, uintptr(b.rows), uintptr(b.stride), 1, 0, 0) - } - } - } - } -} diff --git a/vendor/github.com/gonum/blas/native/doc.go b/vendor/github.com/gonum/blas/native/doc.go deleted file mode 100644 index cb63fe776..000000000 --- a/vendor/github.com/gonum/blas/native/doc.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Ensure changes made to blas/native are reflected in blas/cgo where relevant. - -/* -Package native is a Go implementation of the BLAS API. This implementation -panics when the input arguments are invalid as per the standard, for example -if a vector increment is zero. Please note that the treatment of NaN values -is not specified, and differs among the BLAS implementations. -github.com/gonum/blas/blas64 provides helpful wrapper functions to the BLAS -interface. The rest of this text describes the layout of the data for the input types. 
- -Please note that in the function documentation, x[i] refers to the i^th element -of the vector, which will be different from the i^th element of the slice if -incX != 1. - -See http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html -for more license information. - -Vector arguments are effectively strided slices. They have two input arguments, -a number of elements, n, and an increment, incX. The increment specifies the -distance between elements of the vector. The actual Go slice may be longer -than necessary. -The increment may be positive or negative, except in functions with only -a single vector argument where the increment may only be positive. If the increment -is negative, s[0] is the last element in the slice. Note that this is not the same -as counting backward from the end of the slice, as len(s) may be longer than -necessary. So, for example, if n = 5 and incX = 3, the elements of s are - [0 * * 1 * * 2 * * 3 * * 4 * * * ...] -where ∗ elements are never accessed. If incX = -3, the same elements are -accessed, just in reverse order (4, 3, 2, 1, 0). - -Dense matrices are specified by a number of rows, a number of columns, and a stride. -The stride specifies the number of entries in the slice between the first element -of successive rows. The stride must be at least as large as the number of columns -but may be longer. - [a00 ... a0n a0* ... a1stride-1 a21 ... amn am* ... amstride-1] -Thus, dense[i*ld + j] refers to the {i, j}th element of the matrix. - -Symmetric and triangular matrices (non-packed) are stored identically to Dense, -except that only elements in one triangle of the matrix are accessed. - -Packed symmetric and packed triangular matrices are laid out with the entries -condensed such that all of the unreferenced elements are removed. So, the upper triangular -matrix - [ - 1 2 3 - 0 4 5 - 0 0 6 - ] -and the lower-triangular matrix - [ - 1 0 0 - 2 3 0 - 4 5 6 - ] -will both be compacted as [1 2 3 4 5 6]. 
The (i, j) element of the original -dense matrix can be found at element i*n - (i-1)*i/2 + j for upper triangular, -and at element i * (i+1) /2 + j for lower triangular. - -Banded matrices are laid out in a compact format, constructed by removing the -zeros in the rows and aligning the diagonals. For example, the matrix - [ - 1 2 3 0 0 0 - 4 5 6 7 0 0 - 0 8 9 10 11 0 - 0 0 12 13 14 15 - 0 0 0 16 17 18 - 0 0 0 0 19 20 - ] - -implicitly becomes (∗ entries are never accessed) - [ - * 1 2 3 - 4 5 6 7 - 8 9 10 11 - 12 13 14 15 - 16 17 18 * - 19 20 * * - ] -which is given to the BLAS routine as [∗ 1 2 3 4 ...]. - -See http://www.crest.iu.edu/research/mtl/reference/html/banded.html -for more information -*/ -package native diff --git a/vendor/github.com/gonum/blas/native/general_double.go b/vendor/github.com/gonum/blas/native/general_double.go deleted file mode 100644 index 0fa6cb747..000000000 --- a/vendor/github.com/gonum/blas/native/general_double.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "errors" - "fmt" - "math" -) - -func newGeneral64(r, c int) general64 { - return general64{ - data: make([]float64, r*c), - rows: r, - cols: c, - stride: c, - } -} - -type general64 struct { - data []float64 - rows, cols int - stride int -} - -// adds element-wise into receiver. rows and columns must match -func (g general64) add(h general64) { - if debug { - if g.rows != h.rows { - panic("blas: row size mismatch") - } - if g.cols != h.cols { - panic("blas: col size mismatch") - } - } - for i := 0; i < g.rows; i++ { - gtmp := g.data[i*g.stride : i*g.stride+g.cols] - for j, v := range h.data[i*h.stride : i*h.stride+h.cols] { - gtmp[j] += v - } - } -} - -// at returns the value at the ith row and jth column. For speed reasons, the -// rows and columns are not bounds checked. 
-func (g general64) at(i, j int) float64 { - if debug { - if i < 0 || i >= g.rows { - panic("blas: row out of bounds") - } - if j < 0 || j >= g.cols { - panic("blas: col out of bounds") - } - } - return g.data[i*g.stride+j] -} - -func (g general64) check(c byte) error { - if g.rows < 0 { - return errors.New("blas: rows < 0") - } - if g.cols < 0 { - return errors.New("blas: cols < 0") - } - if g.stride < 1 { - return errors.New("blas: stride < 1") - } - if g.stride < g.cols { - return errors.New("blas: illegal stride") - } - if (g.rows-1)*g.stride+g.cols > len(g.data) { - return fmt.Errorf("blas: index of %c out of range", c) - } - return nil -} - -func (g general64) clone() general64 { - data := make([]float64, len(g.data)) - copy(data, g.data) - return general64{ - data: data, - rows: g.rows, - cols: g.cols, - stride: g.stride, - } -} - -// assumes they are the same size -func (g general64) copy(h general64) { - if debug { - if g.rows != h.rows { - panic("blas: row mismatch") - } - if g.cols != h.cols { - panic("blas: col mismatch") - } - } - for k := 0; k < g.rows; k++ { - copy(g.data[k*g.stride:(k+1)*g.stride], h.data[k*h.stride:(k+1)*h.stride]) - } -} - -func (g general64) equal(a general64) bool { - if g.rows != a.rows || g.cols != a.cols || g.stride != a.stride { - return false - } - for i, v := range g.data { - if a.data[i] != v { - return false - } - } - return true -} - -/* -// print is to aid debugging. 
Commented out to avoid fmt import -func (g general64) print() { - fmt.Println("r = ", g.rows, "c = ", g.cols, "stride: ", g.stride) - for i := 0; i < g.rows; i++ { - fmt.Println(g.data[i*g.stride : (i+1)*g.stride]) - } - -} -*/ - -func (g general64) view(i, j, r, c int) general64 { - if debug { - if i < 0 || i+r > g.rows { - panic("blas: row out of bounds") - } - if j < 0 || j+c > g.cols { - panic("blas: col out of bounds") - } - } - return general64{ - data: g.data[i*g.stride+j : (i+r-1)*g.stride+j+c], - rows: r, - cols: c, - stride: g.stride, - } -} - -func (g general64) equalWithinAbs(a general64, tol float64) bool { - if g.rows != a.rows || g.cols != a.cols || g.stride != a.stride { - return false - } - for i, v := range g.data { - if math.Abs(a.data[i]-v) > tol { - return false - } - } - return true -} diff --git a/vendor/github.com/gonum/blas/native/general_single.go b/vendor/github.com/gonum/blas/native/general_single.go deleted file mode 100644 index bf648e5c9..000000000 --- a/vendor/github.com/gonum/blas/native/general_single.go +++ /dev/null @@ -1,157 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "errors" - "fmt" - math "github.com/gonum/blas/native/internal/math32" -) - -func newGeneral32(r, c int) general32 { - return general32{ - data: make([]float32, r*c), - rows: r, - cols: c, - stride: c, - } -} - -type general32 struct { - data []float32 - rows, cols int - stride int -} - -// adds element-wise into receiver. 
rows and columns must match -func (g general32) add(h general32) { - if debug { - if g.rows != h.rows { - panic("blas: row size mismatch") - } - if g.cols != h.cols { - panic("blas: col size mismatch") - } - } - for i := 0; i < g.rows; i++ { - gtmp := g.data[i*g.stride : i*g.stride+g.cols] - for j, v := range h.data[i*h.stride : i*h.stride+h.cols] { - gtmp[j] += v - } - } -} - -// at returns the value at the ith row and jth column. For speed reasons, the -// rows and columns are not bounds checked. -func (g general32) at(i, j int) float32 { - if debug { - if i < 0 || i >= g.rows { - panic("blas: row out of bounds") - } - if j < 0 || j >= g.cols { - panic("blas: col out of bounds") - } - } - return g.data[i*g.stride+j] -} - -func (g general32) check(c byte) error { - if g.rows < 0 { - return errors.New("blas: rows < 0") - } - if g.cols < 0 { - return errors.New("blas: cols < 0") - } - if g.stride < 1 { - return errors.New("blas: stride < 1") - } - if g.stride < g.cols { - return errors.New("blas: illegal stride") - } - if (g.rows-1)*g.stride+g.cols > len(g.data) { - return fmt.Errorf("blas: index of %c out of range", c) - } - return nil -} - -func (g general32) clone() general32 { - data := make([]float32, len(g.data)) - copy(data, g.data) - return general32{ - data: data, - rows: g.rows, - cols: g.cols, - stride: g.stride, - } -} - -// assumes they are the same size -func (g general32) copy(h general32) { - if debug { - if g.rows != h.rows { - panic("blas: row mismatch") - } - if g.cols != h.cols { - panic("blas: col mismatch") - } - } - for k := 0; k < g.rows; k++ { - copy(g.data[k*g.stride:(k+1)*g.stride], h.data[k*h.stride:(k+1)*h.stride]) - } -} - -func (g general32) equal(a general32) bool { - if g.rows != a.rows || g.cols != a.cols || g.stride != a.stride { - return false - } - for i, v := range g.data { - if a.data[i] != v { - return false - } - } - return true -} - -/* -// print is to aid debugging. 
Commented out to avoid fmt import -func (g general32) print() { - fmt.Println("r = ", g.rows, "c = ", g.cols, "stride: ", g.stride) - for i := 0; i < g.rows; i++ { - fmt.Println(g.data[i*g.stride : (i+1)*g.stride]) - } - -} -*/ - -func (g general32) view(i, j, r, c int) general32 { - if debug { - if i < 0 || i+r > g.rows { - panic("blas: row out of bounds") - } - if j < 0 || j+c > g.cols { - panic("blas: col out of bounds") - } - } - return general32{ - data: g.data[i*g.stride+j : (i+r-1)*g.stride+j+c], - rows: r, - cols: c, - stride: g.stride, - } -} - -func (g general32) equalWithinAbs(a general32, tol float32) bool { - if g.rows != a.rows || g.cols != a.cols || g.stride != a.stride { - return false - } - for i, v := range g.data { - if math.Abs(a.data[i]-v) > tol { - return false - } - } - return true -} diff --git a/vendor/github.com/gonum/blas/native/internal/math32/math.go b/vendor/github.com/gonum/blas/native/internal/math32/math.go deleted file mode 100644 index b33401b98..000000000 --- a/vendor/github.com/gonum/blas/native/internal/math32/math.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package math32 provides float32 versions of standard library math package -// routines used by gonum/blas/native. -package math32 - -import ( - "math" -) - -const ( - unan = 0x7fc00000 - uinf = 0x7f800000 - uneginf = 0xff800000 - mask = 0x7f8 >> 3 - shift = 32 - 8 - 1 - bias = 127 -) - -// Abs returns the absolute value of x. 
-// -// Special cases are: -// Abs(±Inf) = +Inf -// Abs(NaN) = NaN -func Abs(x float32) float32 { - switch { - case x < 0: - return -x - case x == 0: - return 0 // return correctly abs(-0) - } - return x -} - -// Copysign returns a value with the magnitude -// of x and the sign of y. -func Copysign(x, y float32) float32 { - const sign = 1 << 31 - return math.Float32frombits(math.Float32bits(x)&^sign | math.Float32bits(y)&sign) -} - -// Hypot returns Sqrt(p*p + q*q), taking care to avoid -// unnecessary overflow and underflow. -// -// Special cases are: -// Hypot(±Inf, q) = +Inf -// Hypot(p, ±Inf) = +Inf -// Hypot(NaN, q) = NaN -// Hypot(p, NaN) = NaN -func Hypot(p, q float32) float32 { - // special cases - switch { - case IsInf(p, 0) || IsInf(q, 0): - return Inf(1) - case IsNaN(p) || IsNaN(q): - return NaN() - } - if p < 0 { - p = -p - } - if q < 0 { - q = -q - } - if p < q { - p, q = q, p - } - if p == 0 { - return 0 - } - q = q / p - return p * Sqrt(1+q*q) -} - -// Inf returns positive infinity if sign >= 0, negative infinity if sign < 0. -func Inf(sign int) float32 { - var v uint32 - if sign >= 0 { - v = uinf - } else { - v = uneginf - } - return math.Float32frombits(v) -} - -// IsInf reports whether f is an infinity, according to sign. -// If sign > 0, IsInf reports whether f is positive infinity. -// If sign < 0, IsInf reports whether f is negative infinity. -// If sign == 0, IsInf reports whether f is either infinity. -func IsInf(f float32, sign int) bool { - // Test for infinity by comparing against maximum float. - // To avoid the floating-point hardware, could use: - // x := math.Float32bits(f); - // return sign >= 0 && x == uinf || sign <= 0 && x == uneginf; - return sign >= 0 && f > math.MaxFloat32 || sign <= 0 && f < -math.MaxFloat32 -} - -// IsNaN reports whether f is an IEEE 754 ``not-a-number'' value. -func IsNaN(f float32) (is bool) { - // IEEE 754 says that only NaNs satisfy f != f. 
- // To avoid the floating-point hardware, could use: - // x := math.Float32bits(f); - // return uint32(x>>shift)&mask == mask && x != uinf && x != uneginf - return f != f -} - -// NaN returns an IEEE 754 ``not-a-number'' value. -func NaN() float32 { return math.Float32frombits(unan) } diff --git a/vendor/github.com/gonum/blas/native/internal/math32/sqrt.go b/vendor/github.com/gonum/blas/native/internal/math32/sqrt.go deleted file mode 100644 index 058b731d2..000000000 --- a/vendor/github.com/gonum/blas/native/internal/math32/sqrt.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !amd64 noasm - -package math32 - -import ( - "math" -) - -// Sqrt returns the square root of x. -// -// Special cases are: -// Sqrt(+Inf) = +Inf -// Sqrt(±0) = ±0 -// Sqrt(x < 0) = NaN -// Sqrt(NaN) = NaN -func Sqrt(x float32) float32 { - // FIXME(kortschak): Direct translation of the math package - // asm code for 386 fails to build. No test hardware is available - // for arm, so using conversion instead. - return float32(math.Sqrt(float64(x))) -} diff --git a/vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.go b/vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.go deleted file mode 100644 index ca11639e5..000000000 --- a/vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !noasm - -package math32 - -// Sqrt returns the square root of x. 
-// -// Special cases are: -// Sqrt(+Inf) = +Inf -// Sqrt(±0) = ±0 -// Sqrt(x < 0) = NaN -// Sqrt(NaN) = NaN -func Sqrt(x float32) float32 diff --git a/vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.s b/vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.s deleted file mode 100644 index 595ce5d58..000000000 --- a/vendor/github.com/gonum/blas/native/internal/math32/sqrt_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !noasm - -// TODO(kortschak): use textflag.h after we drop Go 1.3 support -//#include "textflag.h" -// Don't insert stack check preamble. -#define NOSPLIT 4 - -// func Sqrt(x float32) float32 -TEXT ·Sqrt(SB),NOSPLIT,$0 - SQRTSS x+0(FP), X0 - MOVSS X0, ret+8(FP) - RET diff --git a/vendor/github.com/gonum/blas/native/level1double.go b/vendor/github.com/gonum/blas/native/level1double.go deleted file mode 100644 index 0d77243f3..000000000 --- a/vendor/github.com/gonum/blas/native/level1double.go +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "math" - - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -var _ blas.Float64Level1 = Implementation{} - -// Dnrm2 computes the Euclidean norm of a vector, -// sqrt(\sum_i x[i] * x[i]). -// This function returns 0 if incX is negative. 
-func (Implementation) Dnrm2(n int, x []float64, incX int) float64 { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if n < 2 { - if n == 1 { - return math.Abs(x[0]) - } - if n == 0 { - return 0 - } - if n < 1 { - panic(negativeN) - } - } - var ( - scale float64 = 0 - sumSquares float64 = 1 - ) - if incX == 1 { - x = x[:n] - for _, v := range x { - absxi := math.Abs(v) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - return scale * math.Sqrt(sumSquares) - } - for ix := 0; ix < n*incX; ix += incX { - val := x[ix] - if val == 0 { - continue - } - absxi := math.Abs(val) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - return scale * math.Sqrt(sumSquares) -} - -// Dasum computes the sum of the absolute values of the elements of x. -// \sum_i |x[i]| -// Dasum returns 0 if incX is negative. -func (Implementation) Dasum(n int, x []float64, incX int) float64 { - var sum float64 - if n < 0 { - panic(negativeN) - } - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if incX == 1 { - x = x[:n] - for _, v := range x { - sum += math.Abs(v) - } - return sum - } - for i := 0; i < n; i++ { - sum += math.Abs(x[i*incX]) - } - return sum -} - -// Idamax returns the index of the largest element of x. If there are multiple -// such indices the earliest is returned. Idamax returns -1 if incX is negative or if -// n == 0. 
-func (Implementation) Idamax(n int, x []float64, incX int) int { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return -1 - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if n < 2 { - if n == 1 { - return 0 - } - if n == 0 { - return -1 // Netlib returns invalid index when n == 0 - } - if n < 1 { - panic(negativeN) - } - } - idx := 0 - max := math.Abs(x[0]) - if incX == 1 { - for i, v := range x[:n] { - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - } - return idx - } - ix := incX - for i := 1; i < n; i++ { - v := x[ix] - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - ix += incX - } - return idx -} - -// Dswap exchanges the elements of two vectors. -// x[i], y[i] = y[i], x[i] for all i -func (Implementation) Dswap(n int, x []float64, incX int, y []float64, incY int) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, v := range x { - x[i], y[i] = y[i], v - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - x[ix], y[iy] = y[iy], x[ix] - ix += incX - iy += incY - } -} - -// Dcopy copies the elements of x into the elements of y. 
-// y[i] = x[i] for all i -func (Implementation) Dcopy(n int, x []float64, incX int, y []float64, incY int) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if incX == 1 && incY == 1 { - copy(y[:n], x[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - y[iy] = x[ix] - ix += incX - iy += incY - } -} - -// Daxpy adds alpha times x to y -// y[i] += alpha * x[i] for all i -func (Implementation) Daxpy(n int, alpha float64, x []float64, incX int, y []float64, incY int) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if alpha == 0 { - return - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(badLenX) - } - if len(y) < n { - panic(badLenY) - } - asm.DaxpyUnitary(alpha, x[:n], y, y) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(badLenX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(badLenY) - } - asm.DaxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Drotg computes the plane rotation -// _ _ _ _ _ _ -// | c s | | a | | r | -// | -s c | * | b | = | 0 | -// ‾ ‾ ‾ ‾ ‾ ‾ -// where -// r = ±(a^2 + b^2) -// c = a/r, the cosine of the plane rotation -// s = b/r, the sine of the plane rotation -// -// NOTE: There is a 
discrepancy between the refence implementation and the BLAS -// technical manual regarding the sign for r when a or b are zero. -// Drotg agrees with the definition in the manual and other -// common BLAS implementations. -func (Implementation) Drotg(a, b float64) (c, s, r, z float64) { - if b == 0 && a == 0 { - return 1, 0, a, 0 - } - absA := math.Abs(a) - absB := math.Abs(b) - aGTb := absA > absB - r = math.Hypot(a, b) - if aGTb { - r = math.Copysign(r, a) - } else { - r = math.Copysign(r, b) - } - c = a / r - s = b / r - if aGTb { - z = s - } else if c != 0 { // r == 0 case handled above - z = 1 / c - } else { - z = 1 - } - return -} - -// Drotmg computes the modified Givens rotation. See -// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html -// for more details. -func (Implementation) Drotmg(d1, d2, x1, y1 float64) (p blas.DrotmParams, rd1, rd2, rx1 float64) { - var p1, p2, q1, q2, u float64 - - const ( - gam = 4096.0 - gamsq = 16777216.0 - rgamsq = 5.9604645e-8 - ) - - if d1 < 0 { - p.Flag = blas.Rescaling - return - } - - p2 = d2 * y1 - if p2 == 0 { - p.Flag = blas.Identity - rd1 = d1 - rd2 = d2 - rx1 = x1 - return - } - p1 = d1 * x1 - q2 = p2 * y1 - q1 = p1 * x1 - - absQ1 := math.Abs(q1) - absQ2 := math.Abs(q2) - - if absQ1 < absQ2 && q2 < 0 { - p.Flag = blas.Rescaling - return - } - - if d1 == 0 { - p.Flag = blas.Diagonal - p.H[0] = p1 / p2 - p.H[3] = x1 / y1 - u = 1 + p.H[0]*p.H[3] - rd1, rd2 = d2/u, d1/u - rx1 = y1 / u - return - } - - // Now we know that d1 != 0, and d2 != 0. 
If d2 == 0, it would be caught - // when p2 == 0, and if d1 == 0, then it is caught above - - if absQ1 > absQ2 { - p.H[1] = -y1 / x1 - p.H[2] = p2 / p1 - u = 1 - p.H[2]*p.H[1] - rd1 = d1 - rd2 = d2 - rx1 = x1 - p.Flag = blas.OffDiagonal - // u must be greater than zero because |q1| > |q2|, so check from netlib - // is unnecessary - // This is left in for ease of comparison with complex routines - //if u > 0 { - rd1 /= u - rd2 /= u - rx1 *= u - //} - } else { - p.Flag = blas.Diagonal - p.H[0] = p1 / p2 - p.H[3] = x1 / y1 - u = 1 + p.H[0]*p.H[3] - rd1 = d2 / u - rd2 = d1 / u - rx1 = y1 * u - } - - for rd1 <= rgamsq || rd1 >= gamsq { - if p.Flag == blas.OffDiagonal { - p.H[0] = 1 - p.H[3] = 1 - p.Flag = blas.Rescaling - } else if p.Flag == blas.Diagonal { - p.H[1] = -1 - p.H[2] = 1 - p.Flag = blas.Rescaling - } - if rd1 <= rgamsq { - rd1 *= gam * gam - rx1 /= gam - p.H[0] /= gam - p.H[2] /= gam - } else { - rd1 /= gam * gam - rx1 *= gam - p.H[0] *= gam - p.H[2] *= gam - } - } - - for math.Abs(rd2) <= rgamsq || math.Abs(rd2) >= gamsq { - if p.Flag == blas.OffDiagonal { - p.H[0] = 1 - p.H[3] = 1 - p.Flag = blas.Rescaling - } else if p.Flag == blas.Diagonal { - p.H[1] = -1 - p.H[2] = 1 - p.Flag = blas.Rescaling - } - if math.Abs(rd2) <= rgamsq { - rd2 *= gam * gam - p.H[1] /= gam - p.H[3] /= gam - } else { - rd2 /= gam * gam - p.H[1] *= gam - p.H[3] *= gam - } - } - return -} - -// Drot applies a plane transformation. 
-// x[i] = c * x[i] + s * y[i] -// y[i] = c * y[i] - s * x[i] -func (Implementation) Drot(n int, x []float64, incX int, y []float64, incY int, c float64, s float64) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = c*vx+s*vy, c*vy-s*vx - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx - ix += incX - iy += incY - } -} - -// Drotm applies the modified Givens rotation to the 2×n matrix. -func (Implementation) Drotm(n int, x []float64, incX int, y []float64, incY int, p blas.DrotmParams) { - if n <= 0 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - - var h11, h12, h21, h22 float64 - var ix, iy int - switch p.Flag { - case blas.Identity: - return - case blas.Rescaling: - h11 = p.H[0] - h12 = p.H[2] - h21 = p.H[1] - h22 = p.H[3] - case blas.OffDiagonal: - h11 = 1 - h12 = p.H[2] - h21 = p.H[1] - h22 = 1 - case blas.Diagonal: - h11 = p.H[0] - h12 = 1 - h21 = -1 - h22 = p.H[3] - } - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 - } - return - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - 
x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 - ix += incX - iy += incY - } - return -} - -// Dscal scales x by alpha. -// x[i] *= alpha -// Dscal has no effect if incX < 0. -func (Implementation) Dscal(n int, alpha float64, x []float64, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if n < 1 { - if n == 0 { - return - } - if n < 1 { - panic(negativeN) - } - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - } - if incX == 1 { - x = x[:n] - for i := range x { - x[i] *= alpha - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] *= alpha - } - return -} diff --git a/vendor/github.com/gonum/blas/native/level1double_ddot.go b/vendor/github.com/gonum/blas/native/level1double_ddot.go deleted file mode 100644 index 7af4e0429..000000000 --- a/vendor/github.com/gonum/blas/native/level1double_ddot.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package native - -import ( - "github.com/gonum/internal/asm" -) - -// Ddot computes the dot product of the two vectors -// \sum_i x[i]*y[i] -func (Implementation) Ddot(n int, x []float64, incX int, y []float64, incY int) float64 { - if n < 0 { - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(badLenX) - } - if len(y) < n { - panic(badLenY) - } - return asm.DdotUnitary(x[:n], y) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(badLenX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(badLenY) - } - return asm.DdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} diff --git a/vendor/github.com/gonum/blas/native/level1single.go b/vendor/github.com/gonum/blas/native/level1single.go deleted file mode 100644 index 6bcba83f5..000000000 --- a/vendor/github.com/gonum/blas/native/level1single.go +++ /dev/null @@ -1,623 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - math "github.com/gonum/blas/native/internal/math32" - - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -var _ blas.Float32Level1 = Implementation{} - -// Snrm2 computes the Euclidean norm of a vector, -// sqrt(\sum_i x[i] * x[i]). -// This function returns 0 if incX is negative. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Snrm2(n int, x []float32, incX int) float32 { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if n < 2 { - if n == 1 { - return math.Abs(x[0]) - } - if n == 0 { - return 0 - } - if n < 1 { - panic(negativeN) - } - } - var ( - scale float32 = 0 - sumSquares float32 = 1 - ) - if incX == 1 { - x = x[:n] - for _, v := range x { - absxi := math.Abs(v) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - return scale * math.Sqrt(sumSquares) - } - for ix := 0; ix < n*incX; ix += incX { - val := x[ix] - if val == 0 { - continue - } - absxi := math.Abs(val) - if scale < absxi { - sumSquares = 1 + sumSquares*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumSquares = sumSquares + (absxi/scale)*(absxi/scale) - } - } - return scale * math.Sqrt(sumSquares) -} - -// Sasum computes the sum of the absolute values of the elements of x. -// \sum_i |x[i]| -// Sasum returns 0 if incX is negative. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sasum(n int, x []float32, incX int) float32 { - var sum float32 - if n < 0 { - panic(negativeN) - } - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return 0 - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if incX == 1 { - x = x[:n] - for _, v := range x { - sum += math.Abs(v) - } - return sum - } - for i := 0; i < n; i++ { - sum += math.Abs(x[i*incX]) - } - return sum -} - -// Isamax returns the index of the largest element of x. If there are multiple -// such indices the earliest is returned. Idamax returns -1 if incX is negative or if -// n == 0. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Isamax(n int, x []float32, incX int) int { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return -1 - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if n < 2 { - if n == 1 { - return 0 - } - if n == 0 { - return -1 // Netlib returns invalid index when n == 0 - } - if n < 1 { - panic(negativeN) - } - } - idx := 0 - max := math.Abs(x[0]) - if incX == 1 { - for i, v := range x[:n] { - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - } - return idx - } - ix := incX - for i := 1; i < n; i++ { - v := x[ix] - absV := math.Abs(v) - if absV > max { - max = absV - idx = i - } - ix += incX - } - return idx -} - -// Sswap exchanges the elements of two vectors. -// x[i], y[i] = y[i], x[i] for all i -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sswap(n int, x []float32, incX int, y []float32, incY int) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, v := range x { - x[i], y[i] = y[i], v - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - x[ix], y[iy] = y[iy], x[ix] - ix += incX - iy += incY - } -} - -// Scopy copies the elements of x into the elements of y. -// y[i] = x[i] for all i -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Scopy(n int, x []float32, incX int, y []float32, incY int) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if incX == 1 && incY == 1 { - copy(y[:n], x[:n]) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - y[iy] = x[ix] - ix += incX - iy += incY - } -} - -// Saxpy adds alpha times x to y -// y[i] += alpha * x[i] for all i -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Saxpy(n int, alpha float32, x []float32, incX int, y []float32, incY int) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if alpha == 0 { - return - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(badLenX) - } - if len(y) < n { - panic(badLenY) - } - asm.SaxpyUnitary(alpha, x[:n], y, y) - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(badLenX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(badLenY) - } - asm.SaxpyInc(alpha, x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} - -// Srotg computes the plane rotation -// _ _ _ _ _ _ -// | c s | | a | | r | -// | -s c | * | b | = | 0 | -// ‾ ‾ ‾ ‾ ‾ ‾ -// where -// r = ±(a^2 + b^2) -// c = a/r, the cosine of the plane rotation -// s = b/r, the sine 
of the plane rotation -// -// NOTE: There is a discrepancy between the refence implementation and the BLAS -// technical manual regarding the sign for r when a or b are zero. -// Srotg agrees with the definition in the manual and other -// common BLAS implementations. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Srotg(a, b float32) (c, s, r, z float32) { - if b == 0 && a == 0 { - return 1, 0, a, 0 - } - absA := math.Abs(a) - absB := math.Abs(b) - aGTb := absA > absB - r = math.Hypot(a, b) - if aGTb { - r = math.Copysign(r, a) - } else { - r = math.Copysign(r, b) - } - c = a / r - s = b / r - if aGTb { - z = s - } else if c != 0 { // r == 0 case handled above - z = 1 / c - } else { - z = 1 - } - return -} - -// Srotmg computes the modified Givens rotation. See -// http://www.netlib.org/lapack/explore-html/df/deb/drotmg_8f.html -// for more details. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Srotmg(d1, d2, x1, y1 float32) (p blas.SrotmParams, rd1, rd2, rx1 float32) { - var p1, p2, q1, q2, u float32 - - const ( - gam = 4096.0 - gamsq = 16777216.0 - rgamsq = 5.9604645e-8 - ) - - if d1 < 0 { - p.Flag = blas.Rescaling - return - } - - p2 = d2 * y1 - if p2 == 0 { - p.Flag = blas.Identity - rd1 = d1 - rd2 = d2 - rx1 = x1 - return - } - p1 = d1 * x1 - q2 = p2 * y1 - q1 = p1 * x1 - - absQ1 := math.Abs(q1) - absQ2 := math.Abs(q2) - - if absQ1 < absQ2 && q2 < 0 { - p.Flag = blas.Rescaling - return - } - - if d1 == 0 { - p.Flag = blas.Diagonal - p.H[0] = p1 / p2 - p.H[3] = x1 / y1 - u = 1 + p.H[0]*p.H[3] - rd1, rd2 = d2/u, d1/u - rx1 = y1 / u - return - } - - // Now we know that d1 != 0, and d2 != 0. 
If d2 == 0, it would be caught - // when p2 == 0, and if d1 == 0, then it is caught above - - if absQ1 > absQ2 { - p.H[1] = -y1 / x1 - p.H[2] = p2 / p1 - u = 1 - p.H[2]*p.H[1] - rd1 = d1 - rd2 = d2 - rx1 = x1 - p.Flag = blas.OffDiagonal - // u must be greater than zero because |q1| > |q2|, so check from netlib - // is unnecessary - // This is left in for ease of comparison with complex routines - //if u > 0 { - rd1 /= u - rd2 /= u - rx1 *= u - //} - } else { - p.Flag = blas.Diagonal - p.H[0] = p1 / p2 - p.H[3] = x1 / y1 - u = 1 + p.H[0]*p.H[3] - rd1 = d2 / u - rd2 = d1 / u - rx1 = y1 * u - } - - for rd1 <= rgamsq || rd1 >= gamsq { - if p.Flag == blas.OffDiagonal { - p.H[0] = 1 - p.H[3] = 1 - p.Flag = blas.Rescaling - } else if p.Flag == blas.Diagonal { - p.H[1] = -1 - p.H[2] = 1 - p.Flag = blas.Rescaling - } - if rd1 <= rgamsq { - rd1 *= gam * gam - rx1 /= gam - p.H[0] /= gam - p.H[2] /= gam - } else { - rd1 /= gam * gam - rx1 *= gam - p.H[0] *= gam - p.H[2] *= gam - } - } - - for math.Abs(rd2) <= rgamsq || math.Abs(rd2) >= gamsq { - if p.Flag == blas.OffDiagonal { - p.H[0] = 1 - p.H[3] = 1 - p.Flag = blas.Rescaling - } else if p.Flag == blas.Diagonal { - p.H[1] = -1 - p.H[2] = 1 - p.Flag = blas.Rescaling - } - if math.Abs(rd2) <= rgamsq { - rd2 *= gam * gam - p.H[1] /= gam - p.H[3] /= gam - } else { - rd2 /= gam * gam - p.H[1] *= gam - p.H[3] *= gam - } - } - return -} - -// Srot applies a plane transformation. -// x[i] = c * x[i] + s * y[i] -// y[i] = c * y[i] - s * x[i] -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Srot(n int, x []float32, incX int, y []float32, incY int, c float32, s float32) { - if n < 1 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = c*vx+s*vy, c*vy-s*vx - } - return - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - for i := 0; i < n; i++ { - vx := x[ix] - vy := y[iy] - x[ix], y[iy] = c*vx+s*vy, c*vy-s*vx - ix += incX - iy += incY - } -} - -// Srotm applies the modified Givens rotation to the 2×n matrix. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Srotm(n int, x []float32, incX int, y []float32, incY int, p blas.SrotmParams) { - if n <= 0 { - if n == 0 { - return - } - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - - var h11, h12, h21, h22 float32 - var ix, iy int - switch p.Flag { - case blas.Identity: - return - case blas.Rescaling: - h11 = p.H[0] - h12 = p.H[2] - h21 = p.H[1] - h22 = p.H[3] - case blas.OffDiagonal: - h11 = 1 - h12 = p.H[2] - h21 = p.H[1] - h22 = 1 - case blas.Diagonal: - h11 = p.H[0] - h12 = 1 - h21 = -1 - h22 = p.H[3] - } - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if incX == 1 && incY == 1 { - x = x[:n] - for i, vx := range x { - vy := y[i] - x[i], y[i] = vx*h11+vy*h12, vx*h21+vy*h22 - } - return - } - for i := 0; i < n; i++ { - vx := x[ix] - vy 
:= y[iy] - x[ix], y[iy] = vx*h11+vy*h12, vx*h21+vy*h22 - ix += incX - iy += incY - } - return -} - -// Sscal scales x by alpha. -// x[i] *= alpha -// Sscal has no effect if incX < 0. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sscal(n int, alpha float32, x []float32, incX int) { - if incX < 1 { - if incX == 0 { - panic(zeroIncX) - } - return - } - if incX > 0 && (n-1)*incX >= len(x) { - panic(badX) - } - if n < 1 { - if n == 0 { - return - } - if n < 1 { - panic(negativeN) - } - } - if alpha == 0 { - if incX == 1 { - x = x[:n] - for i := range x { - x[i] = 0 - } - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] = 0 - } - } - if incX == 1 { - x = x[:n] - for i := range x { - x[i] *= alpha - } - return - } - for ix := 0; ix < n*incX; ix += incX { - x[ix] *= alpha - } - return -} diff --git a/vendor/github.com/gonum/blas/native/level1single_dsdot.go b/vendor/github.com/gonum/blas/native/level1single_dsdot.go deleted file mode 100644 index 4665a0106..000000000 --- a/vendor/github.com/gonum/blas/native/level1single_dsdot.go +++ /dev/null @@ -1,50 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/internal/asm" -) - -// Dsdot computes the dot product of the two vectors -// \sum_i x[i]*y[i] -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Dsdot(n int, x []float32, incX int, y []float32, incY int) float64 { - if n < 0 { - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(badLenX) - } - if len(y) < n { - panic(badLenY) - } - return asm.DsdotUnitary(x[:n], y) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(badLenX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(badLenY) - } - return asm.DsdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} diff --git a/vendor/github.com/gonum/blas/native/level1single_sdot.go b/vendor/github.com/gonum/blas/native/level1single_sdot.go deleted file mode 100644 index 1e5b56569..000000000 --- a/vendor/github.com/gonum/blas/native/level1single_sdot.go +++ /dev/null @@ -1,50 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/internal/asm" -) - -// Sdot computes the dot product of the two vectors -// \sum_i x[i]*y[i] -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sdot(n int, x []float32, incX int, y []float32, incY int) float32 { - if n < 0 { - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(badLenX) - } - if len(y) < n { - panic(badLenY) - } - return asm.SdotUnitary(x[:n], y) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(badLenX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(badLenY) - } - return asm.SdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy)) -} diff --git a/vendor/github.com/gonum/blas/native/level1single_sdsdot.go b/vendor/github.com/gonum/blas/native/level1single_sdsdot.go deleted file mode 100644 index d58be3d5e..000000000 --- a/vendor/github.com/gonum/blas/native/level1single_sdsdot.go +++ /dev/null @@ -1,50 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/internal/asm" -) - -// Sdsdot computes the dot product of the two vectors plus a constant -// alpha + \sum_i x[i]*y[i] -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sdsdot(n int, alpha float32, x []float32, incX int, y []float32, incY int) float32 { - if n < 0 { - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if incX == 1 && incY == 1 { - if len(x) < n { - panic(badLenX) - } - if len(y) < n { - panic(badLenY) - } - return alpha + float32(asm.DsdotUnitary(x[:n], y)) - } - var ix, iy int - if incX < 0 { - ix = (-n + 1) * incX - } - if incY < 0 { - iy = (-n + 1) * incY - } - if ix >= len(x) || ix+(n-1)*incX >= len(x) { - panic(badLenX) - } - if iy >= len(y) || iy+(n-1)*incY >= len(y) { - panic(badLenY) - } - return alpha + float32(asm.DsdotInc(x, y, uintptr(n), uintptr(incX), uintptr(incY), uintptr(ix), uintptr(iy))) -} diff --git a/vendor/github.com/gonum/blas/native/level2double.go b/vendor/github.com/gonum/blas/native/level2double.go deleted file mode 100644 index 6d595594f..000000000 --- a/vendor/github.com/gonum/blas/native/level2double.go +++ /dev/null @@ -1,2258 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -var _ blas.Float64Level2 = Implementation{} - -// Dgemv computes -// y = alpha * a * x + beta * y if tA = blas.NoTrans -// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. 
-func (Implementation) Dgemv(tA blas.Transpose, m, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - // Set up indexes - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { - panic(badY) - } - if lda*(m-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - - // Quick return if possible - if m == 0 || n == 0 || (alpha == 0 && beta == 1) { - return - } - - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(lenY - 1) * incY - } - - // First form y := beta * y - if incY > 0 { - Implementation{}.Dscal(lenY, beta, y, incY) - } else { - Implementation{}.Dscal(lenY, beta, y, -incY) - } - - if alpha == 0 { - return - } - - // Form y := alpha * A * x + y - if tA == blas.NoTrans { - if incX == 1 && incY == 1 { - for i := 0; i < m; i++ { - y[i] += alpha * asm.DdotUnitary(a[lda*i:lda*i+n], x) - } - return - } - iy := ky - for i := 0; i < m; i++ { - y[iy] += alpha * asm.DdotInc(x, a[lda*i:lda*i+n], uintptr(n), uintptr(incX), 1, uintptr(kx), 0) - iy += incY - } - return - } - // Cases where a is transposed. 
- if incX == 1 && incY == 1 { - for i := 0; i < m; i++ { - tmp := alpha * x[i] - if tmp != 0 { - asm.DaxpyUnitary(tmp, a[lda*i:lda*i+n], y, y) - } - } - return - } - ix := kx - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - if tmp != 0 { - asm.DaxpyInc(tmp, a[lda*i:lda*i+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) - } - ix += incX - } -} - -// Dger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -func (Implementation) Dger(m, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { - // Check inputs - if m < 0 { - panic("m < 0") - } - if n < 0 { - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (m-1)*incX >= len(x)) || (incX < 0 && (1-m)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(m-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if lda < max(1, n) { - panic(badLdA) - } - - // Quick return if possible - if m == 0 || n == 0 || alpha == 0 { - return - } - - var ky, kx int - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - - if incX > 0 { - kx = 0 - } else { - kx = -(m - 1) * incX - } - - if incX == 1 && incY == 1 { - x = x[:m] - y = y[:n] - for i, xv := range x { - tmp := alpha * xv - if tmp != 0 { - atmp := a[i*lda : i*lda+n] - asm.DaxpyUnitary(tmp, y, atmp, atmp) - } - } - return - } - - ix := kx - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - if tmp != 0 { - asm.DaxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0) - } - ix += incX - } -} - -// Dgbmv computes -// y = alpha * A * x + beta * y if tA == blas.NoTrans -// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans -// where a is an m×n band matrix kL subdiagonals and kU super-diagonals, and -// m and n refer to the size of the full dense 
matrix it represents. -// x and y are vectors, and alpha and beta are scalars. -func (Implementation) Dgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if kL < 0 { - panic(kLLT0) - } - if kL < 0 { - panic(kULT0) - } - if lda < kL+kU+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - // Set up indexes - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { - panic(badY) - } - if lda*(m-1)+kL+kU+1 > len(a) || lda < kL+kU+1 { - panic(badLdA) - } - - // Quick return if possible - if m == 0 || n == 0 || (alpha == 0 && beta == 1) { - return - } - - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(lenY - 1) * incY - } - - // First form y := beta * y - if incY > 0 { - Implementation{}.Dscal(lenY, beta, y, incY) - } else { - Implementation{}.Dscal(lenY, beta, y, -incY) - } - - if alpha == 0 { - return - } - - // i and j are indices of the compacted banded matrix. 
- // off is the offset into the dense matrix (off + j = densej) - ld := min(m, n) - nCol := kU + 1 + kL - if tA == blas.NoTrans { - iy := ky - if incX == 1 { - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - xtmp := x[off : off+u-l] - var sum float64 - for j, v := range atmp { - sum += xtmp[j] * v - } - y[iy] += sum * alpha - iy += incY - } - return - } - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - jx := kx - var sum float64 - for _, v := range atmp { - sum += x[off*incX+jx] * v - jx += incX - } - y[iy] += sum * alpha - iy += incY - } - return - } - if incX == 1 { - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[i] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - } - return - } - ix := kx - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[ix] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - ix += incX - } -} - -// Dtrmv computes -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// A is an n×n Triangular matrix and x is a vector. 
-func (Implementation) Dtrmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda < n { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if n == 0 { - return - } - nonUnit := d != blas.Unit - if n == 1 { - x[0] *= a[0] - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - var tmp float64 - if nonUnit { - tmp = a[i*lda+i] * x[i] - } else { - tmp = x[i] - } - xtmp := x[i+1:] - for j, v := range a[i*lda+i+1 : i*lda+n] { - tmp += v * xtmp[j] - } - x[i] = tmp - } - return - } - ix := kx - for i := 0; i < n; i++ { - var tmp float64 - if nonUnit { - tmp = a[i*lda+i] * x[ix] - } else { - tmp = x[ix] - } - jx := ix + incX - for _, v := range a[i*lda+i+1 : i*lda+n] { - tmp += v * x[jx] - jx += incX - } - x[ix] = tmp - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - var tmp float64 - if nonUnit { - tmp += a[i*lda+i] * x[i] - } else { - tmp = x[i] - } - for j, v := range a[i*lda : i*lda+i] { - tmp += v * x[j] - } - x[i] = tmp - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - var tmp float64 - if nonUnit { - tmp += a[i*lda+i] * x[ix] - } else { - tmp = x[ix] - } - jx := kx - for _, v := range a[i*lda : i*lda+i] { - tmp += v * x[jx] - jx += incX - } - x[ix] = tmp - ix -= incX - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - atmp := a[i*lda+i+1 : i*lda+n] - xtmp := x[i+1 : n] - for j, v := range atmp { - xtmp[j] += xi * v - } - if nonUnit { - x[i] *= a[i*lda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - jx := kx + (i+1)*incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - x[jx] += xi * v - jx += incX - } - if nonUnit { - x[ix] *= a[i*lda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - x[j] += xi * v - } - if nonUnit { - x[i] *= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - jx := kx - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - x[jx] += xi * v - jx += incX - } - if nonUnit { - x[ix] *= a[i*lda+i] - } - ix += incX - } -} - -// Dtrsv solves -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// A is an n×n triangular matrix and x is a vector. -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. 
-func (Implementation) Dtrsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float64, lda int, x []float64, incX int) { - // Test the input parameters - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - // Quick return if possible - if n == 0 { - return - } - if n == 1 { - if d == blas.NonUnit { - x[0] /= a[0] - } - return - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - nonUnit := d == blas.NonUnit - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - var sum float64 - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := i + j + 1 - sum += x[jv] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - var sum float64 - jx := ix + incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - var sum float64 - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - sum += x[j] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - var sum float64 - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix += incX - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := j + i + 1 - x[jv] -= v * xi - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx + (i+1)*incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - x[j] -= v * xi - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix -= incX - } -} - -// Dsymv computes -// y = alpha * A * x + beta * y, -// where a is an n×n symmetric matrix, x and y are vectors, and alpha and -// beta are scalars. 
-func (Implementation) Dsymv(ul blas.Uplo, n int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - // Check inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(negativeN) - } - if lda > 1 && lda > n { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - // Quick return if possible - if n == 0 || (alpha == 0 && beta == 1) { - return - } - - // Set up start points - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - - // Form y = beta * y - if beta != 1 { - if incY > 0 { - Implementation{}.Dscal(n, beta, y, incY) - } else { - Implementation{}.Dscal(n, beta, y, -incY) - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * a[0] * x[0] - return - } - - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := x[i] * a[i*lda+i] - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jp := j + i + 1 - sum += x[jp] * v - y[jy] += xv * v - jy += incY - } - y[iy] += alpha * sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := x[ix] * a[i*lda+i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - } - return - } - // Cases where a is lower triangular. 
- if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - jy := ky - xv := alpha * x[i] - atmp := a[i*lda : i*lda+i] - var sum float64 - for j, v := range atmp { - sum += x[j] * v - y[jy] += xv * v - jy += incY - } - sum += x[i] * a[i*lda+i] - sum *= alpha - y[iy] += sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xv := alpha * x[ix] - atmp := a[i*lda : i*lda+i] - var sum float64 - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - sum += x[ix] * a[i*lda+i] - sum *= alpha - y[iy] += sum - ix += incX - iy += incY - } -} - -// Dtbmv computes -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular banded matrix with k diagonals, and x is a vector. -func (Implementation) Dtbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda*(n-1)+k+1 > len(a) || lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } else if incX != 1 { - kx = 0 - } - - nonunit := d != blas.Unit - - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float64 - atmp := a[i*lda:] - xtmp := x[i:] - for j := 1; j < u; j++ { - sum += xtmp[j] * atmp[j] - } - if nonunit { - sum += xtmp[0] * atmp[0] - } else { - sum += xtmp[0] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float64 - 
atmp := a[i*lda:] - jx := incX - for j := 1; j < u; j++ { - sum += x[ix+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[0] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float64 - for j := l; j < k; j++ { - sum += x[i-k+j] * atmp[j] - } - if nonunit { - sum += x[i] * atmp[k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float64 - jx := l * incX - for j := l; j < k; j++ { - sum += x[ix-k*incX+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[k] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float64 - for j := 1; j < u; j++ { - sum += x[i-j] * a[(i-j)*lda+j] - } - if nonunit { - sum += x[i] * a[i*lda] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float64 - jx := incX - for j := 1; j < u; j++ { - sum += x[ix-jx] * a[(i-j)*lda+j] - jx += incX - } - if nonunit { - sum += x[ix] * a[i*lda] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var sum float64 - for j := 0; j < u; j++ { - sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] - } - if nonunit { - sum += x[i] * a[i*lda+k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var ( - sum float64 - jx int - ) - for j := 0; j < u; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - if nonunit { - sum += x[ix] * a[i*lda+k] - } else { - sum += x[ix] - } - x[ix] = sum - 
ix += incX - } -} - -// Dtpmv computes -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n unit triangular matrix in packed format, and x is a vector. -func (Implementation) Dtpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if len(ap) < (n*(n+1))/2 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xi += v * xtmp[j] - } - x[i] = xi - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset += n - i - ix += incX - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset-i : offset] - for j, v := range atmp { - xi += v * x[j] - } - x[i] = xi - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - xix += 
v * x[jx] - jx += incX - } - x[ix] = xix - offset -= i + 1 - ix -= incX - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - jx := kx + (i+1)*incX - atmp := ap[offset+1 : offset+n-i] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - offset -= n - i + 1 - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - jx := kx - atmp := ap[offset-i : offset] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - ix += incX - offset += i + 2 - } -} - -// Dtbsv solves -// A * x = b -// where A is an n×n triangular banded matrix with k diagonals in packed format, -// and x is a vector. -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. 
-func (Implementation) Dtbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float64, lda int, x []float64, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda*(n-1)+k+1 > len(a) || lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } else { - kx = 0 - } - nonUnit := d == blas.NonUnit - // Form x = A^-1 x. - // Several cases below use subslices for speed improvement. - // The incX != 1 cases usually do not because incX may be negative. - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - atmp := a[i*lda+1:] - xtmp := x[i+1 : i+bands+1] - var sum float64 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - max := k + 1 - if i+max > n { - max = n - i - } - atmp := a[i*lda:] - var ( - jx int - sum float64 - ) - for j := 1; j < max; j++ { - jx += incX - sum += x[ix+jx] * atmp[j] - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[0] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - xtmp := x[i-bands : i] - var sum float64 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= atmp[bands] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - var ( - sum float64 - jx int - ) - for j := 0; j < bands; j++ { 
- sum += x[ix-bands*incX+jx] * atmp[j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[bands] - } - ix += incX - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var sum float64 - for j := 0; j < bands; j++ { - sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var ( - sum float64 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda] - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var sum float64 - xtmp := x[i+1 : i+1+bands] - for j, v := range xtmp { - sum += v * a[(i+j+1)*lda+k-j-1] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+k] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var ( - sum float64 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+k] - } - ix -= incX - } -} - -// Dsbmv performs -// y = alpha * A * x + beta * y -// where A is an n×n symmetric banded matrix, x and y are vectors, and alpha -// and beta are scalars. 
-func (Implementation) Dsbmv(ul blas.Uplo, n, k int, alpha float64, a []float64, lda int, x []float64, incX int, beta float64, y []float64, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(n-1)+k+1 > len(a) || lda < k+1 { - panic(badLdA) - } - - // Quick return if possible - if n == 0 || (alpha == 0 && beta == 1) { - return - } - - // Set up indexes - lenX := n - lenY := n - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(lenY - 1) * incY - } - - // First form y := beta * y - if incY > 0 { - Implementation{}.Dscal(lenY, beta, y, incY) - } else { - Implementation{}.Dscal(lenY, beta, y, -incY) - } - - if alpha == 0 { - return - } - - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - atmp := a[i*lda:] - tmp := alpha * x[i] - sum := tmp * atmp[0] - u := min(k, n-i-1) - jy := incY - for j := 1; j <= u; j++ { - v := atmp[j] - sum += alpha * x[i+j] * v - y[iy+jy] += tmp * v - jy += incY - } - y[iy] += sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - atmp := a[i*lda:] - tmp := alpha * x[ix] - sum := tmp * atmp[0] - u := min(k, n-i-1) - jx := incX - jy := incY - for j := 1; j <= u; j++ { - v := atmp[j] - sum += alpha * x[ix+jx] * v - y[iy+jy] += tmp * v - jx += incX - jy += incY - } - y[iy] += sum - ix += incX - iy += incY - } - return - } - - // Casses where a has bands below the diagonal. 
- if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - l := max(0, k-i) - tmp := alpha * x[i] - jy := l * incY - atmp := a[i*lda:] - for j := l; j < k; j++ { - v := atmp[j] - y[iy] += alpha * v * x[i-k+j] - y[iy-k*incY+jy] += tmp * v - jy += incY - } - y[iy] += tmp * atmp[k] - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - l := max(0, k-i) - tmp := alpha * x[ix] - jx := l * incX - jy := l * incY - atmp := a[i*lda:] - for j := l; j < k; j++ { - v := atmp[j] - y[iy] += alpha * v * x[ix-k*incX+jx] - y[iy-k*incY+jy] += tmp * v - jx += incX - jy += incY - } - y[iy] += tmp * atmp[k] - ix += incX - iy += incY - } - return -} - -// Dsyr performs the rank-one update -// a += alpha * x * x^T -// where a is an n×n symmetric matrix, and x is a vector. -func (Implementation) Dsyr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64, lda int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if alpha == 0 || n == 0 { - return - } - - lenX := n - var kx int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda+i : i*lda+n] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += v * tmp - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - jx := ix - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += x[jx] * tmp - jx += incX - } - } - ix += incX - } - return - } - // Cases where a is lower triangular. 
- if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda:] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += tmp * v - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - atmp := a[i*lda:] - jx := kx - for j := 0; j < i+1; j++ { - atmp[j] += tmp * x[jx] - jx += incX - } - } - ix += incX - } -} - -// Dsyr2 performs the symmetric rank-two update -// A += alpha * x * y^T + alpha * y * x^T -// where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. -func (Implementation) Dsyr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, a []float64, lda int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if alpha == 0 { - return - } - - var ky, kx int - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - 
return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } - return -} - -// Dtpsv solves -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix in packed format and x is a vector. -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -func (Implementation) Dtpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float64, x []float64, incX int) { - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if len(ap) < (n*(n+1))/2 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - offset = n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - var sum float64 - for j, v := range atmp { - sum += v * xtmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - var sum float64 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] 
-= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix -= incX - offset -= n - i + 1 - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i : offset] - var sum float64 - for j, v := range atmp { - sum += v * x[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := ap[offset-i : offset] - var sum float64 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix += incX - offset += i + 2 - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] -= v * xi - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] -= v * xi - } - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix -= incX - offset -= i + 1 - } -} - -// Dspmv performs -// y = alpha * A * x + beta * y, -// where A is an n×n symmetric matrix in packed format, x and y are vectors -// and alpha and beta are scalars. 
-func (Implementation) Dspmv(ul blas.Uplo, n int, alpha float64, a []float64, x []float64, incX int, beta float64, y []float64, incY int) { - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if len(a) < (n*(n+1))/2 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - // Quick return if possible - if n == 0 || (alpha == 0 && beta == 1) { - return - } - - // Set up start points - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - - // Form y = beta * y - if beta != 1 { - if incY > 0 { - Implementation{}.Dscal(n, beta, y, incY) - } else { - Implementation{}.Dscal(n, beta, y, -incY) - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * a[0] * x[0] - return - } - var offset int // Offset is the index of (i,i). 
- if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := a[offset] * x[i] - atmp := a[offset+1 : offset+n-i] - xtmp := x[i+1:] - jy := ky + (i+1)*incY - for j, v := range atmp { - sum += v * xtmp[j] - y[jy] += v * xv - jy += incY - } - y[iy] += alpha * sum - iy += incY - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := a[offset] * x[ix] - atmp := a[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - atmp := a[offset-i : offset] - jy := ky - var sum float64 - for j, v := range atmp { - sum += v * x[j] - y[jy] += v * xv - jy += incY - } - sum += a[offset] * x[i] - y[iy] += alpha * sum - iy += incY - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - atmp := a[offset-i : offset] - jx := kx - jy := ky - var sum float64 - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - - sum += a[offset] * x[ix] - y[iy] += alpha * sum - ix += incX - iy += incY - offset += i + 2 - } -} - -// Dspr computes the rank-one operation -// a += alpha * x * x^T -// where a is an n×n symmetric matrix in packed format, x is a vector, and -// alpha is a scalar. 
-func (Implementation) Dspr(ul blas.Uplo, n int, alpha float64, x []float64, incX int, a []float64) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if len(a) < (n*(n+1))/2 { - panic(badLdA) - } - if alpha == 0 || n == 0 { - return - } - lenX := n - var kx int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - atmp := a[offset:] - xv := alpha * x[i] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx + i*incX - atmp := a[offset:] - xv := alpha * x[ix] - for j := 0; j < n-i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := a[offset-i:] - xv := alpha * x[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := a[offset-i:] - xv := alpha * x[ix] - for j := 0; j <= i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += i + 2 - } -} - -// Dspr2 performs the symmetric rank-2 update -// a += alpha * x * y^T + alpha * y * x^T -// where a is an n×n symmetric matrix in packed format and x and y are vectors. 
-func (Implementation) Dspr2(ul blas.Uplo, n int, alpha float64, x []float64, incX int, y []float64, incY int, ap []float64) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if len(ap) < (n*(n+1))/2 { - panic(badLdA) - } - if alpha == 0 { - return - } - var ky, kx int - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset:] - xi := x[i] - yi := y[i] - xtmp := x[i:n] - ytmp := y[i:n] - for j, v := range xtmp { - atmp[j] += alpha * (xi*ytmp[j] + v*yi) - } - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - atmp := ap[offset:] - xi := x[ix] - yi := y[iy] - for j := 0; j < n-i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i:] - xi := x[i] - yi := y[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += alpha * (xi*y[j] + v*yi) - } - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - atmp := ap[offset-i:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += i + 2 - } -} diff --git a/vendor/github.com/gonum/blas/native/level2single.go b/vendor/github.com/gonum/blas/native/level2single.go deleted file mode 100644 index 
b213ad6e7..000000000 --- a/vendor/github.com/gonum/blas/native/level2single.go +++ /dev/null @@ -1,2292 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -var _ blas.Float32Level2 = Implementation{} - -// Sgemv computes -// y = alpha * a * x + beta * y if tA = blas.NoTrans -// y = alpha * A^T * x + beta * y if tA = blas.Trans or blas.ConjTrans -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sgemv(tA blas.Transpose, m, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if lda < max(1, n) { - panic(badLdA) - } - - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - // Set up indexes - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { - panic(badY) - } - if lda*(m-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - - // Quick return if possible - if m == 0 || n == 0 || (alpha == 0 && beta == 1) { - return - } - - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(lenY - 1) * incY - } - - // First form y := beta * y - if incY > 0 { - Implementation{}.Sscal(lenY, beta, y, incY) - } else { - Implementation{}.Sscal(lenY, beta, y, -incY) - } - - 
if alpha == 0 { - return - } - - // Form y := alpha * A * x + y - if tA == blas.NoTrans { - if incX == 1 && incY == 1 { - for i := 0; i < m; i++ { - y[i] += alpha * asm.SdotUnitary(a[lda*i:lda*i+n], x) - } - return - } - iy := ky - for i := 0; i < m; i++ { - y[iy] += alpha * asm.SdotInc(x, a[lda*i:lda*i+n], uintptr(n), uintptr(incX), 1, uintptr(kx), 0) - iy += incY - } - return - } - // Cases where a is transposed. - if incX == 1 && incY == 1 { - for i := 0; i < m; i++ { - tmp := alpha * x[i] - if tmp != 0 { - asm.SaxpyUnitary(tmp, a[lda*i:lda*i+n], y, y) - } - } - return - } - ix := kx - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - if tmp != 0 { - asm.SaxpyInc(tmp, a[lda*i:lda*i+n], y, uintptr(n), 1, uintptr(incY), 0, uintptr(ky)) - } - ix += incX - } -} - -// Sger performs the rank-one operation -// A += alpha * x * y^T -// where A is an m×n dense matrix, x and y are vectors, and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sger(m, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { - // Check inputs - if m < 0 { - panic("m < 0") - } - if n < 0 { - panic(negativeN) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (m-1)*incX >= len(x)) || (incX < 0 && (1-m)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(m-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if lda < max(1, n) { - panic(badLdA) - } - - // Quick return if possible - if m == 0 || n == 0 || alpha == 0 { - return - } - - var ky, kx int - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - - if incX > 0 { - kx = 0 - } else { - kx = -(m - 1) * incX - } - - if incX == 1 && incY == 1 { - x = x[:m] - y = y[:n] - for i, xv := range x { - tmp := alpha * xv - if tmp != 0 { - atmp := a[i*lda : i*lda+n] - asm.SaxpyUnitary(tmp, y, atmp, 
atmp) - } - } - return - } - - ix := kx - for i := 0; i < m; i++ { - tmp := alpha * x[ix] - if tmp != 0 { - asm.SaxpyInc(tmp, y, a[i*lda:i*lda+n], uintptr(n), uintptr(incY), 1, uintptr(ky), 0) - } - ix += incX - } -} - -// Sgbmv computes -// y = alpha * A * x + beta * y if tA == blas.NoTrans -// y = alpha * A^T * x + beta * y if tA == blas.Trans or blas.ConjTrans -// where a is an m×n band matrix kL subdiagonals and kU super-diagonals, and -// m and n refer to the size of the full dense matrix it represents. -// x and y are vectors, and alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sgbmv(tA blas.Transpose, m, n, kL, kU int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if kL < 0 { - panic(kLLT0) - } - if kL < 0 { - panic(kULT0) - } - if lda < kL+kU+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - // Set up indexes - lenX := m - lenY := n - if tA == blas.NoTrans { - lenX = n - lenY = m - } - if (incX > 0 && (lenX-1)*incX >= len(x)) || (incX < 0 && (1-lenX)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (lenY-1)*incY >= len(y)) || (incY < 0 && (1-lenY)*incY >= len(y)) { - panic(badY) - } - if lda*(m-1)+kL+kU+1 > len(a) || lda < kL+kU+1 { - panic(badLdA) - } - - // Quick return if possible - if m == 0 || n == 0 || (alpha == 0 && beta == 1) { - return - } - - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(lenY - 1) * incY - } - - // First form y := beta * y - if incY > 0 { - Implementation{}.Sscal(lenY, beta, y, incY) - } else { - Implementation{}.Sscal(lenY, beta, y, -incY) - } - - if alpha == 0 { - return - } - - // i and j are indices of 
the compacted banded matrix. - // off is the offset into the dense matrix (off + j = densej) - ld := min(m, n) - nCol := kU + 1 + kL - if tA == blas.NoTrans { - iy := ky - if incX == 1 { - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - xtmp := x[off : off+u-l] - var sum float32 - for j, v := range atmp { - sum += xtmp[j] * v - } - y[iy] += sum * alpha - iy += incY - } - return - } - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - jx := kx - var sum float32 - for _, v := range atmp { - sum += x[off*incX+jx] * v - jx += incX - } - y[iy] += sum * alpha - iy += incY - } - return - } - if incX == 1 { - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[i] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - } - return - } - ix := kx - for i := 0; i < m; i++ { - l := max(0, kL-i) - u := min(nCol, ld+kL-i) - off := max(0, i-kL) - atmp := a[i*lda+l : i*lda+u] - tmp := alpha * x[ix] - jy := ky - for _, v := range atmp { - y[jy+off*incY] += tmp * v - jy += incY - } - ix += incX - } -} - -// Strmv computes -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// A is an n×n Triangular matrix and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Strmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda < n { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if n == 0 { - return - } - nonUnit := d != blas.Unit - if n == 1 { - x[0] *= a[0] - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - var tmp float32 - if nonUnit { - tmp = a[i*lda+i] * x[i] - } else { - tmp = x[i] - } - xtmp := x[i+1:] - for j, v := range a[i*lda+i+1 : i*lda+n] { - tmp += v * xtmp[j] - } - x[i] = tmp - } - return - } - ix := kx - for i := 0; i < n; i++ { - var tmp float32 - if nonUnit { - tmp = a[i*lda+i] * x[ix] - } else { - tmp = x[ix] - } - jx := ix + incX - for _, v := range a[i*lda+i+1 : i*lda+n] { - tmp += v * x[jx] - jx += incX - } - x[ix] = tmp - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - var tmp float32 - if nonUnit { - tmp += a[i*lda+i] * x[i] - } else { - tmp = x[i] - } - for j, v := range a[i*lda : i*lda+i] { - tmp += v * x[j] - } - x[i] = tmp - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - var tmp float32 - if nonUnit { - tmp += a[i*lda+i] * x[ix] - } else { - tmp = x[ix] - } - jx := kx - for _, v := range a[i*lda : i*lda+i] { - tmp += v * x[jx] - jx += incX - } - x[ix] = tmp - ix -= incX - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - xi := x[i] - atmp := a[i*lda+i+1 : i*lda+n] - xtmp := x[i+1 : n] - for j, v := range atmp { - xtmp[j] += xi * v - } - if nonUnit { - x[i] *= a[i*lda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - xi := x[ix] - jx := kx + (i+1)*incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - x[jx] += xi * v - jx += incX - } - if nonUnit { - x[ix] *= a[i*lda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - x[j] += xi * v - } - if nonUnit { - x[i] *= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - xi := x[ix] - jx := kx - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - x[jx] += xi * v - jx += incX - } - if nonUnit { - x[ix] *= a[i*lda+i] - } - ix += incX - } -} - -// Strsv solves -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// A is an n×n triangular matrix and x is a vector. -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Strsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, a []float32, lda int, x []float32, incX int) { - // Test the input parameters - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - // Quick return if possible - if n == 0 { - return - } - if n == 1 { - if d == blas.NonUnit { - x[0] /= a[0] - } - return - } - - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } - nonUnit := d == blas.NonUnit - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - var sum float32 - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := i + j + 1 - sum += x[jv] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - var sum float32 - jx := ix + incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - var sum float32 - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - sum += x[j] * v - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+i] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - var sum float32 - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - sum += x[jx] * v - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+i] - } - ix += incX - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jv := j + i + 1 - x[jv] -= v * xi - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx + (i+1)*incX - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= a[i*lda+i] - } - xi := x[i] - atmp := a[i*lda : i*lda+i] - for j, v := range atmp { - x[j] -= v * xi - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= a[i*lda+i] - } - xi := x[ix] - jx := kx - atmp := a[i*lda : i*lda+i] - for _, v := range atmp { - x[jx] -= v * xi - jx += incX - } - ix -= incX - } -} - -// Ssymv computes -// y = alpha * A * x + beta * y, -// where a is an n×n symmetric matrix, x and y are vectors, and alpha and -// beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Ssymv(ul blas.Uplo, n int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - // Check inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(negativeN) - } - if lda > 1 && lda > n { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - // Quick return if possible - if n == 0 || (alpha == 0 && beta == 1) { - return - } - - // Set up start points - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - - // Form y = beta * y - if beta != 1 { - if incY > 0 { - Implementation{}.Sscal(n, beta, y, incY) - } else { - Implementation{}.Sscal(n, beta, y, -incY) - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * a[0] * x[0] - return - } - - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := x[i] * a[i*lda+i] - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for j, v := range atmp { - jp := j + i + 1 - sum += x[jp] * v - y[jy] += xv * v - jy += incY - } - y[iy] += alpha * sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := x[ix] * a[i*lda+i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - atmp := a[i*lda+i+1 : i*lda+n] - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - } - return - } - // Cases where a is lower triangular. 
- if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - jy := ky - xv := alpha * x[i] - atmp := a[i*lda : i*lda+i] - var sum float32 - for j, v := range atmp { - sum += x[j] * v - y[jy] += xv * v - jy += incY - } - sum += x[i] * a[i*lda+i] - sum *= alpha - y[iy] += sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xv := alpha * x[ix] - atmp := a[i*lda : i*lda+i] - var sum float32 - for _, v := range atmp { - sum += x[jx] * v - y[jy] += xv * v - jx += incX - jy += incY - } - sum += x[ix] * a[i*lda+i] - sum *= alpha - y[iy] += sum - ix += incX - iy += incY - } -} - -// Stbmv computes -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular banded matrix with k diagonals, and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stbmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if lda*(n-1)+k+1 > len(a) || lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } else if incX != 1 { - kx = 0 - } - - nonunit := d != blas.Unit - - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float32 - atmp := a[i*lda:] - xtmp := x[i:] - for j := 1; j < u; j++ { - sum += xtmp[j] * atmp[j] - } - if nonunit { - sum += xtmp[0] * atmp[0] - } else { - sum += xtmp[0] - } - x[i] = sum - } - return - } - ix 
:= kx - for i := 0; i < n; i++ { - u := min(1+k, n-i) - var sum float32 - atmp := a[i*lda:] - jx := incX - for j := 1; j < u; j++ { - sum += x[ix+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[0] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float32 - for j := l; j < k; j++ { - sum += x[i-k+j] * atmp[j] - } - if nonunit { - sum += x[i] * atmp[k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - l := max(0, k-i) - atmp := a[i*lda:] - var sum float32 - jx := l * incX - for j := l; j < k; j++ { - sum += x[ix-k*incX+jx] * atmp[j] - jx += incX - } - if nonunit { - sum += x[ix] * atmp[k] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float32 - for j := 1; j < u; j++ { - sum += x[i-j] * a[(i-j)*lda+j] - } - if nonunit { - sum += x[i] * a[i*lda] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - u := k + 1 - if i < u { - u = i + 1 - } - var sum float32 - jx := incX - for j := 1; j < u; j++ { - sum += x[ix-jx] * a[(i-j)*lda+j] - jx += incX - } - if nonunit { - sum += x[ix] * a[i*lda] - } else { - sum += x[ix] - } - x[ix] = sum - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var sum float32 - for j := 0; j < u; j++ { - sum += x[i+j+1] * a[(i+j+1)*lda+k-j-1] - } - if nonunit { - sum += x[i] * a[i*lda+k] - } else { - sum += x[i] - } - x[i] = sum - } - return - } - ix := kx - for i := 0; i < n; i++ { - u := k - if i+k >= n { - u = n - i - 1 - } - var ( - sum float32 - jx int - ) - for j := 0; j < u; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - if nonunit { 
- sum += x[ix] * a[i*lda+k] - } else { - sum += x[ix] - } - x[ix] = sum - ix += incX - } -} - -// Stpmv computes -// x = A * x if tA == blas.NoTrans -// x = A^T * x if tA == blas.Trans or blas.ConjTrans -// where A is an n×n unit triangular matrix in packed format, and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stpmv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if len(ap) < (n*(n+1))/2 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xi += v * xtmp[j] - } - x[i] = xi - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset += n - i - ix += incX - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - if nonUnit { - xi *= ap[offset] - } - atmp := ap[offset-i : offset] - for j, v := range atmp { - xi += v * x[j] - } - x[i] = xi - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 
1; i >= 0; i-- { - xix := x[ix] - if nonUnit { - xix *= ap[offset] - } - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - xix += v * x[jx] - jx += incX - } - x[ix] = xix - offset -= i + 1 - ix -= incX - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - xix := x[ix] - jx := kx + (i+1)*incX - atmp := ap[offset+1 : offset+n-i] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - offset -= n - i + 1 - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] += v * xi - } - if nonUnit { - x[i] *= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - xix := x[ix] - jx := kx - atmp := ap[offset-i : offset] - for _, v := range atmp { - x[jx] += v * xix - jx += incX - } - if nonUnit { - x[ix] *= ap[offset] - } - ix += incX - offset += i + 2 - } -} - -// Stbsv solves -// A * x = b -// where A is an n×n triangular banded matrix with k diagonals in packed format, -// and x is a vector. -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Stbsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n, k int, a []float32, lda int, x []float32, incX int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if lda*(n-1)+k+1 > len(a) || lda < k+1 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX < 0 { - kx = -(n - 1) * incX - } else { - kx = 0 - } - nonUnit := d == blas.NonUnit - // Form x = A^-1 x. - // Several cases below use subslices for speed improvement. - // The incX != 1 cases usually do not because incX may be negative. - if tA == blas.NoTrans { - if ul == blas.Upper { - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - atmp := a[i*lda+1:] - xtmp := x[i+1 : i+bands+1] - var sum float32 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - max := k + 1 - if i+max > n { - max = n - i - } - atmp := a[i*lda:] - var ( - jx int - sum float32 - ) - for j := 1; j < max; j++ { - jx += incX - sum += x[ix+jx] * atmp[j] - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[0] - } - ix -= incX - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - xtmp := x[i-bands : i] - var sum float32 - for j, v := range xtmp { - sum += v * atmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= atmp[bands] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - atmp := a[i*lda+k-bands:] - var ( - sum float32 - jx int - ) - for j := 0; j < bands; j++ { 
- sum += x[ix-bands*incX+jx] * atmp[j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= atmp[bands] - } - ix += incX - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var sum float32 - for j := 0; j < bands; j++ { - sum += x[i-bands+j] * a[(i-bands+j)*lda+bands-j] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda] - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - bands := k - if i-k < 0 { - bands = i - } - var ( - sum float32 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix-bands*incX+jx] * a[(i-bands+j)*lda+bands-j] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda] - } - ix += incX - } - return - } - if incX == 1 { - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var sum float32 - xtmp := x[i+1 : i+1+bands] - for j, v := range xtmp { - sum += v * a[(i+j+1)*lda+k-j-1] - } - x[i] -= sum - if nonUnit { - x[i] /= a[i*lda+k] - } - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- { - bands := k - if i+bands >= n { - bands = n - i - 1 - } - var ( - sum float32 - jx int - ) - for j := 0; j < bands; j++ { - sum += x[ix+jx+incX] * a[(i+j+1)*lda+k-j-1] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= a[i*lda+k] - } - ix -= incX - } -} - -// Ssbmv performs -// y = alpha * A * x + beta * y -// where A is an n×n symmetric banded matrix, x and y are vectors, and alpha -// and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Ssbmv(ul blas.Uplo, n, k int, alpha float32, a []float32, lda int, x []float32, incX int, beta float32, y []float32, incY int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(n-1)+k+1 > len(a) || lda < k+1 { - panic(badLdA) - } - - // Quick return if possible - if n == 0 || (alpha == 0 && beta == 1) { - return - } - - // Set up indexes - lenX := n - lenY := n - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(lenY - 1) * incY - } - - // First form y := beta * y - if incY > 0 { - Implementation{}.Sscal(lenY, beta, y, incY) - } else { - Implementation{}.Sscal(lenY, beta, y, -incY) - } - - if alpha == 0 { - return - } - - if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - atmp := a[i*lda:] - tmp := alpha * x[i] - sum := tmp * atmp[0] - u := min(k, n-i-1) - jy := incY - for j := 1; j <= u; j++ { - v := atmp[j] - sum += alpha * x[i+j] * v - y[iy+jy] += tmp * v - jy += incY - } - y[iy] += sum - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - atmp := a[i*lda:] - tmp := alpha * x[ix] - sum := tmp * atmp[0] - u := min(k, n-i-1) - jx := incX - jy := incY - for j := 1; j <= u; j++ { - v := atmp[j] - sum += alpha * x[ix+jx] * v - y[iy+jy] += tmp * v - jx += incX - jy += incY - } - y[iy] += sum - ix += incX - iy += incY - } - return - } - - // Casses where a has bands below the diagonal. 
- if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - l := max(0, k-i) - tmp := alpha * x[i] - jy := l * incY - atmp := a[i*lda:] - for j := l; j < k; j++ { - v := atmp[j] - y[iy] += alpha * v * x[i-k+j] - y[iy-k*incY+jy] += tmp * v - jy += incY - } - y[iy] += tmp * atmp[k] - iy += incY - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - l := max(0, k-i) - tmp := alpha * x[ix] - jx := l * incX - jy := l * incY - atmp := a[i*lda:] - for j := l; j < k; j++ { - v := atmp[j] - y[iy] += alpha * v * x[ix-k*incX+jx] - y[iy-k*incY+jy] += tmp * v - jx += incX - jy += incY - } - y[iy] += tmp * atmp[k] - ix += incX - iy += incY - } - return -} - -// Ssyr performs the rank-one update -// a += alpha * x * x^T -// where a is an n×n symmetric matrix, and x is a vector. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssyr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32, lda int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if alpha == 0 || n == 0 { - return - } - - lenX := n - var kx int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda+i : i*lda+n] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += v * tmp - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - jx := ix - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += x[jx] * tmp - jx += incX - } - } - ix += incX - } - return - } - // Cases where a is lower triangular. 
- if incX == 1 { - for i := 0; i < n; i++ { - tmp := x[i] * alpha - if tmp != 0 { - atmp := a[i*lda:] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += tmp * v - } - } - } - return - } - ix := kx - for i := 0; i < n; i++ { - tmp := x[ix] * alpha - if tmp != 0 { - atmp := a[i*lda:] - jx := kx - for j := 0; j < i+1; j++ { - atmp[j] += tmp * x[jx] - jx += incX - } - } - ix += incX - } -} - -// Ssyr2 performs the symmetric rank-two update -// A += alpha * x * y^T + alpha * y * x^T -// where A is a symmetric n×n matrix, x and y are vectors, and alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssyr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, a []float32, lda int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if lda*(n-1)+n > len(a) || lda < max(1, n) { - panic(badLdA) - } - if alpha == 0 { - return - } - - var ky, kx int - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := i; j < n; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - xi := x[i] - yi := y[i] - atmp := a[i*lda:] - 
for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[j] + x[j]*yi) - } - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - xi := x[ix] - yi := y[iy] - atmp := a[i*lda:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - } - return -} - -// Stpsv solves -// A * x = b if tA == blas.NoTrans -// A^T * x = b if tA == blas.Trans or blas.ConjTrans -// where A is an n×n triangular matrix in packed format and x is a vector. -// At entry to the function, x contains the values of b, and the result is -// stored in place into x. -// -// No test for singularity or near-singularity is included in this -// routine. Such tests must be performed before calling this routine. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Stpsv(ul blas.Uplo, tA blas.Transpose, d blas.Diag, n int, ap []float32, x []float32, incX int) { - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if n < 0 { - panic(nLT0) - } - if len(ap) < (n*(n+1))/2 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if n == 0 { - return - } - var kx int - if incX <= 0 { - kx = -(n - 1) * incX - } - - nonUnit := d == blas.NonUnit - var offset int // Offset is the index of (i,i) - if tA == blas.NoTrans { - if ul == blas.Upper { - offset = n*(n+1)/2 - 1 - if incX == 1 { - for i := n - 1; i >= 0; i-- { - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - var sum float32 - for j, v := range atmp { - sum += v * xtmp[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset -= n - i + 1 - } - return - } - ix := kx + (n-1)*incX - for i := n - 1; i >= 0; i-- 
{ - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - var sum float32 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix -= incX - offset -= n - i + 1 - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i : offset] - var sum float32 - for j, v := range atmp { - sum += v * x[j] - } - x[i] -= sum - if nonUnit { - x[i] /= ap[offset] - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := ap[offset-i : offset] - var sum float32 - for _, v := range atmp { - sum += v * x[jx] - jx += incX - } - x[ix] -= sum - if nonUnit { - x[ix] /= ap[offset] - } - ix += incX - offset += i + 2 - } - return - } - // Cases where ap is transposed. - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset+1 : offset+n-i] - xtmp := x[i+1:] - for j, v := range atmp { - xtmp[j] -= v * xi - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[i] /= ap[offset] - } - xi := x[i] - atmp := ap[offset-i : offset] - for j, v := range atmp { - x[j] -= v * xi - } - offset -= i + 1 - } - return - } - ix := kx + (n-1)*incX - offset = n*(n+1)/2 - 1 - for i := n - 1; i >= 0; i-- { - if nonUnit { - x[ix] /= ap[offset] - } - xix := x[ix] - atmp := ap[offset-i : offset] - jx := kx - for _, v := range atmp { - x[jx] -= v * xix - jx += incX - } - ix -= incX - offset -= i + 1 - } -} - -// Sspmv performs -// y = alpha * A * x + beta * y, -// where A is an n×n symmetric matrix in packed format, x and y are vectors -// and alpha and beta are 
scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Sspmv(ul blas.Uplo, n int, alpha float32, a []float32, x []float32, incX int, beta float32, y []float32, incY int) { - // Verify inputs - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if len(a) < (n*(n+1))/2 { - panic(badLdA) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - // Quick return if possible - if n == 0 || (alpha == 0 && beta == 1) { - return - } - - // Set up start points - var kx, ky int - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - - // Form y = beta * y - if beta != 1 { - if incY > 0 { - Implementation{}.Sscal(n, beta, y, incY) - } else { - Implementation{}.Sscal(n, beta, y, -incY) - } - } - - if alpha == 0 { - return - } - - if n == 1 { - y[0] += alpha * a[0] * x[0] - return - } - var offset int // Offset is the index of (i,i). 
- if ul == blas.Upper { - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - sum := a[offset] * x[i] - atmp := a[offset+1 : offset+n-i] - xtmp := x[i+1:] - jy := ky + (i+1)*incY - for j, v := range atmp { - sum += v * xtmp[j] - y[jy] += v * xv - jy += incY - } - y[iy] += alpha * sum - iy += incY - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - sum := a[offset] * x[ix] - atmp := a[offset+1 : offset+n-i] - jx := kx + (i+1)*incX - jy := ky + (i+1)*incY - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - y[iy] += alpha * sum - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 { - iy := ky - for i := 0; i < n; i++ { - xv := x[i] * alpha - atmp := a[offset-i : offset] - jy := ky - var sum float32 - for j, v := range atmp { - sum += v * x[j] - y[jy] += v * xv - jy += incY - } - sum += a[offset] * x[i] - y[iy] += alpha * sum - iy += incY - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - xv := x[ix] * alpha - atmp := a[offset-i : offset] - jx := kx - jy := ky - var sum float32 - for _, v := range atmp { - sum += v * x[jx] - y[jy] += v * xv - jx += incX - jy += incY - } - - sum += a[offset] * x[ix] - y[iy] += alpha * sum - ix += incX - iy += incY - offset += i + 2 - } -} - -// Sspr computes the rank-one operation -// a += alpha * x * x^T -// where a is an n×n symmetric matrix in packed format, x is a vector, and -// alpha is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sspr(ul blas.Uplo, n int, alpha float32, x []float32, incX int, a []float32) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if len(a) < (n*(n+1))/2 { - panic(badLdA) - } - if alpha == 0 || n == 0 { - return - } - lenX := n - var kx int - if incX > 0 { - kx = 0 - } else { - kx = -(lenX - 1) * incX - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 { - for i := 0; i < n; i++ { - atmp := a[offset:] - xv := alpha * x[i] - xtmp := x[i:n] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += n - i - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx + i*incX - atmp := a[offset:] - xv := alpha * x[ix] - for j := 0; j < n-i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += n - i - } - return - } - if incX == 1 { - for i := 0; i < n; i++ { - atmp := a[offset-i:] - xv := alpha * x[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += xv * v - } - offset += i + 2 - } - return - } - ix := kx - for i := 0; i < n; i++ { - jx := kx - atmp := a[offset-i:] - xv := alpha * x[ix] - for j := 0; j <= i; j++ { - atmp[j] += xv * x[jx] - jx += incX - } - ix += incX - offset += i + 2 - } -} - -// Sspr2 performs the symmetric rank-2 update -// a += alpha * x * y^T + alpha * y * x^T -// where a is an n×n symmetric matrix in packed format and x and y are vectors. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sspr2(ul blas.Uplo, n int, alpha float32, x []float32, incX int, y []float32, incY int, ap []float32) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if incX == 0 { - panic(zeroIncX) - } - if incY == 0 { - panic(zeroIncY) - } - if (incX > 0 && (n-1)*incX >= len(x)) || (incX < 0 && (1-n)*incX >= len(x)) { - panic(badX) - } - if (incY > 0 && (n-1)*incY >= len(y)) || (incY < 0 && (1-n)*incY >= len(y)) { - panic(badY) - } - if len(ap) < (n*(n+1))/2 { - panic(badLdA) - } - if alpha == 0 { - return - } - var ky, kx int - if incY > 0 { - ky = 0 - } else { - ky = -(n - 1) * incY - } - if incX > 0 { - kx = 0 - } else { - kx = -(n - 1) * incX - } - var offset int // Offset is the index of (i,i). - if ul == blas.Upper { - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset:] - xi := x[i] - yi := y[i] - xtmp := x[i:n] - ytmp := y[i:n] - for j, v := range xtmp { - atmp[j] += alpha * (xi*ytmp[j] + v*yi) - } - offset += n - i - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx + i*incX - jy := ky + i*incY - atmp := ap[offset:] - xi := x[ix] - yi := y[iy] - for j := 0; j < n-i; j++ { - atmp[j] += alpha * (xi*y[jy] + x[jx]*yi) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += n - i - } - return - } - if incX == 1 && incY == 1 { - for i := 0; i < n; i++ { - atmp := ap[offset-i:] - xi := x[i] - yi := y[i] - xtmp := x[:i+1] - for j, v := range xtmp { - atmp[j] += alpha * (xi*y[j] + v*yi) - } - offset += i + 2 - } - return - } - ix := kx - iy := ky - for i := 0; i < n; i++ { - jx := kx - jy := ky - atmp := ap[offset-i:] - for j := 0; j <= i; j++ { - atmp[j] += alpha * (x[ix]*y[jy] + x[jx]*y[iy]) - jx += incX - jy += incY - } - ix += incX - iy += incY - offset += i + 2 - } -} diff --git a/vendor/github.com/gonum/blas/native/level3double.go b/vendor/github.com/gonum/blas/native/level3double.go deleted file mode 100644 index 
5a1965228..000000000 --- a/vendor/github.com/gonum/blas/native/level3double.go +++ /dev/null @@ -1,831 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -var _ blas.Float64Level3 = Implementation{} - -// Dtrsm solves -// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left -// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right -// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n triangular matrix, x is an m×n matrix, and alpha is a -// scalar. -// -// At entry to the function, X contains the values of B, and the result is -// stored in place into X. -// -// No check is made that A is invertible. -func (Implementation) Dtrsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if ldb < n { - panic(badLdB) - } - var k int - if s == blas.Left { - k = m - } else { - k = n - } - if lda*(k-1)+k > len(a) || lda < max(1, k) { - panic(badLdA) - } - if ldb*(m-1)+n > len(b) || ldb < max(1, n) { - panic(badLdB) - } - - if m == 0 || n == 0 { - return - } - - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == 
blas.Upper { - for i := m - 1; i >= 0; i-- { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - for j := range btmp { - btmp[j] *= alpha - } - } - for ka, va := range a[i*lda+i+1 : i*lda+m] { - k := ka + i + 1 - if va != 0 { - asm.DaxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - for j := 0; j < n; j++ { - btmp[j] *= tmp - } - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - for j := 0; j < n; j++ { - btmp[j] *= alpha - } - } - for k, va := range a[i*lda : i*lda+i] { - if va != 0 { - asm.DaxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - for j := 0; j < n; j++ { - btmp[j] *= tmp - } - } - } - return - } - // Cases where a is transposed - if ul == blas.Upper { - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - for ia, va := range a[k*lda+k+1 : k*lda+m] { - i := ia + k + 1 - if va != 0 { - btmp := b[i*ldb : i*ldb+n] - asm.DaxpyUnitary(-va, btmpk, btmp, btmp) - } - } - if alpha != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= alpha - } - } - } - return - } - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - for i, va := range a[k*lda : k*lda+k] { - if va != 0 { - btmp := b[i*ldb : i*ldb+n] - asm.DaxpyUnitary(-va, btmpk, btmp, btmp) - } - } - if alpha != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= alpha - } - } - } - return - } - // Cases where a is to the right of X. 
- if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - for j := 0; j < n; j++ { - btmp[j] *= alpha - } - } - for k, vb := range btmp { - if vb != 0 { - if btmp[k] != 0 { - if nonUnit { - btmp[k] /= a[k*lda+k] - } - btmpk := btmp[k+1 : n] - asm.DaxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmpk, btmpk) - } - } - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*lda : i*lda+n] - if alpha != 1 { - for j := 0; j < n; j++ { - btmp[j] *= alpha - } - } - for k := n - 1; k >= 0; k-- { - if btmp[k] != 0 { - if nonUnit { - btmp[k] /= a[k*lda+k] - } - asm.DaxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp, btmp) - } - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*lda : i*lda+n] - for j := n - 1; j >= 0; j-- { - tmp := alpha*btmp[j] - asm.DdotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*lda : i*lda+n] - for j := 0; j < n; j++ { - tmp := alpha*btmp[j] - asm.DdotUnitary(a[j*lda:j*lda+j], btmp) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } -} - -// Dsymm performs one of -// C = alpha * A * B + beta * C if side == blas.Left -// C = alpha * B * A + beta * C if side == blas.Right -// where A is an n×n symmetric matrix, B and C are m×n matrices, and alpha -// is a scalar. 
-func (Implementation) Dsymm(s blas.Side, ul blas.Uplo, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { - if s != blas.Right && s != blas.Left { - panic("goblas: bad side") - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - var k int - if s == blas.Left { - k = m - } else { - k = n - } - if lda*(k-1)+k > len(a) || lda < max(1, k) { - panic(badLdA) - } - if ldb*(m-1)+n > len(b) || ldb < max(1, n) { - panic(badLdB) - } - if ldc*(m-1)+n > len(c) || ldc < max(1, n) { - panic(badLdC) - } - if m == 0 || n == 0 { - return - } - if alpha == 0 && beta == 1 { - return - } - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := 0; j < n; j++ { - ctmp[j] *= beta - } - } - return - } - - isUpper := ul == blas.Upper - if s == blas.Left { - for i := 0; i < m; i++ { - atmp := alpha * a[i*lda+i] - btmp := b[i*ldb : i*ldb+n] - ctmp := c[i*ldc : i*ldc+n] - for j, v := range btmp { - ctmp[j] *= beta - ctmp[j] += atmp * v - } - - for k := 0; k < i; k++ { - var atmp float64 - if isUpper { - atmp = a[k*lda+i] - } else { - atmp = a[i*lda+k] - } - atmp *= alpha - ctmp := c[i*ldc : i*ldc+n] - asm.DaxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp, ctmp) - } - for k := i + 1; k < m; k++ { - var atmp float64 - if isUpper { - atmp = a[i*lda+k] - } else { - atmp = a[k*lda+i] - } - atmp *= alpha - ctmp := c[i*ldc : i*ldc+n] - asm.DaxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp, ctmp) - } - } - return - } - if isUpper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - tmp := alpha * b[i*ldb+j] - var tmp2 float64 - atmp := a[j*lda+j+1 : j*lda+n] - btmp := b[i*ldb+j+1 : i*ldb+n] - ctmp := c[i*ldc+j+1 : i*ldc+n] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta 
- c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } - return - } - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - tmp := alpha * b[i*ldb+j] - var tmp2 float64 - atmp := a[j*lda : j*lda+j] - btmp := b[i*ldb : i*ldb+j] - ctmp := c[i*ldc : i*ldc+j] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } -} - -// Dsyrk performs the symmetric rank-k operation -// C = alpha * A * A^T + beta*C -// C is an n×n symmetric matrix. A is an n×k matrix if tA == blas.NoTrans, and -// a k×n matrix otherwise. alpha and beta are scalars. -func (Implementation) Dsyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, beta float64, c []float64, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if ldc < n { - panic(badLdC) - } - var row, col int - if tA == blas.NoTrans { - row, col = n, k - } else { - row, col = k, n - } - if lda*(row-1)+col > len(a) || lda < max(1, col) { - panic(badLdA) - } - if ldc*(n-1)+n > len(c) || ldc < max(1, n) { - panic(badLdC) - } - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - atmp := a[i*lda : i*lda+k] - for jc, vc := range ctmp { - j := jc 
+ i - ctmp[jc] = vc*beta + alpha*asm.DdotUnitary(atmp, a[j*lda:j*lda+k]) - } - } - return - } - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - for j, vc := range c[i*ldc : i*ldc+i+1] { - c[i*ldc+j] = vc*beta + alpha*asm.DdotUnitary(a[j*lda:j*lda+k], atmp) - } - } - return - } - // Cases where a is transposed. - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - asm.DaxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp, ctmp) - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 0 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - asm.DaxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp, ctmp) - } - } - } -} - -// Dsyr2k performs the symmetric rank 2k operation -// C = alpha * A * B^T + alpha * B * A^T + beta * C -// where C is an n×n symmetric matrix. A and B are n×k matrices if -// tA == NoTrans and k×n otherwise. alpha and beta are scalars. 
-func (Implementation) Dsyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float64, a []float64, lda int, b []float64, ldb int, beta float64, c []float64, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if ldc < n { - panic(badLdC) - } - var row, col int - if tA == blas.NoTrans { - row, col = n, k - } else { - row, col = k, n - } - if lda*(row-1)+col > len(a) || lda < max(1, col) { - panic(badLdA) - } - if ldb*(row-1)+col > len(b) || ldb < max(1, col) { - panic(badLdB) - } - if ldc*(n-1)+n > len(c) || ldc < max(1, n) { - panic(badLdC) - } - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*lda : i*lda+k] - ctmp := c[i*ldc+i : i*ldc+n] - for jc := range ctmp { - j := i + jc - var tmp1, tmp2 float64 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[jc] *= beta - ctmp[jc] += alpha * (tmp1 + tmp2) - } - } - return - } - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*lda : i*lda+k] - ctmp := c[i*ldc : i*ldc+i+1] - for j := 0; j <= i; j++ { - var tmp1, tmp2 float64 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 
+= atmp[l] * binner[l] - } - ctmp[j] *= beta - ctmp[j] += alpha * (tmp1 + tmp2) - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*lda+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb+i : l*ldb+n] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda+i : l*lda+n] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*lda+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb : l*ldb+i+1] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda : l*lda+i+1] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } -} - -// Dtrmm performs -// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left -// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right -// B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n triangular matrix, and B is an m×n matrix. 
-func (Implementation) Dtrmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float64, a []float64, lda int, b []float64, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - var k int - if s == blas.Left { - k = m - } else { - k = n - } - if lda*(k-1)+k > len(a) || lda < max(1, k) { - panic(badLdA) - } - if ldb*(m-1)+n > len(b) || ldb < max(1, n) { - panic(badLdB) - } - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] *= tmp - } - for ka, va := range a[i*lda+i+1 : i*lda+m] { - k := ka + i + 1 - tmp := alpha * va - if tmp != 0 { - asm.DaxpyUnitary(tmp, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - } - return - } - for i := m - 1; i >= 0; i-- { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] *= tmp - } - for k, va := range a[i*lda : i*lda+i] { - tmp := alpha * va - if tmp != 0 { - asm.DaxpyUnitary(tmp, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - for ia, va := range a[k*lda+k+1 : k*lda+m] { - i := ia + k + 1 - btmp := b[i*ldb : i*ldb+n] - tmp := alpha * va - if tmp != 0 { - asm.DaxpyUnitary(tmp, btmpk, btmp, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - } - return - } - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - for i, va := range a[k*lda : k*lda+k] { - btmp := b[i*ldb : i*ldb+n] - tmp := alpha * va - if tmp != 0 { - asm.DaxpyUnitary(tmp, btmpk, btmp, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - } - return - } - // Cases where a is on the right - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := n - 1; k >= 0; k-- { - tmp := alpha * btmp[k] - if tmp != 0 { - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - for ja, v := range a[k*lda+k+1 : k*lda+n] { - j := ja + k + 1 - btmp[j] += tmp * v - } - } - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := 0; k < n; k++ { - tmp := alpha * btmp[k] - if tmp != 0 { - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - asm.DaxpyUnitary(tmp, a[k*lda:k*lda+k], btmp, btmp) - } - } - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j, vb := range btmp { - tmp := vb - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += asm.DdotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) - btmp[j] = alpha * tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - tmp := btmp[j] - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += asm.DdotUnitary(a[j*lda:j*lda+j], btmp[:j]) - btmp[j] = alpha * tmp - } - } -} diff --git a/vendor/github.com/gonum/blas/native/level3single.go b/vendor/github.com/gonum/blas/native/level3single.go deleted file mode 100644 index 6bc9a56da..000000000 --- a/vendor/github.com/gonum/blas/native/level3single.go +++ /dev/null @@ -1,843 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -var _ blas.Float32Level3 = Implementation{} - -// Strsm solves -// A * X = alpha * B if tA == blas.NoTrans and side == blas.Left -// A^T * X = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// X * A = alpha * B if tA == blas.NoTrans and side == blas.Right -// X * A^T = alpha * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n triangular matrix, x is an m×n matrix, and alpha is a -// scalar. -// -// At entry to the function, X contains the values of B, and the result is -// stored in place into X. -// -// No check is made that A is invertible. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Strsm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - if ldb < n { - panic(badLdB) - } - var k int - if s == blas.Left { - k = m - } else { - k = n - } - if lda*(k-1)+k > len(a) || lda < max(1, k) { - panic(badLdA) - } - if ldb*(m-1)+n > len(b) || ldb < max(1, n) { - panic(badLdB) - } - - if m == 0 || n == 0 { - return - } - - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := m - 1; i >= 0; i-- { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - for j := range btmp { - btmp[j] *= alpha - } - } - for ka, va := range a[i*lda+i+1 : i*lda+m] { - k := ka + i + 1 - if va != 0 { - asm.SaxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - for j := 0; j < n; j++ { - btmp[j] *= tmp - } - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - for j := 0; j < n; j++ { - btmp[j] *= alpha - } - } - for k, va := range a[i*lda : i*lda+i] { - if va != 0 { - asm.SaxpyUnitary(-va, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - if nonUnit { - tmp := 1 / a[i*lda+i] - for j := 0; j < n; j++ { - btmp[j] *= tmp - } - } - } - return - } - // Cases where a is transposed - if ul == blas.Upper { - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - for ia, va := range a[k*lda+k+1 : 
k*lda+m] { - i := ia + k + 1 - if va != 0 { - btmp := b[i*ldb : i*ldb+n] - asm.SaxpyUnitary(-va, btmpk, btmp, btmp) - } - } - if alpha != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= alpha - } - } - } - return - } - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - if nonUnit { - tmp := 1 / a[k*lda+k] - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - for i, va := range a[k*lda : k*lda+k] { - if va != 0 { - btmp := b[i*ldb : i*ldb+n] - asm.SaxpyUnitary(-va, btmpk, btmp, btmp) - } - } - if alpha != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= alpha - } - } - } - return - } - // Cases where a is to the right of X. - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - if alpha != 1 { - for j := 0; j < n; j++ { - btmp[j] *= alpha - } - } - for k, vb := range btmp { - if vb != 0 { - if btmp[k] != 0 { - if nonUnit { - btmp[k] /= a[k*lda+k] - } - btmpk := btmp[k+1 : n] - asm.SaxpyUnitary(-btmp[k], a[k*lda+k+1:k*lda+n], btmpk, btmpk) - } - } - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*lda : i*lda+n] - if alpha != 1 { - for j := 0; j < n; j++ { - btmp[j] *= alpha - } - } - for k := n - 1; k >= 0; k-- { - if btmp[k] != 0 { - if nonUnit { - btmp[k] /= a[k*lda+k] - } - asm.SaxpyUnitary(-btmp[k], a[k*lda:k*lda+k], btmp, btmp) - } - } - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*lda : i*lda+n] - for j := n - 1; j >= 0; j-- { - tmp := alpha*btmp[j] - asm.SdotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:]) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*lda : i*lda+n] - for j := 0; j < n; j++ { - tmp := alpha*btmp[j] - asm.SdotUnitary(a[j*lda:j*lda+j], btmp) - if nonUnit { - tmp /= a[j*lda+j] - } - btmp[j] = tmp - } - } -} - -// Ssymm performs one of -// C = alpha * A * B + beta * C if side == blas.Left -// C = alpha * B * A + beta * C if side == blas.Right -// where A is an n×n symmetric matrix, B and C are m×n matrices, and alpha -// is a scalar. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssymm(s blas.Side, ul blas.Uplo, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { - if s != blas.Right && s != blas.Left { - panic("goblas: bad side") - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - var k int - if s == blas.Left { - k = m - } else { - k = n - } - if lda*(k-1)+k > len(a) || lda < max(1, k) { - panic(badLdA) - } - if ldb*(m-1)+n > len(b) || ldb < max(1, n) { - panic(badLdB) - } - if ldc*(m-1)+n > len(c) || ldc < max(1, n) { - panic(badLdC) - } - if m == 0 || n == 0 { - return - } - if alpha == 0 && beta == 1 { - return - } - if alpha == 0 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < m; i++ { - ctmp := c[i*ldc : i*ldc+n] - for j := 0; j < n; j++ { - ctmp[j] *= beta - } - } - return - } - - isUpper := ul == blas.Upper - if s == blas.Left { - for i := 0; i < m; i++ { - atmp := alpha * a[i*lda+i] - btmp := b[i*ldb : i*ldb+n] - ctmp := c[i*ldc : i*ldc+n] - for j, v := range btmp { - ctmp[j] *= beta - ctmp[j] += atmp * v 
- } - - for k := 0; k < i; k++ { - var atmp float32 - if isUpper { - atmp = a[k*lda+i] - } else { - atmp = a[i*lda+k] - } - atmp *= alpha - ctmp := c[i*ldc : i*ldc+n] - asm.SaxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp, ctmp) - } - for k := i + 1; k < m; k++ { - var atmp float32 - if isUpper { - atmp = a[i*lda+k] - } else { - atmp = a[k*lda+i] - } - atmp *= alpha - ctmp := c[i*ldc : i*ldc+n] - asm.SaxpyUnitary(atmp, b[k*ldb:k*ldb+n], ctmp, ctmp) - } - } - return - } - if isUpper { - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - tmp := alpha * b[i*ldb+j] - var tmp2 float32 - atmp := a[j*lda+j+1 : j*lda+n] - btmp := b[i*ldb+j+1 : i*ldb+n] - ctmp := c[i*ldc+j+1 : i*ldc+n] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } - return - } - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - tmp := alpha * b[i*ldb+j] - var tmp2 float32 - atmp := a[j*lda : j*lda+j] - btmp := b[i*ldb : i*ldb+j] - ctmp := c[i*ldc : i*ldc+j] - for k, v := range atmp { - ctmp[k] += tmp * v - tmp2 += btmp[k] * v - } - c[i*ldc+j] *= beta - c[i*ldc+j] += tmp*a[j*lda+j] + alpha*tmp2 - } - } -} - -// Ssyrk performs the symmetric rank-k operation -// C = alpha * A * A^T + beta*C -// C is an n×n symmetric matrix. A is an n×k matrix if tA == blas.NoTrans, and -// a k×n matrix otherwise. alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Ssyrk(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, beta float32, c []float32, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if ldc < n { - panic(badLdC) - } - var row, col int - if tA == blas.NoTrans { - row, col = n, k - } else { - row, col = k, n - } - if lda*(row-1)+col > len(a) || lda < max(1, col) { - panic(badLdA) - } - if ldc*(n-1)+n > len(c) || ldc < max(1, n) { - panic(badLdC) - } - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - atmp := a[i*lda : i*lda+k] - for jc, vc := range ctmp { - j := jc + i - ctmp[jc] = vc*beta + alpha*asm.SdotUnitary(atmp, a[j*lda:j*lda+k]) - } - } - return - } - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - for j, vc := range c[i*ldc : i*ldc+i+1] { - c[i*ldc+j] = vc*beta + alpha*asm.SdotUnitary(a[j*lda:j*lda+k], atmp) - } - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - asm.SaxpyUnitary(tmp, a[l*lda+i:l*lda+n], ctmp, ctmp) - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 0 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp := alpha * a[l*lda+i] - if tmp != 0 { - asm.SaxpyUnitary(tmp, a[l*lda:l*lda+i+1], ctmp, ctmp) - } - } - } -} - -// Ssyr2k performs the symmetric rank 2k operation -// C = alpha * A * B^T + alpha * B * A^T + beta * C -// where C is an n×n symmetric matrix. A and B are n×k matrices if -// tA == NoTrans and k×n otherwise. alpha and beta are scalars. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Ssyr2k(ul blas.Uplo, tA blas.Transpose, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.Trans && tA != blas.NoTrans && tA != blas.ConjTrans { - panic(badTranspose) - } - if n < 0 { - panic(nLT0) - } - if k < 0 { - panic(kLT0) - } - if ldc < n { - panic(badLdC) - } - var row, col int - if tA == blas.NoTrans { - row, col = n, k - } else { - row, col = k, n - } - if lda*(row-1)+col > len(a) || lda < max(1, col) { - panic(badLdA) - } - if ldb*(row-1)+col > len(b) || ldb < max(1, col) { - panic(badLdB) - } - if ldc*(n-1)+n > len(c) || ldc < max(1, n) { - panic(badLdC) - } - if alpha == 0 { - if beta == 0 { - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] = 0 - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - 
for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - for j := range ctmp { - ctmp[j] *= beta - } - } - return - } - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*lda : i*lda+k] - ctmp := c[i*ldc+i : i*ldc+n] - for jc := range ctmp { - j := i + jc - var tmp1, tmp2 float32 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[jc] *= beta - ctmp[jc] += alpha * (tmp1 + tmp2) - } - } - return - } - for i := 0; i < n; i++ { - atmp := a[i*lda : i*lda+k] - btmp := b[i*lda : i*lda+k] - ctmp := c[i*ldc : i*ldc+i+1] - for j := 0; j <= i; j++ { - var tmp1, tmp2 float32 - binner := b[j*ldb : j*ldb+k] - for l, v := range a[j*lda : j*lda+k] { - tmp1 += v * btmp[l] - tmp2 += atmp[l] * binner[l] - } - ctmp[j] *= beta - ctmp[j] += alpha * (tmp1 + tmp2) - } - } - return - } - if ul == blas.Upper { - for i := 0; i < n; i++ { - ctmp := c[i*ldc+i : i*ldc+n] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*lda+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb+i : l*ldb+n] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda+i : l*lda+n] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } - return - } - for i := 0; i < n; i++ { - ctmp := c[i*ldc : i*ldc+i+1] - if beta != 1 { - for j := range ctmp { - ctmp[j] *= beta - } - } - for l := 0; l < k; l++ { - tmp1 := alpha * b[l*lda+i] - tmp2 := alpha * a[l*lda+i] - btmp := b[l*ldb : l*ldb+i+1] - if tmp1 != 0 || tmp2 != 0 { - for j, v := range a[l*lda : l*lda+i+1] { - ctmp[j] += v*tmp1 + btmp[j]*tmp2 - } - } - } - } -} - -// Strmm performs -// B = alpha * A * B if tA == blas.NoTrans and side == blas.Left -// B = alpha * A^T * B if tA == blas.Trans or blas.ConjTrans, and side == blas.Left -// B = alpha * B * A if tA == blas.NoTrans and side == blas.Right -// 
B = alpha * B * A^T if tA == blas.Trans or blas.ConjTrans, and side == blas.Right -// where A is an n×n triangular matrix, and B is an m×n matrix. -// -// Float32 implementations are autogenerated and not directly tested. -func (Implementation) Strmm(s blas.Side, ul blas.Uplo, tA blas.Transpose, d blas.Diag, m, n int, alpha float32, a []float32, lda int, b []float32, ldb int) { - if s != blas.Left && s != blas.Right { - panic(badSide) - } - if ul != blas.Lower && ul != blas.Upper { - panic(badUplo) - } - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if d != blas.NonUnit && d != blas.Unit { - panic(badDiag) - } - if m < 0 { - panic(mLT0) - } - if n < 0 { - panic(nLT0) - } - var k int - if s == blas.Left { - k = m - } else { - k = n - } - if lda*(k-1)+k > len(a) || lda < max(1, k) { - panic(badLdA) - } - if ldb*(m-1)+n > len(b) || ldb < max(1, n) { - panic(badLdB) - } - if alpha == 0 { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] = 0 - } - } - return - } - - nonUnit := d == blas.NonUnit - if s == blas.Left { - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] *= tmp - } - for ka, va := range a[i*lda+i+1 : i*lda+m] { - k := ka + i + 1 - tmp := alpha * va - if tmp != 0 { - asm.SaxpyUnitary(tmp, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - } - return - } - for i := m - 1; i >= 0; i-- { - tmp := alpha - if nonUnit { - tmp *= a[i*lda+i] - } - btmp := b[i*ldb : i*ldb+n] - for j := range btmp { - btmp[j] *= tmp - } - for k, va := range a[i*lda : i*lda+i] { - tmp := alpha * va - if tmp != 0 { - asm.SaxpyUnitary(tmp, b[k*ldb:k*ldb+n], btmp, btmp) - } - } - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - for k := m - 1; k >= 0; k-- { - btmpk := b[k*ldb : k*ldb+n] - for ia, va := range a[k*lda+k+1 : k*lda+m] { - i := ia + k + 1 - btmp := b[i*ldb : i*ldb+n] - tmp := alpha * va - if tmp != 0 { - asm.SaxpyUnitary(tmp, btmpk, btmp, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - } - return - } - for k := 0; k < m; k++ { - btmpk := b[k*ldb : k*ldb+n] - for i, va := range a[k*lda : k*lda+k] { - btmp := b[i*ldb : i*ldb+n] - tmp := alpha * va - if tmp != 0 { - asm.SaxpyUnitary(tmp, btmpk, btmp, btmp) - } - } - tmp := alpha - if nonUnit { - tmp *= a[k*lda+k] - } - if tmp != 1 { - for j := 0; j < n; j++ { - btmpk[j] *= tmp - } - } - } - return - } - // Cases where a is on the right - if tA == blas.NoTrans { - if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := n - 1; k >= 0; k-- { - tmp := alpha * btmp[k] - if tmp != 0 { - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - for ja, v := range a[k*lda+k+1 : k*lda+n] { - j := ja + k + 1 - btmp[j] += tmp * v - } - } - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for k := 0; k < n; k++ { - tmp := alpha * btmp[k] - if tmp != 0 { - btmp[k] = tmp - if nonUnit { - btmp[k] *= a[k*lda+k] - } - asm.SaxpyUnitary(tmp, a[k*lda:k*lda+k], btmp, btmp) - } - } - } - return - } - // Cases where a is transposed. 
- if ul == blas.Upper { - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j, vb := range btmp { - tmp := vb - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += asm.SdotUnitary(a[j*lda+j+1:j*lda+n], btmp[j+1:n]) - btmp[j] = alpha * tmp - } - } - return - } - for i := 0; i < m; i++ { - btmp := b[i*ldb : i*ldb+n] - for j := n - 1; j >= 0; j-- { - tmp := btmp[j] - if nonUnit { - tmp *= a[j*lda+j] - } - tmp += asm.SdotUnitary(a[j*lda:j*lda+j], btmp[:j]) - btmp[j] = alpha * tmp - } - } -} diff --git a/vendor/github.com/gonum/blas/native/native.go b/vendor/github.com/gonum/blas/native/native.go deleted file mode 100644 index 43ec9bb5f..000000000 --- a/vendor/github.com/gonum/blas/native/native.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate ./single_precision - -package native - -type Implementation struct{} - -// The following are panic strings used during parameter checks. -const ( - negativeN = "blas: n < 0" - zeroIncX = "blas: zero x index increment" - zeroIncY = "blas: zero y index increment" - badLenX = "blas: x index out of range" - badLenY = "blas: y index out of range" - - mLT0 = "blas: m < 0" - nLT0 = "blas: n < 0" - kLT0 = "blas: k < 0" - kLLT0 = "blas: kL < 0" - kULT0 = "blas: kU < 0" - - badUplo = "blas: illegal triangle" - badTranspose = "blas: illegal transpose" - badDiag = "blas: illegal diagonal" - badSide = "blas: illegal side" - - badLdA = "blas: index of a out of range" - badLdB = "blas: index of b out of range" - badLdC = "blas: index of c out of range" - - badX = "blas: x index out of range" - badY = "blas: y index out of range" -) - -// [SD]gemm behavior constants. These are kept here to keep them out of the -// way during single precision code genration. 
-const ( - blockSize = 64 // b x b matrix - minParBlock = 4 // minimum number of blocks needed to go parallel - buffMul = 4 // how big is the buffer relative to the number of workers -) - -// [SD]gemm debugging constant. -const debug = false - -// subMul is a common type shared by [SD]gemm. -type subMul struct { - i, j int // index of block -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a > b { - return b - } - return a -} diff --git a/vendor/github.com/gonum/blas/native/sgemm.go b/vendor/github.com/gonum/blas/native/sgemm.go deleted file mode 100644 index 047d4e520..000000000 --- a/vendor/github.com/gonum/blas/native/sgemm.go +++ /dev/null @@ -1,395 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "fmt" - "runtime" - "sync" - - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -// Sgemm computes -// C = beta * C + alpha * A * B. -// tA and tB specify whether A or B are transposed. A, B, and C are m×n dense -// matrices. -// -// Float32 implementations are autogenerated and not directly tested. 
-func (Implementation) Sgemm(tA, tB blas.Transpose, m, n, k int, alpha float32, a []float32, lda int, b []float32, ldb int, beta float32, c []float32, ldc int) { - if tA != blas.NoTrans && tA != blas.Trans && tA != blas.ConjTrans { - panic(badTranspose) - } - if tB != blas.NoTrans && tB != blas.Trans && tB != blas.ConjTrans { - panic(badTranspose) - } - - var amat, bmat, cmat general32 - if tA != blas.NoTrans { - amat = general32{ - data: a, - rows: k, - cols: m, - stride: lda, - } - } else { - amat = general32{ - data: a, - rows: m, - cols: k, - stride: lda, - } - } - err := amat.check('a') - if err != nil { - panic(err.Error()) - } - if tB != blas.NoTrans { - bmat = general32{ - data: b, - rows: n, - cols: k, - stride: ldb, - } - } else { - bmat = general32{ - data: b, - rows: k, - cols: n, - stride: ldb, - } - } - - err = bmat.check('b') - if err != nil { - panic(err.Error()) - } - cmat = general32{ - data: c, - rows: m, - cols: n, - stride: ldc, - } - err = cmat.check('c') - if err != nil { - panic(err.Error()) - } - - // scale c - if beta != 1 { - if beta == 0 { - for i := 0; i < m; i++ { - ctmp := cmat.data[i*cmat.stride : i*cmat.stride+cmat.cols] - for j := range ctmp { - ctmp[j] = 0 - } - } - } else { - for i := 0; i < m; i++ { - ctmp := cmat.data[i*cmat.stride : i*cmat.stride+cmat.cols] - for j := range ctmp { - ctmp[j] *= beta - } - } - } - } - - sgemmParallel(tA, tB, amat, bmat, cmat, alpha) -} - -func sgemmParallel(tA, tB blas.Transpose, a, b, c general32, alpha float32) { - // dgemmParallel computes a parallel matrix multiplication by partitioning - // a and b into sub-blocks, and updating c with the multiplication of the sub-block - // In all cases, - // A = [ A_11 A_12 ... A_1j - // A_21 A_22 ... A_2j - // ... - // A_i1 A_i2 ... A_ij] - // - // and same for B. All of the submatrix sizes are blockSize*blockSize except - // at the edges. - // In all cases, there is one dimension for each matrix along which - // C must be updated sequentially. 
- // Cij = \sum_k Aik Bki, (A * B) - // Cij = \sum_k Aki Bkj, (A^T * B) - // Cij = \sum_k Aik Bjk, (A * B^T) - // Cij = \sum_k Aki Bjk, (A^T * B^T) - // - // This code computes one {i, j} block sequentially along the k dimension, - // and computes all of the {i, j} blocks concurrently. This - // partitioning allows Cij to be updated in-place without race-conditions. - // Instead of launching a goroutine for each possible concurrent computation, - // a number of worker goroutines are created and channels are used to pass - // available and completed cases. - // - // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix - // multiplies, though this code does not copy matrices to attempt to eliminate - // cache misses. - - aTrans := tA == blas.Trans || tA == blas.ConjTrans - bTrans := tB == blas.Trans || tB == blas.ConjTrans - - maxKLen, parBlocks := computeNumBlocks32(a, b, aTrans, bTrans) - if parBlocks < minParBlock { - // The matrix multiplication is small in the dimensions where it can be - // computed concurrently. Just do it in serial. - sgemmSerial(tA, tB, a, b, c, alpha) - return - } - - nWorkers := runtime.GOMAXPROCS(0) - if parBlocks < nWorkers { - nWorkers = parBlocks - } - // There is a tradeoff between the workers having to wait for work - // and a large buffer making operations slow. - buf := buffMul * nWorkers - if buf > parBlocks { - buf = parBlocks - } - - sendChan := make(chan subMul, buf) - - // Launch workers. A worker receives an {i, j} submatrix of c, and computes - // A_ik B_ki (or the transposed version) storing the result in c_ij. When the - // channel is finally closed, it signals to the waitgroup that it has finished - // computing. - var wg sync.WaitGroup - for i := 0; i < nWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // Make local copies of otherwise global variables to reduce shared memory. - // This has a noticable effect on benchmarks in some cases. 
- alpha := alpha - aTrans := aTrans - bTrans := bTrans - crows := c.rows - ccols := c.cols - for sub := range sendChan { - i := sub.i - j := sub.j - leni := blockSize - if i+leni > crows { - leni = crows - i - } - lenj := blockSize - if j+lenj > ccols { - lenj = ccols - j - } - cSub := c.view(i, j, leni, lenj) - - // Compute A_ik B_kj for all k - for k := 0; k < maxKLen; k += blockSize { - lenk := blockSize - if k+lenk > maxKLen { - lenk = maxKLen - k - } - var aSub, bSub general32 - if aTrans { - aSub = a.view(k, i, lenk, leni) - } else { - aSub = a.view(i, k, leni, lenk) - } - if bTrans { - bSub = b.view(j, k, lenj, lenk) - } else { - bSub = b.view(k, j, lenk, lenj) - } - - sgemmSerial(tA, tB, aSub, bSub, cSub, alpha) - } - } - }() - } - - // Send out all of the {i, j} subblocks for computation. - for i := 0; i < c.rows; i += blockSize { - for j := 0; j < c.cols; j += blockSize { - sendChan <- subMul{ - i: i, - j: j, - } - } - } - close(sendChan) - wg.Wait() -} - -// computeNumBlocks says how many blocks there are to compute. maxKLen says the length of the -// k dimension, parBlocks is the number of blocks that could be computed in parallel -// (the submatrices in i and j). expect is the full number of blocks that will be computed. 
-func computeNumBlocks32(a, b general32, aTrans, bTrans bool) (maxKLen, parBlocks int) { - aRowBlocks := a.rows / blockSize - if a.rows%blockSize != 0 { - aRowBlocks++ - } - aColBlocks := a.cols / blockSize - if a.cols%blockSize != 0 { - aColBlocks++ - } - bRowBlocks := b.rows / blockSize - if b.rows%blockSize != 0 { - bRowBlocks++ - } - bColBlocks := b.cols / blockSize - if b.cols%blockSize != 0 { - bColBlocks++ - } - - switch { - case !aTrans && !bTrans: - // Cij = \sum_k Aik Bki - maxKLen = a.cols - parBlocks = aRowBlocks * bColBlocks - case aTrans && !bTrans: - // Cij = \sum_k Aki Bkj - maxKLen = a.rows - parBlocks = aColBlocks * bColBlocks - case !aTrans && bTrans: - // Cij = \sum_k Aik Bjk - maxKLen = a.cols - parBlocks = aRowBlocks * bRowBlocks - case aTrans && bTrans: - // Cij = \sum_k Aki Bjk - maxKLen = a.rows - parBlocks = aColBlocks * bRowBlocks - } - return -} - -// sgemmSerial is serial matrix multiply -func sgemmSerial(tA, tB blas.Transpose, a, b, c general32, alpha float32) { - switch { - case tA == blas.NoTrans && tB == blas.NoTrans: - sgemmSerialNotNot(a, b, c, alpha) - return - case tA != blas.NoTrans && tB == blas.NoTrans: - sgemmSerialTransNot(a, b, c, alpha) - return - case tA == blas.NoTrans && tB != blas.NoTrans: - sgemmSerialNotTrans(a, b, c, alpha) - return - case tA != blas.NoTrans && tB != blas.NoTrans: - sgemmSerialTransTrans(a, b, c, alpha) - return - default: - panic("unreachable") - } -} - -// sgemmSerial where neither a nor b are transposed -func sgemmSerialNotNot(a, b, c general32, alpha float32) { - if debug { - if a.cols != b.rows { - panic("inner dimension mismatch") - } - if a.rows != c.rows { - panic("outer dimension mismatch") - } - if b.cols != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. 
- for i := 0; i < a.rows; i++ { - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - for l, v := range a.data[i*a.stride : i*a.stride+a.cols] { - tmp := alpha * v - if tmp != 0 { - asm.SaxpyUnitary(tmp, b.data[l*b.stride:l*b.stride+b.cols], ctmp, ctmp) - } - } - } -} - -// sgemmSerial where neither a is transposed and b is not -func sgemmSerialTransNot(a, b, c general32, alpha float32) { - if debug { - if a.rows != b.rows { - fmt.Println(a.rows, b.rows) - panic("inner dimension mismatch") - } - if a.cols != c.rows { - panic("outer dimension mismatch") - } - if b.cols != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. - for l := 0; l < a.rows; l++ { - btmp := b.data[l*b.stride : l*b.stride+b.cols] - for i, v := range a.data[l*a.stride : l*a.stride+a.cols] { - tmp := alpha * v - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - if tmp != 0 { - asm.SaxpyUnitary(tmp, btmp, ctmp, ctmp) - } - } - } -} - -// sgemmSerial where neither a is not transposed and b is -func sgemmSerialNotTrans(a, b, c general32, alpha float32) { - if debug { - if a.cols != b.cols { - panic("inner dimension mismatch") - } - if a.rows != c.rows { - panic("outer dimension mismatch") - } - if b.rows != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. 
- for i := 0; i < a.rows; i++ { - atmp := a.data[i*a.stride : i*a.stride+a.cols] - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - for j := 0; j < b.rows; j++ { - ctmp[j] += alpha * asm.SdotUnitary(atmp, b.data[j*b.stride:j*b.stride+b.cols]) - } - } - -} - -// sgemmSerial where both are transposed -func sgemmSerialTransTrans(a, b, c general32, alpha float32) { - if debug { - if a.rows != b.cols { - panic("inner dimension mismatch") - } - if a.cols != c.rows { - panic("outer dimension mismatch") - } - if b.rows != c.cols { - panic("outer dimension mismatch") - } - } - - // This style is used instead of the literal [i*stride +j]) is used because - // approximately 5 times faster as of go 1.3. - for l := 0; l < a.rows; l++ { - for i, v := range a.data[l*a.stride : l*a.stride+a.cols] { - ctmp := c.data[i*c.stride : i*c.stride+c.cols] - if v != 0 { - tmp := alpha * v - if tmp != 0 { - asm.SaxpyInc(tmp, b.data[l:], ctmp, uintptr(b.rows), uintptr(b.stride), 1, 0, 0) - } - } - } - } -} diff --git a/vendor/github.com/gonum/blas/native/single_precision b/vendor/github.com/gonum/blas/native/single_precision deleted file mode 100755 index 46259f981..000000000 --- a/vendor/github.com/gonum/blas/native/single_precision +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env bash - -# Copyright ©2015 The gonum Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -WARNING='//\ -// Float32 implementations are autogenerated and not directly tested.\ -' - -# Level1 routines. - -echo Generating level1single.go -echo -e '// Generated code do not edit. 
Run `go generate`.\n' > level1single.go -cat level1double.go \ -| gofmt -r 'blas.Float64Level1 -> blas.Float32Level1' \ -\ -| gofmt -r 'float64 -> float32' \ -| gofmt -r 'blas.DrotmParams -> blas.SrotmParams' \ -\ -| gofmt -r 'asm.DaxpyInc -> asm.SaxpyInc' \ -| gofmt -r 'asm.DaxpyUnitary -> asm.SaxpyUnitary' \ -| gofmt -r 'asm.DdotInc -> asm.SdotInc' \ -| gofmt -r 'asm.DdotUnitary -> asm.SdotUnitary' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ - -e 's_^// D_// S_' \ - -e "s_^\(func (Implementation) \)Id\(.*\)\$_$WARNING\1Is\2_" \ - -e 's_^// Id_// Is_' \ - -e 's_"math"_math "github.com/gonum/blas/native/internal/math32"_' \ ->> level1single.go - -echo Generating level1single_sdot.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > level1single_sdot.go -cat level1double_ddot.go \ -| gofmt -r 'float64 -> float32' \ -\ -| gofmt -r 'asm.DdotInc -> asm.SdotInc' \ -| gofmt -r 'asm.DdotUnitary -> asm.SdotUnitary' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ - -e 's_^// D_// S_' \ ->> level1single_sdot.go - -echo Generating level1single_dsdot.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > level1single_dsdot.go -cat level1double_ddot.go \ -| gofmt -r '[]float64 -> []float32' \ -\ -| gofmt -r 'asm.DdotInc -> asm.DsdotInc' \ -| gofmt -r 'asm.DdotUnitary -> asm.DsdotUnitary' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1Ds\2_" \ - -e 's_^// D_// Ds_' \ ->> level1single_dsdot.go - -echo Generating level1single_sdsdot.go -echo -e '// Generated code do not edit. 
Run `go generate`.\n' > level1single_sdsdot.go -cat level1double_ddot.go \ -| gofmt -r 'float64 -> float32' \ -\ -| gofmt -r 'asm.DdotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)) -> alpha + float32(asm.DsdotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)))' \ -| gofmt -r 'asm.DdotUnitary(a, b) -> alpha + float32(asm.DsdotUnitary(a, b))' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1Sds\2_" \ - -e 's_^// D\(.*\)$_// Sds\1 plus a constant_' \ - -e 's_\\sum_alpha + \\sum_' \ - -e 's/n int/n int, alpha float32/' \ ->> level1single_sdsdot.go - - -# Level2 routines. - -echo Generating level2single.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > level2single.go -cat level2double.go \ -| gofmt -r 'blas.Float64Level2 -> blas.Float32Level2' \ -\ -| gofmt -r 'float64 -> float32' \ -\ -| gofmt -r 'Dscal -> Sscal' \ -\ -| gofmt -r 'asm.DaxpyInc -> asm.SaxpyInc' \ -| gofmt -r 'asm.DaxpyUnitary -> asm.SaxpyUnitary' \ -| gofmt -r 'asm.DdotInc -> asm.SdotInc' \ -| gofmt -r 'asm.DdotUnitary -> asm.SdotUnitary' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ - -e 's_^// D_// S_' \ ->> level2single.go - - -# Level3 routines. - -echo Generating level3single.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > level3single.go -cat level3double.go \ -| gofmt -r 'blas.Float64Level3 -> blas.Float32Level3' \ -\ -| gofmt -r 'float64 -> float32' \ -\ -| gofmt -r 'asm.DaxpyUnitary -> asm.SaxpyUnitary' \ -| gofmt -r 'asm.DdotUnitary -> asm.SdotUnitary' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ - -e 's_^// D_// S_' \ ->> level3single.go - -echo Generating general_single.go -echo -e '// Generated code do not edit. 
Run `go generate`.\n' > general_single.go -cat general_double.go \ -| gofmt -r 'float64 -> float32' \ -\ -| gofmt -r 'general64 -> general32' \ -| gofmt -r 'newGeneral64 -> newGeneral32' \ -\ -| sed -e 's/(g general64) print()/(g general32) print()/' \ - -e 's_"math"_math "github.com/gonum/blas/native/internal/math32"_' \ ->> general_single.go - -echo Generating sgemm.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > sgemm.go -cat dgemm.go \ -| gofmt -r 'float64 -> float32' \ -| gofmt -r 'general64 -> general32' \ -\ -| gofmt -r 'dgemmParallel -> sgemmParallel' \ -| gofmt -r 'computeNumBlocks64 -> computeNumBlocks32' \ -| gofmt -r 'dgemmSerial -> sgemmSerial' \ -| gofmt -r 'dgemmSerialNotNot -> sgemmSerialNotNot' \ -| gofmt -r 'dgemmSerialTransNot -> sgemmSerialTransNot' \ -| gofmt -r 'dgemmSerialNotTrans -> sgemmSerialNotTrans' \ -| gofmt -r 'dgemmSerialTransTrans -> sgemmSerialTransTrans' \ -\ -| gofmt -r 'asm.DaxpyInc -> asm.SaxpyInc' \ -| gofmt -r 'asm.DaxpyUnitary -> asm.SaxpyUnitary' \ -| gofmt -r 'asm.DdotInc -> asm.SdotInc' \ -| gofmt -r 'asm.DdotUnitary -> asm.SdotUnitary' \ -\ -| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNING\1S\2_" \ - -e 's_^// D_// S_' \ - -e 's_^// d_// s_' \ ->> sgemm.go diff --git a/vendor/github.com/gonum/graph/.gitignore b/vendor/github.com/gonum/graph/.gitignore deleted file mode 100644 index 86e0d2404..000000000 --- a/vendor/github.com/gonum/graph/.gitignore +++ /dev/null @@ -1 +0,0 @@ -test.out \ No newline at end of file diff --git a/vendor/github.com/gonum/graph/.travis.yml b/vendor/github.com/gonum/graph/.travis.yml deleted file mode 100644 index a5e9aa153..000000000 --- a/vendor/github.com/gonum/graph/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go - -# Versions of go that are explicitly supported by gonum. -go: - - 1.5beta1 - - 1.3.3 - - 1.4.2 - -# Required for coverage. 
-before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - -# Get deps, build, test, and ensure the code is gofmt'ed. -# If we are building as gonum, then we have access to the coveralls api key, so we can run coverage as well. -script: - - go get -d -t -v ./... - - go build -v ./... - - go test -v ./... - - diff <(gofmt -d .) <("") - - if [[ $TRAVIS_SECURE_ENV_VARS = "true" ]]; then bash ./.travis/test-coverage.sh; fi - -notifications: - email: - recipients: - - jragonmiris@gmail.com - on_success: change - on_failure: always - diff --git a/vendor/github.com/gonum/graph/README.md b/vendor/github.com/gonum/graph/README.md deleted file mode 100644 index 3c4c17962..000000000 --- a/vendor/github.com/gonum/graph/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Gonum Graph [![Build Status](https://travis-ci.org/gonum/graph.svg?branch=master)](https://travis-ci.org/gonum/graph) [![Coverage Status](https://img.shields.io/coveralls/gonum/graph.svg)](https://coveralls.io/r/gonum/graph?branch=master) - -This is a generalized graph package for the Go language. It aims to provide a clean, transparent API for common algorithms on arbitrary graphs such as finding the graph's strongly connected components, dominators, or searces. - -The package is currently in testing, and the API is "semi-stable". The signatures of any functions like AStar are unlikely to change much, but the Graph, Node, and Edge interfaces may change a bit. - -## Issues - -If you find any bugs, feel free to file an issue on the github issue tracker. Discussions on API changes, added features, code review, or similar requests are preferred on the Gonum-dev Google Group. - -https://groups.google.com/forum/#!forum/gonum-dev - -## License - -Please see github.com/gonum/license for general license information, contributors, authors, etc on the Gonum suite of packages. 
diff --git a/vendor/github.com/gonum/graph/concrete/concrete.go b/vendor/github.com/gonum/graph/concrete/concrete.go deleted file mode 100644 index 4b272a76c..000000000 --- a/vendor/github.com/gonum/graph/concrete/concrete.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package concrete - -// TODO(anyone) Package level documentation for this describing the overall -// reason for the package and a summary for the provided types. diff --git a/vendor/github.com/gonum/graph/concrete/dense_directed_matrix.go b/vendor/github.com/gonum/graph/concrete/dense_directed_matrix.go deleted file mode 100644 index 5e72db684..000000000 --- a/vendor/github.com/gonum/graph/concrete/dense_directed_matrix.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package concrete - -import ( - "github.com/gonum/graph" - "github.com/gonum/matrix/mat64" -) - -// DirectedDenseGraph represents a graph such that all IDs are in a contiguous -// block from 0 to n-1. -type DirectedDenseGraph struct { - absent float64 - mat *mat64.Dense -} - -// NewDirectedDenseGraph creates a directed dense graph with n nodes. -// If passable is true all pairs of nodes will be connected by an edge -// with unit cost, otherwise every node will start unconnected with -// the cost specified by absent. -func NewDirectedDenseGraph(n int, passable bool, absent float64) *DirectedDenseGraph { - mat := make([]float64, n*n) - v := 1. 
- if !passable { - v = absent - } - for i := range mat { - mat[i] = v - } - return &DirectedDenseGraph{mat: mat64.NewDense(n, n, mat), absent: absent} -} - -func (g *DirectedDenseGraph) Has(n graph.Node) bool { - id := n.ID() - r, _ := g.mat.Dims() - return 0 <= id && id < r -} - -func (g *DirectedDenseGraph) Nodes() []graph.Node { - r, _ := g.mat.Dims() - nodes := make([]graph.Node, r) - for i := 0; i < r; i++ { - nodes[i] = Node(i) - } - return nodes -} - -func (g *DirectedDenseGraph) Edges() []graph.Edge { - var edges []graph.Edge - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - for j := 0; j < r; j++ { - if i == j { - continue - } - if !isSame(g.mat.At(i, j), g.absent) { - edges = append(edges, Edge{Node(i), Node(j)}) - } - } - } - return edges -} - -func (g *DirectedDenseGraph) From(n graph.Node) []graph.Node { - var neighbors []graph.Node - id := n.ID() - _, c := g.mat.Dims() - for j := 0; j < c; j++ { - if j == id { - continue - } - if !isSame(g.mat.At(id, j), g.absent) { - neighbors = append(neighbors, Node(j)) - } - } - return neighbors -} - -func (g *DirectedDenseGraph) To(n graph.Node) []graph.Node { - var neighbors []graph.Node - id := n.ID() - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - if i == id { - continue - } - if !isSame(g.mat.At(i, id), g.absent) { - neighbors = append(neighbors, Node(i)) - } - } - return neighbors -} - -func (g *DirectedDenseGraph) HasEdge(x, y graph.Node) bool { - xid := x.ID() - yid := y.ID() - return xid != yid && (!isSame(g.mat.At(xid, yid), g.absent) || !isSame(g.mat.At(yid, xid), g.absent)) -} - -func (g *DirectedDenseGraph) Edge(u, v graph.Node) graph.Edge { - if g.HasEdge(u, v) { - return Edge{u, v} - } - return nil -} - -func (g *DirectedDenseGraph) HasEdgeFromTo(u, v graph.Node) bool { - uid := u.ID() - vid := v.ID() - return uid != vid && !isSame(g.mat.At(uid, vid), g.absent) -} - -func (g *DirectedDenseGraph) Weight(e graph.Edge) float64 { - return g.mat.At(e.From().ID(), e.To().ID()) -} - -func (g 
*DirectedDenseGraph) SetEdgeWeight(e graph.Edge, weight float64) { - fid := e.From().ID() - tid := e.To().ID() - if fid == tid { - panic("concrete: set edge cost of illegal edge") - } - g.mat.Set(fid, tid, weight) -} - -func (g *DirectedDenseGraph) RemoveEdge(e graph.Edge) { - g.mat.Set(e.From().ID(), e.To().ID(), g.absent) -} - -func (g *DirectedDenseGraph) Matrix() mat64.Matrix { - // Prevent alteration of dimensions of the returned matrix. - m := *g.mat - return &m -} diff --git a/vendor/github.com/gonum/graph/concrete/dense_undirected_matrix.go b/vendor/github.com/gonum/graph/concrete/dense_undirected_matrix.go deleted file mode 100644 index d5aca0285..000000000 --- a/vendor/github.com/gonum/graph/concrete/dense_undirected_matrix.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package concrete - -import ( - "github.com/gonum/graph" - "github.com/gonum/matrix/mat64" -) - -// UndirectedDenseGraph represents a graph such that all IDs are in a contiguous -// block from 0 to n-1. -type UndirectedDenseGraph struct { - absent float64 - mat *mat64.SymDense -} - -// NewUndirectedDenseGraph creates an undirected dense graph with n nodes. -// If passable is true all pairs of nodes will be connected by an edge -// with unit cost, otherwise every node will start unconnected with -// the cost specified by absent. -func NewUndirectedDenseGraph(n int, passable bool, absent float64) *UndirectedDenseGraph { - mat := make([]float64, n*n) - v := 1. 
- if !passable { - v = absent - } - for i := range mat { - mat[i] = v - } - return &UndirectedDenseGraph{mat: mat64.NewSymDense(n, mat), absent: absent} -} - -func (g *UndirectedDenseGraph) Has(n graph.Node) bool { - id := n.ID() - r := g.mat.Symmetric() - return 0 <= id && id < r -} - -func (g *UndirectedDenseGraph) Nodes() []graph.Node { - r := g.mat.Symmetric() - nodes := make([]graph.Node, r) - for i := 0; i < r; i++ { - nodes[i] = Node(i) - } - return nodes -} - -func (g *UndirectedDenseGraph) Edges() []graph.Edge { - var edges []graph.Edge - r, _ := g.mat.Dims() - for i := 0; i < r; i++ { - for j := i + 1; j < r; j++ { - if !isSame(g.mat.At(i, j), g.absent) { - edges = append(edges, Edge{Node(i), Node(j)}) - } - } - } - return edges -} - -func (g *UndirectedDenseGraph) Degree(n graph.Node) int { - id := n.ID() - var deg int - r := g.mat.Symmetric() - for i := 0; i < r; i++ { - if i == id { - continue - } - if !isSame(g.mat.At(id, i), g.absent) { - deg++ - } - } - return deg -} - -func (g *UndirectedDenseGraph) From(n graph.Node) []graph.Node { - var neighbors []graph.Node - id := n.ID() - r := g.mat.Symmetric() - for i := 0; i < r; i++ { - if i == id { - continue - } - if !isSame(g.mat.At(id, i), g.absent) { - neighbors = append(neighbors, Node(i)) - } - } - return neighbors -} - -func (g *UndirectedDenseGraph) HasEdge(u, v graph.Node) bool { - uid := u.ID() - vid := v.ID() - return uid != vid && !isSame(g.mat.At(uid, vid), g.absent) -} - -func (g *UndirectedDenseGraph) Edge(u, v graph.Node) graph.Edge { - return g.EdgeBetween(u, v) -} - -func (g *UndirectedDenseGraph) EdgeBetween(u, v graph.Node) graph.Edge { - if g.HasEdge(u, v) { - return Edge{u, v} - } - return nil -} - -func (g *UndirectedDenseGraph) Weight(e graph.Edge) float64 { - return g.mat.At(e.From().ID(), e.To().ID()) -} - -func (g *UndirectedDenseGraph) SetEdgeWeight(e graph.Edge, weight float64) { - fid := e.From().ID() - tid := e.To().ID() - if fid == tid { - panic("concrete: set edge cost of 
illegal edge") - } - g.mat.SetSym(fid, tid, weight) -} - -func (g *UndirectedDenseGraph) RemoveEdge(e graph.Edge) { - g.mat.SetSym(e.From().ID(), e.To().ID(), g.absent) -} - -func (g *UndirectedDenseGraph) Matrix() mat64.Matrix { - // Prevent alteration of dimensions of the returned matrix. - m := *g.mat - return &m -} diff --git a/vendor/github.com/gonum/graph/concrete/directed.go b/vendor/github.com/gonum/graph/concrete/directed.go deleted file mode 100644 index 9366e6a51..000000000 --- a/vendor/github.com/gonum/graph/concrete/directed.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package concrete - -import ( - "fmt" - - "github.com/gonum/graph" -) - -// A Directed graph is a highly generalized MutableDirectedGraph. -// -// In most cases it's likely more desireable to use a graph specific to your -// problem domain. -type DirectedGraph struct { - successors map[int]map[int]WeightedEdge - predecessors map[int]map[int]WeightedEdge - nodeMap map[int]graph.Node - - // Add/remove convenience variables - maxID int - freeMap map[int]struct{} -} - -func NewDirectedGraph() *DirectedGraph { - return &DirectedGraph{ - successors: make(map[int]map[int]WeightedEdge), - predecessors: make(map[int]map[int]WeightedEdge), - nodeMap: make(map[int]graph.Node), - maxID: 0, - freeMap: make(map[int]struct{}), - } -} - -func (g *DirectedGraph) NewNodeID() int { - if g.maxID != maxInt { - g.maxID++ - return g.maxID - } - - // Implicitly checks if len(g.freeMap) == 0 - for id := range g.freeMap { - return id - } - - // I cannot foresee this ever happening, but just in case - if len(g.nodeMap) == maxInt { - panic("cannot allocate node: graph too large") - } - - for i := 0; i < maxInt; i++ { - if _, ok := g.nodeMap[i]; !ok { - return i - } - } - - // Should not happen. 
- panic("cannot allocate node id: no free id found") -} - -// Adds a node to the graph. Implementation note: if you add a node close to or at -// the max int on your machine NewNode will become slower. -func (g *DirectedGraph) AddNode(n graph.Node) { - if _, exists := g.nodeMap[n.ID()]; exists { - panic(fmt.Sprintf("concrete: node ID collision: %d", n.ID())) - } - g.nodeMap[n.ID()] = n - g.successors[n.ID()] = make(map[int]WeightedEdge) - g.predecessors[n.ID()] = make(map[int]WeightedEdge) - - delete(g.freeMap, n.ID()) - g.maxID = max(g.maxID, n.ID()) -} - -func (g *DirectedGraph) SetEdge(e graph.Edge, cost float64) { - var ( - from = e.From() - fid = from.ID() - to = e.To() - tid = to.ID() - ) - - if fid == tid { - panic("concrete: adding self edge") - } - - if !g.Has(from) { - g.AddNode(from) - } - - if !g.Has(to) { - g.AddNode(to) - } - - g.successors[fid][tid] = WeightedEdge{Edge: e, Cost: cost} - g.predecessors[tid][fid] = WeightedEdge{Edge: e, Cost: cost} -} - -func (g *DirectedGraph) RemoveNode(n graph.Node) { - if _, ok := g.nodeMap[n.ID()]; !ok { - return - } - delete(g.nodeMap, n.ID()) - - for succ := range g.successors[n.ID()] { - delete(g.predecessors[succ], n.ID()) - } - delete(g.successors, n.ID()) - - for pred := range g.predecessors[n.ID()] { - delete(g.successors[pred], n.ID()) - } - delete(g.predecessors, n.ID()) - - g.maxID-- // Fun facts: even if this ID doesn't exist this still works! 
- g.freeMap[n.ID()] = struct{}{} -} - -func (g *DirectedGraph) RemoveEdge(e graph.Edge) { - from, to := e.From(), e.To() - if _, ok := g.nodeMap[from.ID()]; !ok { - return - } else if _, ok := g.nodeMap[to.ID()]; !ok { - return - } - - delete(g.successors[from.ID()], to.ID()) - delete(g.predecessors[to.ID()], from.ID()) -} - -func (g *DirectedGraph) EmptyGraph() { - g.successors = make(map[int]map[int]WeightedEdge) - g.predecessors = make(map[int]map[int]WeightedEdge) - g.nodeMap = make(map[int]graph.Node) -} - -/* Graph implementation */ - -func (g *DirectedGraph) From(n graph.Node) []graph.Node { - if _, ok := g.successors[n.ID()]; !ok { - return nil - } - - successors := make([]graph.Node, len(g.successors[n.ID()])) - i := 0 - for succ := range g.successors[n.ID()] { - successors[i] = g.nodeMap[succ] - i++ - } - - return successors -} - -func (g *DirectedGraph) HasEdge(x, y graph.Node) bool { - xid := x.ID() - yid := y.ID() - if _, ok := g.nodeMap[xid]; !ok { - return false - } - if _, ok := g.nodeMap[yid]; !ok { - return false - } - if _, ok := g.successors[xid][yid]; ok { - return true - } - _, ok := g.successors[yid][xid] - return ok -} - -func (g *DirectedGraph) Edge(u, v graph.Node) graph.Edge { - if _, ok := g.nodeMap[u.ID()]; !ok { - return nil - } - if _, ok := g.nodeMap[v.ID()]; !ok { - return nil - } - edge, ok := g.successors[u.ID()][v.ID()] - if !ok { - return nil - } - return edge.Edge -} - -func (g *DirectedGraph) HasEdgeFromTo(u, v graph.Node) bool { - if _, ok := g.nodeMap[u.ID()]; !ok { - return false - } - if _, ok := g.nodeMap[v.ID()]; !ok { - return false - } - if _, ok := g.successors[u.ID()][v.ID()]; !ok { - return false - } - return true -} - -func (g *DirectedGraph) To(n graph.Node) []graph.Node { - if _, ok := g.successors[n.ID()]; !ok { - return nil - } - - predecessors := make([]graph.Node, len(g.predecessors[n.ID()])) - i := 0 - for succ := range g.predecessors[n.ID()] { - predecessors[i] = g.nodeMap[succ] - i++ - } - - return 
predecessors -} - -func (g *DirectedGraph) Node(id int) graph.Node { - return g.nodeMap[id] -} - -func (g *DirectedGraph) Has(n graph.Node) bool { - _, ok := g.nodeMap[n.ID()] - - return ok -} - -func (g *DirectedGraph) Degree(n graph.Node) int { - if _, ok := g.nodeMap[n.ID()]; !ok { - return 0 - } - - return len(g.successors[n.ID()]) + len(g.predecessors[n.ID()]) -} - -func (g *DirectedGraph) Nodes() []graph.Node { - nodes := make([]graph.Node, len(g.successors)) - i := 0 - for _, n := range g.nodeMap { - nodes[i] = n - i++ - } - - return nodes -} - -func (g *DirectedGraph) Weight(e graph.Edge) float64 { - if s, ok := g.successors[e.From().ID()]; ok { - if we, ok := s[e.To().ID()]; ok { - return we.Cost - } - } - return inf -} - -func (g *DirectedGraph) Edges() []graph.Edge { - edgeList := make([]graph.Edge, 0, len(g.successors)) - edgeMap := make(map[int]map[int]struct{}, len(g.successors)) - for n, succMap := range g.successors { - edgeMap[n] = make(map[int]struct{}, len(succMap)) - for succ, edge := range succMap { - if doneMap, ok := edgeMap[succ]; ok { - if _, ok := doneMap[n]; ok { - continue - } - } - edgeList = append(edgeList, edge) - edgeMap[n][succ] = struct{}{} - } - } - - return edgeList -} diff --git a/vendor/github.com/gonum/graph/concrete/undirected.go b/vendor/github.com/gonum/graph/concrete/undirected.go deleted file mode 100644 index 0bc13b71f..000000000 --- a/vendor/github.com/gonum/graph/concrete/undirected.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package concrete - -import ( - "fmt" - - "github.com/gonum/graph" -) - -// A simple int alias. 
-type Node int - -func (n Node) ID() int { - return int(n) -} - -// Just a collection of two nodes -type Edge struct { - F, T graph.Node -} - -func (e Edge) From() graph.Node { - return e.F -} - -func (e Edge) To() graph.Node { - return e.T -} - -type WeightedEdge struct { - graph.Edge - Cost float64 -} - -// A GonumGraph is a very generalized graph that can handle an arbitrary number of vertices and -// edges -- as well as act as either directed or undirected. -// -// Internally, it uses a map of successors AND predecessors, to speed up some operations (such as -// getting all successors/predecessors). It also speeds up things like adding edges (assuming both -// edges exist). -// -// However, its generality is also its weakness (and partially a flaw in needing to satisfy -// MutableGraph). For most purposes, creating your own graph is probably better. For instance, -// see TileGraph for an example of an immutable 2D grid of tiles that also implements the Graph -// interface, but would be more suitable if all you needed was a simple undirected 2D grid. -type Graph struct { - neighbors map[int]map[int]WeightedEdge - nodeMap map[int]graph.Node - - // Node add/remove convenience vars - maxID int - freeMap map[int]struct{} -} - -func NewGraph() *Graph { - return &Graph{ - neighbors: make(map[int]map[int]WeightedEdge), - nodeMap: make(map[int]graph.Node), - maxID: 0, - freeMap: make(map[int]struct{}), - } -} - -func (g *Graph) NewNodeID() int { - if g.maxID != maxInt { - g.maxID++ - return g.maxID - } - - // Implicitly checks if len(g.freeMap) == 0 - for id := range g.freeMap { - return id - } - - // I cannot foresee this ever happening, but just in case, we check. - if len(g.nodeMap) == maxInt { - panic("cannot allocate node: graph too large") - } - - for i := 0; i < maxInt; i++ { - if _, ok := g.nodeMap[i]; !ok { - return i - } - } - - // Should not happen. 
- panic("cannot allocate node id: no free id found") -} - -func (g *Graph) AddNode(n graph.Node) { - if _, exists := g.nodeMap[n.ID()]; exists { - panic(fmt.Sprintf("concrete: node ID collision: %d", n.ID())) - } - g.nodeMap[n.ID()] = n - g.neighbors[n.ID()] = make(map[int]WeightedEdge) - - delete(g.freeMap, n.ID()) - g.maxID = max(g.maxID, n.ID()) -} - -func (g *Graph) SetEdge(e graph.Edge, cost float64) { - var ( - from = e.From() - fid = from.ID() - to = e.To() - tid = to.ID() - ) - - if fid == tid { - panic("concrete: adding self edge") - } - - if !g.Has(from) { - g.AddNode(from) - } - - if !g.Has(to) { - g.AddNode(to) - } - - g.neighbors[fid][tid] = WeightedEdge{Edge: e, Cost: cost} - g.neighbors[tid][fid] = WeightedEdge{Edge: e, Cost: cost} -} - -func (g *Graph) RemoveNode(n graph.Node) { - if _, ok := g.nodeMap[n.ID()]; !ok { - return - } - delete(g.nodeMap, n.ID()) - - for neigh := range g.neighbors[n.ID()] { - delete(g.neighbors[neigh], n.ID()) - } - delete(g.neighbors, n.ID()) - - if g.maxID != 0 && n.ID() == g.maxID { - g.maxID-- - } - g.freeMap[n.ID()] = struct{}{} -} - -func (g *Graph) RemoveEdge(e graph.Edge) { - from, to := e.From(), e.To() - if _, ok := g.nodeMap[from.ID()]; !ok { - return - } else if _, ok := g.nodeMap[to.ID()]; !ok { - return - } - - delete(g.neighbors[from.ID()], to.ID()) - delete(g.neighbors[to.ID()], from.ID()) -} - -func (g *Graph) EmptyGraph() { - g.neighbors = make(map[int]map[int]WeightedEdge) - g.nodeMap = make(map[int]graph.Node) -} - -/* Graph implementation */ - -func (g *Graph) From(n graph.Node) []graph.Node { - if !g.Has(n) { - return nil - } - - neighbors := make([]graph.Node, len(g.neighbors[n.ID()])) - i := 0 - for id := range g.neighbors[n.ID()] { - neighbors[i] = g.nodeMap[id] - i++ - } - - return neighbors -} - -func (g *Graph) HasEdge(n, neigh graph.Node) bool { - _, ok := g.neighbors[n.ID()][neigh.ID()] - return ok -} - -func (g *Graph) Edge(u, v graph.Node) graph.Edge { - return g.EdgeBetween(u, v) -} - 
-func (g *Graph) EdgeBetween(u, v graph.Node) graph.Edge { - // We don't need to check if neigh exists because - // it's implicit in the neighbors access. - if !g.Has(u) { - return nil - } - - return g.neighbors[u.ID()][v.ID()].Edge -} - -func (g *Graph) Node(id int) graph.Node { - return g.nodeMap[id] -} - -func (g *Graph) Has(n graph.Node) bool { - _, ok := g.nodeMap[n.ID()] - - return ok -} - -func (g *Graph) Nodes() []graph.Node { - nodes := make([]graph.Node, len(g.nodeMap)) - i := 0 - for _, n := range g.nodeMap { - nodes[i] = n - i++ - } - - return nodes -} - -func (g *Graph) Weight(e graph.Edge) float64 { - if n, ok := g.neighbors[e.From().ID()]; ok { - if we, ok := n[e.To().ID()]; ok { - return we.Cost - } - } - return inf -} - -func (g *Graph) Edges() []graph.Edge { - m := make(map[WeightedEdge]struct{}) - toReturn := make([]graph.Edge, 0) - - for _, neighs := range g.neighbors { - for _, we := range neighs { - if _, ok := m[we]; !ok { - m[we] = struct{}{} - toReturn = append(toReturn, we.Edge) - } - } - } - - return toReturn -} - -func (g *Graph) Degree(n graph.Node) int { - if _, ok := g.nodeMap[n.ID()]; !ok { - return 0 - } - - return len(g.neighbors[n.ID()]) -} diff --git a/vendor/github.com/gonum/graph/concrete/util.go b/vendor/github.com/gonum/graph/concrete/util.go deleted file mode 100644 index ee049e38f..000000000 --- a/vendor/github.com/gonum/graph/concrete/util.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package concrete - -import ( - "math" - - "github.com/gonum/graph" -) - -type nodeSorter []graph.Node - -func (ns nodeSorter) Less(i, j int) bool { - return ns[i].ID() < ns[j].ID() -} - -func (ns nodeSorter) Swap(i, j int) { - ns[i], ns[j] = ns[j], ns[i] -} - -func (ns nodeSorter) Len() int { - return len(ns) -} - -// The math package only provides explicitly sized max -// values. This ensures we get the max for the actual -// type int. -const maxInt int = int(^uint(0) >> 1) - -var inf = math.Inf(1) - -func isSame(a, b float64) bool { - return a == b || (math.IsNaN(a) && math.IsNaN(b)) -} - -func max(a, b int) int { - if a > b { - return a - } else { - return b - } -} diff --git a/vendor/github.com/gonum/graph/doc.go b/vendor/github.com/gonum/graph/doc.go deleted file mode 100644 index 7b7b3cc87..000000000 --- a/vendor/github.com/gonum/graph/doc.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package graph implements functions and interfaces to deal with formal discrete graphs. It aims to -be first and foremost flexible, with speed as a strong second priority. - -In this package, graphs are taken to be directed, and undirected graphs are considered to be a -special case of directed graphs that happen to have reciprocal edges. Graphs are, by default, -unweighted, but functions that require weighted edges have several methods of dealing with this. -In order of precedence: - -1. These functions have an argument called Cost (and in some cases, HeuristicCost). If this is -present, it will always be used to determine the cost between two nodes. - -2. These functions will check if your graph implements the Coster (and/or HeuristicCoster) -interface. If this is present, and the Cost (or HeuristicCost) argument is nil, these functions -will be used. - -3. 
Finally, if no user data is supplied, it will use the functions UniformCost (always returns 1) -and/or NulLHeuristic (always returns 0). - -For information on the specification for Cost functions, please see the Coster interface. - -Finally, although the functions take in a Graph -- they will always use the correct behavior. -If your graph implements DirectedGraph, it will use Successors and To where applicable, -if undirected, it will use From instead. If it implements neither, it will scan the edge list -for successors and predecessors where applicable. (This is slow, you should always implement either -Directed or Undirected) - -This package will never modify a graph that is not Mutable (and the interface does not allow it to -do so). However, return values are free to be modified, so never pass a reference to your own edge -list or node list. It also guarantees that any nodes passed back to the user will be the same -nodes returned to it -- that is, it will never take a Node's ID and then wrap the ID in a new -struct and return that. You'll always get back your original data. -*/ -package graph diff --git a/vendor/github.com/gonum/graph/encoding/dot/dot.go b/vendor/github.com/gonum/graph/encoding/dot/dot.go deleted file mode 100644 index 4b43839e0..000000000 --- a/vendor/github.com/gonum/graph/encoding/dot/dot.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package dot implements GraphViz DOT marshaling of graphs. 
-// -// See the GraphViz DOT Guide and the DOT grammar for more information -// on using specific aspects of the DOT language: -// -// DOT Guide: http://www.graphviz.org/Documentation/dotguide.pdf -// -// DOT grammar: http://www.graphviz.org/doc/info/lang.html -// -package dot - -import ( - "bytes" - "errors" - "fmt" - "sort" - "strings" - - "github.com/gonum/graph" -) - -// Node is a DOT graph node. -type Node interface { - // DOTID returns a DOT node ID. - // - // An ID is one of the following: - // - // - a string of alphabetic ([a-zA-Z\x80-\xff]) characters, underscores ('_'). - // digits ([0-9]), not beginning with a digit. - // - a numeral [-]?(.[0-9]+ | [0-9]+(.[0-9]*)?). - // - a double-quoted string ("...") possibly containing escaped quotes (\"). - // - an HTML string (<...>). - DOTID() string -} - -// Attributers are graph.Graph values that specify top-level DOT -// attributes. -type Attributers interface { - DOTAttributers() (graph, node, edge Attributer) -} - -// Attributer defines graph.Node or graph.Edge values that can -// specify DOT attributes. -type Attributer interface { - DOTAttributes() []Attribute -} - -// Attribute is a DOT language key value attribute pair. -type Attribute struct { - Key, Value string -} - -// Porter defines the behavior of graph.Edge values that can specify -// connection ports for their end points. The returned port corresponds -// to the the DOT node port to be used by the edge, compass corresponds -// to DOT compass point to which the edge will be aimed. -type Porter interface { - FromPort() (port, compass string) - ToPort() (port, compass string) -} - -// Structurer represents a graph.Graph that can define subgraphs. -type Structurer interface { - Structure() []Graph -} - -// Graph wraps named graph.Graph values. -type Graph interface { - graph.Graph - DOTID() string -} - -// Subgrapher wraps graph.Node values that represent subgraphs. 
-type Subgrapher interface { - Subgraph() graph.Graph -} - -// Marshal returns the DOT encoding for the graph g, applying the prefix -// and indent to the encoding. Name is used to specify the graph name. If -// name is empty and g implements Graph, the returned string from DOTID -// will be used. If strict is true the output bytes will be prefixed with -// the DOT "strict" keyword. -// -// Graph serialization will work for a graph.Graph without modification, -// however, advanced GraphViz DOT features provided by Marshal depend on -// implementation of the Node, Attributer, Porter, Attributers, Structurer, -// Subgrapher and Graph interfaces. -func Marshal(g graph.Graph, name, prefix, indent string, strict bool) ([]byte, error) { - var p printer - p.indent = indent - p.prefix = prefix - p.visited = make(map[edge]bool) - if strict { - p.buf.WriteString("strict ") - } - err := p.print(g, name, false, false) - if err != nil { - return nil, err - } - return p.buf.Bytes(), nil -} - -type printer struct { - buf bytes.Buffer - - prefix string - indent string - depth int - - visited map[edge]bool - - err error -} - -type edge struct { - inGraph string - from, to int -} - -func (p *printer) print(g graph.Graph, name string, needsIndent, isSubgraph bool) error { - nodes := g.Nodes() - sort.Sort(byID(nodes)) - - p.buf.WriteString(p.prefix) - if needsIndent { - for i := 0; i < p.depth; i++ { - p.buf.WriteString(p.indent) - } - } - _, isDirected := g.(graph.Directed) - if isSubgraph { - p.buf.WriteString("sub") - } else if isDirected { - p.buf.WriteString("di") - } - p.buf.WriteString("graph") - - if name == "" { - if g, ok := g.(Graph); ok { - name = g.DOTID() - } - } - if name != "" { - p.buf.WriteByte(' ') - p.buf.WriteString(name) - } - - p.openBlock(" {") - if a, ok := g.(Attributers); ok { - p.writeAttributeComplex(a) - } - if s, ok := g.(Structurer); ok { - for _, g := range s.Structure() { - _, subIsDirected := g.(graph.Directed) - if subIsDirected != isDirected { - 
return errors.New("dot: mismatched graph type") - } - p.buf.WriteByte('\n') - p.print(g, g.DOTID(), true, true) - } - } - - havePrintedNodeHeader := false - for _, n := range nodes { - if s, ok := n.(Subgrapher); ok { - // If the node is not linked to any other node - // the graph needs to be written now. - if len(g.From(n)) == 0 { - g := s.Subgraph() - _, subIsDirected := g.(graph.Directed) - if subIsDirected != isDirected { - return errors.New("dot: mismatched graph type") - } - if !havePrintedNodeHeader { - p.newline() - p.buf.WriteString("// Node definitions.") - havePrintedNodeHeader = true - } - p.newline() - p.print(g, graphID(g, n), false, true) - } - continue - } - if !havePrintedNodeHeader { - p.newline() - p.buf.WriteString("// Node definitions.") - havePrintedNodeHeader = true - } - p.newline() - p.writeNode(n) - if a, ok := n.(Attributer); ok { - p.writeAttributeList(a) - } - p.buf.WriteByte(';') - } - - havePrintedEdgeHeader := false - for _, n := range nodes { - to := g.From(n) - sort.Sort(byID(to)) - for _, t := range to { - if isDirected { - if p.visited[edge{inGraph: name, from: n.ID(), to: t.ID()}] { - continue - } - p.visited[edge{inGraph: name, from: n.ID(), to: t.ID()}] = true - } else { - if p.visited[edge{inGraph: name, from: n.ID(), to: t.ID()}] { - continue - } - p.visited[edge{inGraph: name, from: n.ID(), to: t.ID()}] = true - p.visited[edge{inGraph: name, from: t.ID(), to: n.ID()}] = true - } - - if !havePrintedEdgeHeader { - p.buf.WriteByte('\n') - p.buf.WriteString(strings.TrimRight(p.prefix, " \t\xa0")) // Trim whitespace suffix. 
- p.newline() - p.buf.WriteString("// Edge definitions.") - havePrintedEdgeHeader = true - } - p.newline() - - if s, ok := n.(Subgrapher); ok { - g := s.Subgraph() - _, subIsDirected := g.(graph.Directed) - if subIsDirected != isDirected { - return errors.New("dot: mismatched graph type") - } - p.print(g, graphID(g, n), false, true) - } else { - p.writeNode(n) - } - e, edgeIsPorter := g.Edge(n, t).(Porter) - if edgeIsPorter { - p.writePorts(e.FromPort()) - } - - if isDirected { - p.buf.WriteString(" -> ") - } else { - p.buf.WriteString(" -- ") - } - - if s, ok := t.(Subgrapher); ok { - g := s.Subgraph() - _, subIsDirected := g.(graph.Directed) - if subIsDirected != isDirected { - return errors.New("dot: mismatched graph type") - } - p.print(g, graphID(g, t), false, true) - } else { - p.writeNode(t) - } - if edgeIsPorter { - p.writePorts(e.ToPort()) - } - - if a, ok := g.Edge(n, t).(Attributer); ok { - p.writeAttributeList(a) - } - - p.buf.WriteByte(';') - } - } - p.closeBlock("}") - - return nil -} - -func (p *printer) writeNode(n graph.Node) { - p.buf.WriteString(nodeID(n)) -} - -func (p *printer) writePorts(port, cp string) { - if port != "" { - p.buf.WriteByte(':') - p.buf.WriteString(port) - } - if cp != "" { - p.buf.WriteByte(':') - p.buf.WriteString(cp) - } -} - -func nodeID(n graph.Node) string { - switch n := n.(type) { - case Node: - return n.DOTID() - default: - return fmt.Sprint(n.ID()) - } -} - -func graphID(g graph.Graph, n graph.Node) string { - switch g := g.(type) { - case Node: - return g.DOTID() - default: - return nodeID(n) - } -} - -func (p *printer) writeAttributeList(a Attributer) { - attributes := a.DOTAttributes() - switch len(attributes) { - case 0: - case 1: - p.buf.WriteString(" [") - p.buf.WriteString(attributes[0].Key) - p.buf.WriteByte('=') - p.buf.WriteString(attributes[0].Value) - p.buf.WriteString("]") - default: - p.openBlock(" [") - for _, att := range attributes { - p.newline() - p.buf.WriteString(att.Key) - p.buf.WriteByte('=') 
- p.buf.WriteString(att.Value) - } - p.closeBlock("]") - } -} - -var attType = []string{"graph", "node", "edge"} - -func (p *printer) writeAttributeComplex(ca Attributers) { - g, n, e := ca.DOTAttributers() - haveWrittenBlock := false - for i, a := range []Attributer{g, n, e} { - attributes := a.DOTAttributes() - if len(attributes) == 0 { - continue - } - if haveWrittenBlock { - p.buf.WriteByte(';') - } - p.newline() - p.buf.WriteString(attType[i]) - p.openBlock(" [") - for _, att := range attributes { - p.newline() - p.buf.WriteString(att.Key) - p.buf.WriteByte('=') - p.buf.WriteString(att.Value) - } - p.closeBlock("]") - haveWrittenBlock = true - } - if haveWrittenBlock { - p.buf.WriteString(";\n") - } -} - -func (p *printer) newline() { - p.buf.WriteByte('\n') - p.buf.WriteString(p.prefix) - for i := 0; i < p.depth; i++ { - p.buf.WriteString(p.indent) - } -} - -func (p *printer) openBlock(b string) { - p.buf.WriteString(b) - p.depth++ -} - -func (p *printer) closeBlock(b string) { - p.depth-- - p.newline() - p.buf.WriteString(b) -} - -type byID []graph.Node - -func (n byID) Len() int { return len(n) } -func (n byID) Less(i, j int) bool { return n[i].ID() < n[j].ID() } -func (n byID) Swap(i, j int) { n[i], n[j] = n[j], n[i] } diff --git a/vendor/github.com/gonum/graph/graph.go b/vendor/github.com/gonum/graph/graph.go deleted file mode 100644 index 732fd2692..000000000 --- a/vendor/github.com/gonum/graph/graph.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package graph - -import "math" - -// Node is a graph node. It returns a graph-unique integer ID. -type Node interface { - ID() int -} - -// Edge is a graph edge. In directed graphs, the direction of the -// edge is given from -> to, otherwise the edge is semantically -// unordered. 
-type Edge interface { - From() Node - To() Node -} - -// Graph is a generalized graph. -type Graph interface { - // Has returns whether the node exists within the graph. - Has(Node) bool - - // Nodes returns all the nodes in the graph. - Nodes() []Node - - // From returns all nodes that can be reached directly - // from the given node. - From(Node) []Node - - // HasEdge returns whether an edge exists between - // nodes x and y without considering direction. - HasEdge(x, y Node) bool - - // Edge returns the edge from u to v if such an edge - // exists and nil otherwise. The node v must be directly - // reachable from u as defined by the From method. - Edge(u, v Node) Edge -} - -// Undirected is an undirected graph. -type Undirected interface { - Graph - - // EdgeBetween returns the edge between nodes x and y. - EdgeBetween(x, y Node) Edge -} - -// Directed is a directed graph. -type Directed interface { - Graph - - // HasEdgeFromTo returns whether an edge exists - // in the graph from u to v. - HasEdgeFromTo(u, v Node) bool - - // To returns all nodes that can reach directly - // to the given node. - To(Node) []Node -} - -// Weighter defines graphs that can report edge weights. -type Weighter interface { - // Weight returns the weight for the given edge. - Weight(Edge) float64 -} - -// Mutable is an interface for generalized graph mutation. -type Mutable interface { - // NewNodeID returns a new unique arbitrary ID. - NewNodeID() int - - // Adds a node to the graph. AddNode panics if - // the added node ID matches an existing node ID. - AddNode(Node) - - // RemoveNode removes a node from the graph, as - // well as any edges attached to it. If the node - // is not in the graph it is a no-op. - RemoveNode(Node) - - // SetEdge adds an edge from one node to another. - // If the nodes do not exist, they are added. - // SetEdge will panic if the IDs of the e.From - // and e.To are equal. 
- SetEdge(e Edge, cost float64) - - // RemoveEdge removes the given edge, leaving the - // terminal nodes. If the edge does not exist it - // is a no-op. - RemoveEdge(Edge) -} - -// MutableUndirected is an undirected graph that can be arbitrarily altered. -type MutableUndirected interface { - Undirected - Mutable -} - -// MutableDirected is a directed graph that can be arbitrarily altered. -type MutableDirected interface { - Directed - Mutable -} - -// WeightFunc is a mapping between an edge and an edge weight. -type WeightFunc func(Edge) float64 - -// UniformCost is a WeightFunc that returns an edge cost of 1 for a non-nil Edge -// and Inf for a nil Edge. -func UniformCost(e Edge) float64 { - if e == nil { - return math.Inf(1) - } - return 1 -} - -// CopyUndirected copies nodes and edges as undirected edges from the source to the -// destination without first clearing the destination. CopyUndirected will panic if -// a node ID in the source graph matches a node ID in the destination. If the source -// does not implement Weighter, UniformCost is used to define edge weights. -// -// Note that if the source is a directed graph and a fundamental cycle exists with -// two nodes where the edge weights differ, the resulting destination graph's edge -// weight between those nodes is undefined. -func CopyUndirected(dst MutableUndirected, src Graph) { - var weight WeightFunc - if g, ok := src.(Weighter); ok { - weight = g.Weight - } else { - weight = UniformCost - } - - nodes := src.Nodes() - for _, n := range nodes { - dst.AddNode(n) - } - for _, u := range nodes { - for _, v := range src.From(u) { - edge := src.Edge(u, v) - dst.SetEdge(edge, weight(edge)) - } - } -} - -// CopyDirected copies nodes and edges as directed edges from the source to the -// destination without first clearing the destination. CopyDirected will panic if -// a node ID in the source graph matches a node ID in the destination. 
If the -// source is undirected both directions will be present in the destination after -// the copy is complete. If the source does not implement Weighter, UniformCost -// is used to define edge weights. -func CopyDirected(dst MutableDirected, src Graph) { - var weight WeightFunc - if g, ok := src.(Weighter); ok { - weight = g.Weight - } else { - weight = UniformCost - } - - nodes := src.Nodes() - for _, n := range nodes { - dst.AddNode(n) - } - for _, u := range nodes { - for _, v := range src.From(u) { - edge := src.Edge(u, v) - dst.SetEdge(edge, weight(edge)) - } - } -} diff --git a/vendor/github.com/gonum/graph/internal/linear.go b/vendor/github.com/gonum/graph/internal/linear.go deleted file mode 100644 index 3d64de9cf..000000000 --- a/vendor/github.com/gonum/graph/internal/linear.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "github.com/gonum/graph" -) - -// NodeStack implements a LIFO stack of graph.Node. -type NodeStack []graph.Node - -// Len returns the number of graph.Nodes on the stack. -func (s *NodeStack) Len() int { return len(*s) } - -// Pop returns the last graph.Node on the stack and removes it -// from the stack. -func (s *NodeStack) Pop() graph.Node { - v := *s - v, n := v[:len(v)-1], v[len(v)-1] - *s = v - return n -} - -// Push adds the node n to the stack at the last position. -func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } - -// NodeQueue implements a FIFO queue. -type NodeQueue struct { - head int - data []graph.Node -} - -// Len returns the number of graph.Nodes in the queue. -func (q *NodeQueue) Len() int { return len(q.data) - q.head } - -// Enqueue adds the node n to the back of the queue. 
-func (q *NodeQueue) Enqueue(n graph.Node) { - if len(q.data) == cap(q.data) && q.head > 0 { - l := q.Len() - copy(q.data, q.data[q.head:]) - q.head = 0 - q.data = append(q.data[:l], n) - } else { - q.data = append(q.data, n) - } -} - -// Dequeue returns the graph.Node at the front of the queue and -// removes it from the queue. -func (q *NodeQueue) Dequeue() graph.Node { - if q.Len() == 0 { - panic("queue: empty queue") - } - - var n graph.Node - n, q.data[q.head] = q.data[q.head], nil - q.head++ - - if q.Len() == 0 { - q.head = 0 - q.data = q.data[:0] - } - - return n -} - -// Reset clears the queue for reuse. -func (q *NodeQueue) Reset() { - q.head = 0 - q.data = q.data[:0] -} diff --git a/vendor/github.com/gonum/graph/internal/set.go b/vendor/github.com/gonum/graph/internal/set.go deleted file mode 100644 index 3ad1bc8c4..000000000 --- a/vendor/github.com/gonum/graph/internal/set.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "unsafe" - - "github.com/gonum/graph" -) - -// IntSet is a set of integer identifiers. -type IntSet map[int]struct{} - -// The simple accessor methods for Set are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s IntSet) Add(e int) { - s[e] = struct{}{} -} - -// Has reports the existence of the element in the set. -func (s IntSet) Has(e int) bool { - _, ok := s[e] - return ok -} - -// Remove deletes the specified element from the set. -func (s IntSet) Remove(e int) { - delete(s, e) -} - -// Count reports the number of elements stored in the set. -func (s IntSet) Count() int { - return len(s) -} - -// Same determines whether two sets are backed by the same store. 
In the -// current implementation using hash maps it makes use of the fact that -// hash maps (at least in the gc implementation) are passed as a pointer -// to a runtime Hmap struct. -// -// A map is not seen by the runtime as a pointer though, so we cannot -// directly compare the sets converted to unsafe.Pointer and need to take -// the sets' addressed and dereference them as pointers to some comparable -// type. -func Same(s1, s2 Set) bool { - return *(*uintptr)(unsafe.Pointer(&s1)) == *(*uintptr)(unsafe.Pointer(&s2)) -} - -// A set is a set of nodes keyed in their integer identifiers. -type Set map[int]graph.Node - -// The simple accessor methods for Set are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s Set) Add(n graph.Node) { - s[n.ID()] = n -} - -// Remove deletes the specified element from the set. -func (s Set) Remove(e graph.Node) { - delete(s, e.ID()) -} - -// Has reports the existence of the element in the set. -func (s Set) Has(n graph.Node) bool { - _, ok := s[n.ID()] - return ok -} - -// Clear returns an empty set, possibly using the same backing store. -// Clear is not provided as a method since there is no way to replace -// the calling value if clearing is performed by a make(set). Clear -// should never be called without keeping the returned value. -func Clear(s Set) Set { - if len(s) == 0 { - return s - } - - return make(Set) -} - -// Copy performs a perfect copy from s1 to dst (meaning the sets will -// be equal). -func (dst Set) Copy(src Set) Set { - if Same(src, dst) { - return dst - } - - if len(dst) > 0 { - dst = make(Set, len(src)) - } - - for e, n := range src { - dst[e] = n - } - - return dst -} - -// Equal reports set equality between the parameters. Sets are equal if -// and only if they have the same elements. 
-func Equal(s1, s2 Set) bool { - if Same(s1, s2) { - return true - } - - if len(s1) != len(s2) { - return false - } - - for e := range s1 { - if _, ok := s2[e]; !ok { - return false - } - } - - return true -} - -// Union takes the union of s1 and s2, and stores it in dst. -// -// The union of two sets, s1 and s2, is the set containing all the -// elements of each, for instance: -// -// {a,b,c} UNION {d,e,f} = {a,b,c,d,e,f} -// -// Since sets may not have repetition, unions of two sets that overlap -// do not contain repeat elements, that is: -// -// {a,b,c} UNION {b,c,d} = {a,b,c,d} -// -func (dst Set) Union(s1, s2 Set) Set { - if Same(s1, s2) { - return dst.Copy(s1) - } - - if !Same(s1, dst) && !Same(s2, dst) { - dst = Clear(dst) - } - - if !Same(dst, s1) { - for e, n := range s1 { - dst[e] = n - } - } - - if !Same(dst, s2) { - for e, n := range s2 { - dst[e] = n - } - } - - return dst -} - -// Intersect takes the intersection of s1 and s2, and stores it in dst. -// -// The intersection of two sets, s1 and s2, is the set containing all -// the elements shared between the two sets, for instance: -// -// {a,b,c} INTERSECT {b,c,d} = {b,c} -// -// The intersection between a set and itself is itself, and thus -// effectively a copy operation: -// -// {a,b,c} INTERSECT {a,b,c} = {a,b,c} -// -// The intersection between two sets that share no elements is the empty -// set: -// -// {a,b,c} INTERSECT {d,e,f} = {} -// -func (dst Set) Intersect(s1, s2 Set) Set { - var swap Set - - if Same(s1, s2) { - return dst.Copy(s1) - } - if Same(s1, dst) { - swap = s2 - } else if Same(s2, dst) { - swap = s1 - } else { - dst = Clear(dst) - - if len(s1) > len(s2) { - s1, s2 = s2, s1 - } - - for e, n := range s1 { - if _, ok := s2[e]; ok { - dst[e] = n - } - } - - return dst - } - - for e := range dst { - if _, ok := swap[e]; !ok { - delete(dst, e) - } - } - - return dst -} diff --git a/vendor/github.com/gonum/graph/internal/sort.go b/vendor/github.com/gonum/graph/internal/sort.go 
deleted file mode 100644 index 3bfee0f69..000000000 --- a/vendor/github.com/gonum/graph/internal/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// BySliceValues implements the sort.Interface sorting a slice of -// []int lexically by the values of the []int. -type BySliceValues [][]int - -func (c BySliceValues) Len() int { return len(c) } -func (c BySliceValues) Less(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v < b[k] { - return true - } - if v > b[k] { - return false - } - } - return len(a) < len(b) -} -func (c BySliceValues) Swap(i, j int) { c[i], c[j] = c[j], c[i] } diff --git a/vendor/github.com/gonum/graph/path/a_star.go b/vendor/github.com/gonum/graph/path/a_star.go deleted file mode 100644 index b41d19428..000000000 --- a/vendor/github.com/gonum/graph/path/a_star.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "container/heap" - - "github.com/gonum/graph" - "github.com/gonum/graph/internal" -) - -// Heuristic returns an estimate of the cost of travelling between two nodes. -type Heuristic func(x, y graph.Node) float64 - -// HeuristicCoster wraps the HeuristicCost method. A graph implementing the -// interface provides a heuristic between any two given nodes. -type HeuristicCoster interface { - HeuristicCost(x, y graph.Node) float64 -} - -// AStar finds the A*-shortest path from s to t in g using the heuristic h. The path and -// its cost are returned in a Shortest along with paths and costs to all nodes explored -// during the search. The number of expanded nodes is also returned. 
This value may help -// with heuristic tuning. -// -// The path will be the shortest path if the heuristic is admissible. A heuristic is -// admissible if for any node, n, in the graph, the heuristic estimate of the cost of -// the path from n to t is less than or equal to the true cost of that path. -// -// If h is nil, AStar will use the g.HeuristicCost method if g implements HeuristicCoster, -// falling back to NullHeuristic otherwise. If the graph does not implement graph.Weighter, -// graph.UniformCost is used. AStar will panic if g has an A*-reachable negative edge weight. -func AStar(s, t graph.Node, g graph.Graph, h Heuristic) (path Shortest, expanded int) { - if !g.Has(s) || !g.Has(t) { - return Shortest{from: s}, 0 - } - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - if h == nil { - if g, ok := g.(HeuristicCoster); ok { - h = g.HeuristicCost - } else { - h = NullHeuristic - } - } - - path = newShortestFrom(s, g.Nodes()) - tid := t.ID() - - visited := make(internal.IntSet) - open := &aStarQueue{indexOf: make(map[int]int)} - heap.Push(open, aStarNode{node: s, gscore: 0, fscore: h(s, t)}) - - for open.Len() != 0 { - u := heap.Pop(open).(aStarNode) - uid := u.node.ID() - i := path.indexOf[uid] - expanded++ - - if uid == tid { - break - } - - visited.Add(uid) - for _, v := range g.From(u.node) { - vid := v.ID() - if visited.Has(vid) { - continue - } - j := path.indexOf[vid] - - w := weight(g.Edge(u.node, v)) - if w < 0 { - panic("A*: negative edge weight") - } - g := u.gscore + w - if n, ok := open.node(vid); !ok { - path.set(j, g, i) - heap.Push(open, aStarNode{node: v, gscore: g, fscore: g + h(v, t)}) - } else if g < n.gscore { - path.set(j, g, i) - open.update(vid, g, g+h(v, t)) - } - } - } - - return path, expanded -} - -// NullHeuristic is an admissible, consistent heuristic that will not speed up computation. 
-func NullHeuristic(_, _ graph.Node) float64 { - return 0 -} - -// aStarNode adds A* accounting to a graph.Node. -type aStarNode struct { - node graph.Node - gscore float64 - fscore float64 -} - -// aStarQueue is an A* priority queue. -type aStarQueue struct { - indexOf map[int]int - nodes []aStarNode -} - -func (q *aStarQueue) Less(i, j int) bool { - return q.nodes[i].fscore < q.nodes[j].fscore -} - -func (q *aStarQueue) Swap(i, j int) { - q.indexOf[q.nodes[i].node.ID()] = j - q.indexOf[q.nodes[j].node.ID()] = i - q.nodes[i], q.nodes[j] = q.nodes[j], q.nodes[i] -} - -func (q *aStarQueue) Len() int { - return len(q.nodes) -} - -func (q *aStarQueue) Push(x interface{}) { - n := x.(aStarNode) - q.indexOf[n.node.ID()] = len(q.nodes) - q.nodes = append(q.nodes, n) -} - -func (q *aStarQueue) Pop() interface{} { - n := q.nodes[len(q.nodes)-1] - q.nodes = q.nodes[:len(q.nodes)-1] - delete(q.indexOf, n.node.ID()) - return n -} - -func (q *aStarQueue) update(id int, g, f float64) { - i, ok := q.indexOf[id] - if !ok { - return - } - q.nodes[i].gscore = g - q.nodes[i].fscore = f - heap.Fix(q, i) -} - -func (q *aStarQueue) node(id int) (aStarNode, bool) { - loc, ok := q.indexOf[id] - if ok { - return q.nodes[loc], true - } - return aStarNode{}, false -} diff --git a/vendor/github.com/gonum/graph/path/bellman_ford_moore.go b/vendor/github.com/gonum/graph/path/bellman_ford_moore.go deleted file mode 100644 index 6ca49d5f6..000000000 --- a/vendor/github.com/gonum/graph/path/bellman_ford_moore.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import "github.com/gonum/graph" - -// BellmanFordFrom returns a shortest-path tree for a shortest path from u to all nodes in -// the graph g, or false indicating that a negative cycle exists in the graph. 
If the graph -// does not implement graph.Weighter, graph.UniformCost is used. -// -// The time complexity of BellmanFordFrom is O(|V|.|E|). -func BellmanFordFrom(u graph.Node, g graph.Graph) (path Shortest, ok bool) { - if !g.Has(u) { - return Shortest{from: u}, true - } - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - - nodes := g.Nodes() - - path = newShortestFrom(u, nodes) - path.dist[path.indexOf[u.ID()]] = 0 - - // TODO(kortschak): Consider adding further optimisations - // from http://arxiv.org/abs/1111.5414. - for i := 1; i < len(nodes); i++ { - changed := false - for j, u := range nodes { - for _, v := range g.From(u) { - k := path.indexOf[v.ID()] - joint := path.dist[j] + weight(g.Edge(u, v)) - if joint < path.dist[k] { - path.set(k, joint, j) - changed = true - } - } - } - if !changed { - break - } - } - - for j, u := range nodes { - for _, v := range g.From(u) { - k := path.indexOf[v.ID()] - if path.dist[j]+weight(g.Edge(u, v)) < path.dist[k] { - return path, false - } - } - } - - return path, true -} diff --git a/vendor/github.com/gonum/graph/path/control_flow.go b/vendor/github.com/gonum/graph/path/control_flow.go deleted file mode 100644 index 219226d5d..000000000 --- a/vendor/github.com/gonum/graph/path/control_flow.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "github.com/gonum/graph" - "github.com/gonum/graph/internal" -) - -// PostDominatores returns all dominators for all nodes in g. It does not -// prune for strict post-dominators, immediate dominators etc. -// -// A dominates B if and only if the only path through B travels through A. 
-func Dominators(start graph.Node, g graph.Graph) map[int]internal.Set { - allNodes := make(internal.Set) - nlist := g.Nodes() - dominators := make(map[int]internal.Set, len(nlist)) - for _, node := range nlist { - allNodes.Add(node) - } - - var to func(graph.Node) []graph.Node - switch g := g.(type) { - case graph.Directed: - to = g.To - default: - to = g.From - } - - for _, node := range nlist { - dominators[node.ID()] = make(internal.Set) - if node.ID() == start.ID() { - dominators[node.ID()].Add(start) - } else { - dominators[node.ID()].Copy(allNodes) - } - } - - for somethingChanged := true; somethingChanged; { - somethingChanged = false - for _, node := range nlist { - if node.ID() == start.ID() { - continue - } - preds := to(node) - if len(preds) == 0 { - continue - } - tmp := make(internal.Set).Copy(dominators[preds[0].ID()]) - for _, pred := range preds[1:] { - tmp.Intersect(tmp, dominators[pred.ID()]) - } - - dom := make(internal.Set) - dom.Add(node) - - dom.Union(dom, tmp) - if !internal.Equal(dom, dominators[node.ID()]) { - dominators[node.ID()] = dom - somethingChanged = true - } - } - } - - return dominators -} - -// PostDominatores returns all post-dominators for all nodes in g. It does not -// prune for strict post-dominators, immediate post-dominators etc. -// -// A post-dominates B if and only if all paths from B travel through A. 
-func PostDominators(end graph.Node, g graph.Graph) map[int]internal.Set { - allNodes := make(internal.Set) - nlist := g.Nodes() - dominators := make(map[int]internal.Set, len(nlist)) - for _, node := range nlist { - allNodes.Add(node) - } - - for _, node := range nlist { - dominators[node.ID()] = make(internal.Set) - if node.ID() == end.ID() { - dominators[node.ID()].Add(end) - } else { - dominators[node.ID()].Copy(allNodes) - } - } - - for somethingChanged := true; somethingChanged; { - somethingChanged = false - for _, node := range nlist { - if node.ID() == end.ID() { - continue - } - succs := g.From(node) - if len(succs) == 0 { - continue - } - tmp := make(internal.Set).Copy(dominators[succs[0].ID()]) - for _, succ := range succs[1:] { - tmp.Intersect(tmp, dominators[succ.ID()]) - } - - dom := make(internal.Set) - dom.Add(node) - - dom.Union(dom, tmp) - if !internal.Equal(dom, dominators[node.ID()]) { - dominators[node.ID()] = dom - somethingChanged = true - } - } - } - - return dominators -} diff --git a/vendor/github.com/gonum/graph/path/dijkstra.go b/vendor/github.com/gonum/graph/path/dijkstra.go deleted file mode 100644 index 254676b3b..000000000 --- a/vendor/github.com/gonum/graph/path/dijkstra.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "container/heap" - - "github.com/gonum/graph" -) - -// DijkstraFrom returns a shortest-path tree for a shortest path from u to all nodes in -// the graph g. If the graph does not implement graph.Weighter, graph.UniformCost is used. -// DijkstraFrom will panic if g has a u-reachable negative edge weight. -// -// The time complexity of DijkstrFrom is O(|E|+|V|.log|V|). 
-func DijkstraFrom(u graph.Node, g graph.Graph) Shortest { - if !g.Has(u) { - return Shortest{from: u} - } - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - - nodes := g.Nodes() - path := newShortestFrom(u, nodes) - - // Dijkstra's algorithm here is implemented essentially as - // described in Function B.2 in figure 6 of UTCS Technical - // Report TR-07-54. - // - // http://www.cs.utexas.edu/ftp/techreports/tr07-54.pdf - Q := priorityQueue{{node: u, dist: 0}} - for Q.Len() != 0 { - mid := heap.Pop(&Q).(distanceNode) - k := path.indexOf[mid.node.ID()] - if mid.dist < path.dist[k] { - path.dist[k] = mid.dist - } - for _, v := range g.From(mid.node) { - j := path.indexOf[v.ID()] - w := weight(g.Edge(mid.node, v)) - if w < 0 { - panic("dijkstra: negative edge weight") - } - joint := path.dist[k] + w - if joint < path.dist[j] { - heap.Push(&Q, distanceNode{node: v, dist: joint}) - path.set(j, joint, k) - } - } - } - - return path -} - -// DijkstraAllPaths returns a shortest-path tree for shortest paths in the graph g. -// If the graph does not implement graph.Weighter, graph.UniformCost is used. -// DijkstraAllPaths will panic if g has a negative edge weight. -// -// The time complexity of DijkstrAllPaths is O(|V|.|E|+|V|^2.log|V|). -func DijkstraAllPaths(g graph.Graph) (paths AllShortest) { - paths = newAllShortest(g.Nodes(), false) - dijkstraAllPaths(g, paths) - return paths -} - -// dijkstraAllPaths is the all-paths implementation of Dijkstra. It is shared -// between DijkstraAllPaths and JohnsonAllPaths to avoid repeated allocation -// of the nodes slice and the indexOf map. It returns nothing, but stores the -// result of the work in the paths parameter which is a reference type. 
-func dijkstraAllPaths(g graph.Graph, paths AllShortest) { - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - - var Q priorityQueue - for i, u := range paths.nodes { - // Dijkstra's algorithm here is implemented essentially as - // described in Function B.2 in figure 6 of UTCS Technical - // Report TR-07-54 with the addition of handling multiple - // co-equal paths. - // - // http://www.cs.utexas.edu/ftp/techreports/tr07-54.pdf - - // Q must be empty at this point. - heap.Push(&Q, distanceNode{node: u, dist: 0}) - for Q.Len() != 0 { - mid := heap.Pop(&Q).(distanceNode) - k := paths.indexOf[mid.node.ID()] - if mid.dist < paths.dist.At(i, k) { - paths.dist.Set(i, k, mid.dist) - } - for _, v := range g.From(mid.node) { - j := paths.indexOf[v.ID()] - w := weight(g.Edge(mid.node, v)) - if w < 0 { - panic("dijkstra: negative edge weight") - } - joint := paths.dist.At(i, k) + w - if joint < paths.dist.At(i, j) { - heap.Push(&Q, distanceNode{node: v, dist: joint}) - paths.set(i, j, joint, k) - } else if joint == paths.dist.At(i, j) { - paths.add(i, j, k) - } - } - } - } -} - -type distanceNode struct { - node graph.Node - dist float64 -} - -// priorityQueue implements a no-dec priority queue. 
-type priorityQueue []distanceNode - -func (q priorityQueue) Len() int { return len(q) } -func (q priorityQueue) Less(i, j int) bool { return q[i].dist < q[j].dist } -func (q priorityQueue) Swap(i, j int) { q[i], q[j] = q[j], q[i] } -func (q *priorityQueue) Push(n interface{}) { *q = append(*q, n.(distanceNode)) } -func (q *priorityQueue) Pop() interface{} { - t := *q - var n interface{} - n, *q = t[len(t)-1], t[:len(t)-1] - return n -} diff --git a/vendor/github.com/gonum/graph/path/disjoint.go b/vendor/github.com/gonum/graph/path/disjoint.go deleted file mode 100644 index 0755f695a..000000000 --- a/vendor/github.com/gonum/graph/path/disjoint.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -// A disjoint set is a collection of non-overlapping sets. That is, for any two sets in the -// disjoint set, their intersection is the empty set. -// -// A disjoint set has three principle operations: Make Set, Find, and Union. -// -// Make set creates a new set for an element (presuming it does not already exist in any set in -// the disjoint set), Find finds the set containing that element (if any), and Union merges two -// sets in the disjoint set. In general, algorithms operating on disjoint sets are "union-find" -// algorithms, where two sets are found with Find, and then joined with Union. -// -// A concrete example of a union-find algorithm can be found as discrete.Kruskal -- which unions -// two sets when an edge is created between two vertices, and refuses to make an edge between two -// vertices if they're part of the same set. 
-type disjointSet struct { - master map[int]*disjointSetNode -} - -type disjointSetNode struct { - parent *disjointSetNode - rank int -} - -func newDisjointSet() *disjointSet { - return &disjointSet{master: make(map[int]*disjointSetNode)} -} - -// If the element isn't already somewhere in there, adds it to the master set and its own tiny set. -func (ds *disjointSet) makeSet(e int) { - if _, ok := ds.master[e]; ok { - return - } - dsNode := &disjointSetNode{rank: 0} - dsNode.parent = dsNode - ds.master[e] = dsNode -} - -// Returns the set the element belongs to, or nil if none. -func (ds *disjointSet) find(e int) *disjointSetNode { - dsNode, ok := ds.master[e] - if !ok { - return nil - } - - return find(dsNode) -} - -func find(dsNode *disjointSetNode) *disjointSetNode { - if dsNode.parent != dsNode { - dsNode.parent = find(dsNode.parent) - } - - return dsNode.parent -} - -// Unions two subsets within the disjointSet. -// -// If x or y are not in this disjoint set, the behavior is undefined. If either pointer is nil, -// this function will panic. -func (ds *disjointSet) union(x, y *disjointSetNode) { - if x == nil || y == nil { - panic("Disjoint Set union on nil sets") - } - xRoot := find(x) - yRoot := find(y) - if xRoot == nil || yRoot == nil { - return - } - - if xRoot == yRoot { - return - } - - if xRoot.rank < yRoot.rank { - xRoot.parent = yRoot - } else if yRoot.rank < xRoot.rank { - yRoot.parent = xRoot - } else { - yRoot.parent = xRoot - xRoot.rank += 1 - } -} diff --git a/vendor/github.com/gonum/graph/path/floydwarshall.go b/vendor/github.com/gonum/graph/path/floydwarshall.go deleted file mode 100644 index d4246835d..000000000 --- a/vendor/github.com/gonum/graph/path/floydwarshall.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package path - -import "github.com/gonum/graph" - -// FloydWarshall returns a shortest-path tree for the graph g or false indicating -// that a negative cycle exists in the graph. If the graph does not implement -// graph.Weighter, graph.UniformCost is used. -// -// The time complexity of FloydWarshall is O(|V|^3). -func FloydWarshall(g graph.Graph) (paths AllShortest, ok bool) { - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - - nodes := g.Nodes() - paths = newAllShortest(nodes, true) - for i, u := range nodes { - paths.dist.Set(i, i, 0) - for _, v := range g.From(u) { - j := paths.indexOf[v.ID()] - paths.set(i, j, weight(g.Edge(u, v)), j) - } - } - - for k := range nodes { - for i := range nodes { - for j := range nodes { - ij := paths.dist.At(i, j) - joint := paths.dist.At(i, k) + paths.dist.At(k, j) - if ij > joint { - paths.set(i, j, joint, paths.at(i, k)...) - } else if ij-joint == 0 { - paths.add(i, j, paths.at(i, k)...) - } - } - } - } - - ok = true - for i := range nodes { - if paths.dist.At(i, i) < 0 { - ok = false - break - } - } - - return paths, ok -} diff --git a/vendor/github.com/gonum/graph/path/johnson_apsp.go b/vendor/github.com/gonum/graph/path/johnson_apsp.go deleted file mode 100644 index 1ea195e0b..000000000 --- a/vendor/github.com/gonum/graph/path/johnson_apsp.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "math" - "math/rand" - - "github.com/gonum/graph" - "github.com/gonum/graph/concrete" -) - -// JohnsonAllPaths returns a shortest-path tree for shortest paths in the graph g. -// If the graph does not implement graph.Weighter, graph.UniformCost is used. -// -// The time complexity of JohnsonAllPaths is O(|V|.|E|+|V|^2.log|V|). 
-func JohnsonAllPaths(g graph.Graph) (paths AllShortest, ok bool) { - jg := johnsonWeightAdjuster{ - g: g, - from: g.From, - edgeTo: g.Edge, - } - if g, ok := g.(graph.Weighter); ok { - jg.weight = g.Weight - } else { - jg.weight = graph.UniformCost - } - - paths = newAllShortest(g.Nodes(), false) - - sign := -1 - for { - // Choose a random node ID until we find - // one that is not in g. - jg.q = sign * rand.Int() - if _, exists := paths.indexOf[jg.q]; !exists { - break - } - sign *= -1 - } - - jg.bellmanFord = true - jg.adjustBy, ok = BellmanFordFrom(johnsonGraphNode(jg.q), jg) - if !ok { - return paths, false - } - - jg.bellmanFord = false - dijkstraAllPaths(jg, paths) - - for i, u := range paths.nodes { - hu := jg.adjustBy.WeightTo(u) - for j, v := range paths.nodes { - if i == j { - continue - } - hv := jg.adjustBy.WeightTo(v) - paths.dist.Set(i, j, paths.dist.At(i, j)-hu+hv) - } - } - - return paths, ok -} - -type johnsonWeightAdjuster struct { - q int - g graph.Graph - - from func(graph.Node) []graph.Node - edgeTo func(graph.Node, graph.Node) graph.Edge - weight graph.WeightFunc - - bellmanFord bool - adjustBy Shortest -} - -var ( - // johnsonWeightAdjuster has the behaviour - // of a directed graph, but we don't need - // to be explicit with the type since it - // is not exported. 
- _ graph.Graph = johnsonWeightAdjuster{} - _ graph.Weighter = johnsonWeightAdjuster{} -) - -func (g johnsonWeightAdjuster) Has(n graph.Node) bool { - if g.bellmanFord && n.ID() == g.q { - return true - } - return g.g.Has(n) - -} - -func (g johnsonWeightAdjuster) Nodes() []graph.Node { - if g.bellmanFord { - return append(g.g.Nodes(), johnsonGraphNode(g.q)) - } - return g.g.Nodes() -} - -func (g johnsonWeightAdjuster) From(n graph.Node) []graph.Node { - if g.bellmanFord && n.ID() == g.q { - return g.g.Nodes() - } - return g.from(n) -} - -func (g johnsonWeightAdjuster) Edge(u, v graph.Node) graph.Edge { - if g.bellmanFord && u.ID() == g.q && g.g.Has(v) { - return concrete.Edge{johnsonGraphNode(g.q), v} - } - return g.edgeTo(u, v) -} - -func (g johnsonWeightAdjuster) Weight(e graph.Edge) float64 { - if g.bellmanFord { - switch g.q { - case e.From().ID(): - return 0 - case e.To().ID(): - return math.Inf(1) - default: - return g.weight(e) - } - } - return g.weight(e) + g.adjustBy.WeightTo(e.From()) - g.adjustBy.WeightTo(e.To()) -} - -func (johnsonWeightAdjuster) HasEdge(_, _ graph.Node) bool { - panic("search: unintended use of johnsonWeightAdjuster") -} - -type johnsonGraphNode int - -func (n johnsonGraphNode) ID() int { return int(n) } diff --git a/vendor/github.com/gonum/graph/path/shortest.go b/vendor/github.com/gonum/graph/path/shortest.go deleted file mode 100644 index 97f7e49fb..000000000 --- a/vendor/github.com/gonum/graph/path/shortest.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "math" - "math/rand" - - "github.com/gonum/graph" - "github.com/gonum/matrix/mat64" -) - -// Shortest is a shortest-path tree created by the BellmanFordFrom or DijkstraFrom -// single-source shortest path functions. 
-type Shortest struct { - // from holds the source node given to - // DijkstraFrom. - from graph.Node - - // nodes hold the nodes of the analysed - // graph. - nodes []graph.Node - // indexOf contains a mapping between - // the id-dense representation of the - // graph and the potentially id-sparse - // nodes held in nodes. - indexOf map[int]int - - // dist and next represent the shortest - // paths between nodes. - // - // Indices into dist and next are - // mapped through indexOf. - // - // dist contains the distances - // from the from node for each - // node in the graph. - dist []float64 - // next contains the shortest-path - // tree of the graph. The index is a - // linear mapping of to-dense-id. - next []int -} - -func newShortestFrom(u graph.Node, nodes []graph.Node) Shortest { - indexOf := make(map[int]int, len(nodes)) - uid := u.ID() - for i, n := range nodes { - indexOf[n.ID()] = i - if n.ID() == uid { - u = n - } - } - - p := Shortest{ - from: u, - - nodes: nodes, - indexOf: indexOf, - - dist: make([]float64, len(nodes)), - next: make([]int, len(nodes)), - } - for i := range nodes { - p.dist[i] = math.Inf(1) - p.next[i] = -1 - } - p.dist[indexOf[uid]] = 0 - - return p -} - -func (p Shortest) set(to int, weight float64, mid int) { - p.dist[to] = weight - p.next[to] = mid -} - -// From returns the starting node of the paths held by the Shortest. -func (p Shortest) From() graph.Node { return p.from } - -// WeightTo returns the weight of the minimum path to v. -func (p Shortest) WeightTo(v graph.Node) float64 { - to, toOK := p.indexOf[v.ID()] - if !toOK { - return math.Inf(1) - } - return p.dist[to] -} - -// To returns a shortest path to v and the weight of the path. 
-func (p Shortest) To(v graph.Node) (path []graph.Node, weight float64) { - to, toOK := p.indexOf[v.ID()] - if !toOK || math.IsInf(p.dist[to], 1) { - return nil, math.Inf(1) - } - from := p.indexOf[p.from.ID()] - path = []graph.Node{p.nodes[to]} - for to != from { - path = append(path, p.nodes[p.next[to]]) - to = p.next[to] - } - reverse(path) - return path, p.dist[p.indexOf[v.ID()]] -} - -// AllShortest is a shortest-path tree created by the DijkstraAllPaths, FloydWarshall -// or JohnsonAllPaths all-pairs shortest paths functions. -type AllShortest struct { - // nodes hold the nodes of the analysed - // graph. - nodes []graph.Node - // indexOf contains a mapping between - // the id-dense representation of the - // graph and the potentially id-sparse - // nodes held in nodes. - indexOf map[int]int - - // dist, next and forward represent - // the shortest paths between nodes. - // - // Indices into dist and next are - // mapped through indexOf. - // - // dist contains the pairwise - // distances between nodes. - dist *mat64.Dense - // next contains the shortest-path - // tree of the graph. The first index - // is a linear mapping of from-dense-id - // and to-dense-id, to-major with a - // stride equal to len(nodes); the - // slice indexed to is the list of - // intermediates leading from the 'from' - // node to the 'to' node represented - // by dense id. - // The interpretation of next is - // dependent on the state of forward. - next [][]int - // forward indicates the direction of - // path reconstruction. Forward - // reconstruction is used for Floyd- - // Warshall and reverse is used for - // Dijkstra. 
- forward bool -} - -func newAllShortest(nodes []graph.Node, forward bool) AllShortest { - indexOf := make(map[int]int, len(nodes)) - for i, n := range nodes { - indexOf[n.ID()] = i - } - dist := make([]float64, len(nodes)*len(nodes)) - for i := range dist { - dist[i] = math.Inf(1) - } - return AllShortest{ - nodes: nodes, - indexOf: indexOf, - - dist: mat64.NewDense(len(nodes), len(nodes), dist), - next: make([][]int, len(nodes)*len(nodes)), - forward: forward, - } -} - -func (p AllShortest) at(from, to int) (mid []int) { - return p.next[from+to*len(p.nodes)] -} - -func (p AllShortest) set(from, to int, weight float64, mid ...int) { - p.dist.Set(from, to, weight) - p.next[from+to*len(p.nodes)] = append(p.next[from+to*len(p.nodes)][:0], mid...) -} - -func (p AllShortest) add(from, to int, mid ...int) { -loop: // These are likely to be rare, so just loop over collisions. - for _, k := range mid { - for _, v := range p.next[from+to*len(p.nodes)] { - if k == v { - continue loop - } - } - p.next[from+to*len(p.nodes)] = append(p.next[from+to*len(p.nodes)], k) - } -} - -// Weight returns the weight of the minimum path between u and v. -func (p AllShortest) Weight(u, v graph.Node) float64 { - from, fromOK := p.indexOf[u.ID()] - to, toOK := p.indexOf[v.ID()] - if !fromOK || !toOK { - return math.Inf(1) - } - return p.dist.At(from, to) -} - -// Between returns a shortest path from u to v and the weight of the path. If more than -// one shortest path exists between u and v, a randomly chosen path will be returned and -// unique is returned false. If a cycle with zero weight exists in the path, it will not -// be included, but unique will be returned false. 
-func (p AllShortest) Between(u, v graph.Node) (path []graph.Node, weight float64, unique bool) { - from, fromOK := p.indexOf[u.ID()] - to, toOK := p.indexOf[v.ID()] - if !fromOK || !toOK || len(p.at(from, to)) == 0 { - if u.ID() == v.ID() { - return []graph.Node{p.nodes[from]}, 0, true - } - return nil, math.Inf(1), false - } - - seen := make([]int, len(p.nodes)) - for i := range seen { - seen[i] = -1 - } - var n graph.Node - if p.forward { - n = p.nodes[from] - seen[from] = 0 - } else { - n = p.nodes[to] - seen[to] = 0 - } - - path = []graph.Node{n} - weight = p.dist.At(from, to) - unique = true - - var next int - for from != to { - c := p.at(from, to) - if len(c) != 1 { - unique = false - next = c[rand.Intn(len(c))] - } else { - next = c[0] - } - if seen[next] >= 0 { - path = path[:seen[next]] - } - seen[next] = len(path) - path = append(path, p.nodes[next]) - if p.forward { - from = next - } else { - to = next - } - } - if !p.forward { - reverse(path) - } - - return path, weight, unique -} - -// AllBetween returns all shortest paths from u to v and the weight of the paths. Paths -// containing zero-weight cycles are not returned. 
-func (p AllShortest) AllBetween(u, v graph.Node) (paths [][]graph.Node, weight float64) { - from, fromOK := p.indexOf[u.ID()] - to, toOK := p.indexOf[v.ID()] - if !fromOK || !toOK || len(p.at(from, to)) == 0 { - if u.ID() == v.ID() { - return [][]graph.Node{{p.nodes[from]}}, 0 - } - return nil, math.Inf(1) - } - - var n graph.Node - if p.forward { - n = u - } else { - n = v - } - seen := make([]bool, len(p.nodes)) - paths = p.allBetween(from, to, seen, []graph.Node{n}, nil) - - return paths, p.dist.At(from, to) -} - -func (p AllShortest) allBetween(from, to int, seen []bool, path []graph.Node, paths [][]graph.Node) [][]graph.Node { - if p.forward { - seen[from] = true - } else { - seen[to] = true - } - if from == to { - if path == nil { - return paths - } - if !p.forward { - reverse(path) - } - return append(paths, path) - } - first := true - for _, n := range p.at(from, to) { - if seen[n] { - continue - } - if first { - path = append([]graph.Node(nil), path...) - first = false - } - if p.forward { - from = n - } else { - to = n - } - paths = p.allBetween(from, to, append([]bool(nil), seen...), append(path, p.nodes[n]), paths) - } - return paths -} - -func reverse(p []graph.Node) { - for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 { - p[i], p[j] = p[j], p[i] - } -} diff --git a/vendor/github.com/gonum/graph/path/spanning_tree.go b/vendor/github.com/gonum/graph/path/spanning_tree.go deleted file mode 100644 index 99b30cbc1..000000000 --- a/vendor/github.com/gonum/graph/path/spanning_tree.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package path - -import ( - "sort" - - "github.com/gonum/graph" - "github.com/gonum/graph/concrete" - "github.com/gonum/graph/internal" -) - -// EdgeListerGraph is an undirected graph than returns its complete set of edges. 
-type EdgeListerGraph interface { - graph.Undirected - Edges() []graph.Edge -} - -// Prim generates a minimum spanning tree of g by greedy tree extension, placing -// the result in the destination. The destination is not cleared first. -func Prim(dst graph.MutableUndirected, g EdgeListerGraph) { - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - - nlist := g.Nodes() - - if nlist == nil || len(nlist) == 0 { - return - } - - dst.AddNode(nlist[0]) - remainingNodes := make(internal.IntSet) - for _, node := range nlist[1:] { - remainingNodes.Add(node.ID()) - } - - edgeList := g.Edges() - for remainingNodes.Count() != 0 { - var edges []concrete.WeightedEdge - for _, edge := range edgeList { - if (dst.Has(edge.From()) && remainingNodes.Has(edge.To().ID())) || - (dst.Has(edge.To()) && remainingNodes.Has(edge.From().ID())) { - - edges = append(edges, concrete.WeightedEdge{Edge: edge, Cost: weight(edge)}) - } - } - - sort.Sort(byWeight(edges)) - myEdge := edges[0] - - dst.SetEdge(myEdge.Edge, myEdge.Cost) - remainingNodes.Remove(myEdge.Edge.From().ID()) - } - -} - -// Kruskal generates a minimum spanning tree of g by greedy tree coalesence, placing -// the result in the destination. The destination is not cleared first. 
-func Kruskal(dst graph.MutableUndirected, g EdgeListerGraph) { - var weight graph.WeightFunc - if g, ok := g.(graph.Weighter); ok { - weight = g.Weight - } else { - weight = graph.UniformCost - } - - edgeList := g.Edges() - edges := make([]concrete.WeightedEdge, 0, len(edgeList)) - for _, edge := range edgeList { - edges = append(edges, concrete.WeightedEdge{Edge: edge, Cost: weight(edge)}) - } - - sort.Sort(byWeight(edges)) - - ds := newDisjointSet() - for _, node := range g.Nodes() { - ds.makeSet(node.ID()) - } - - for _, edge := range edges { - // The disjoint set doesn't really care for which is head and which is tail so this - // should work fine without checking both ways - if s1, s2 := ds.find(edge.Edge.From().ID()), ds.find(edge.Edge.To().ID()); s1 != s2 { - ds.union(s1, s2) - dst.SetEdge(edge.Edge, edge.Cost) - } - } -} - -type byWeight []concrete.WeightedEdge - -func (e byWeight) Len() int { - return len(e) -} - -func (e byWeight) Less(i, j int) bool { - return e[i].Cost < e[j].Cost -} - -func (e byWeight) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} diff --git a/vendor/github.com/gonum/graph/topo/bron_kerbosch.go b/vendor/github.com/gonum/graph/topo/bron_kerbosch.go deleted file mode 100644 index 5e30d5bba..000000000 --- a/vendor/github.com/gonum/graph/topo/bron_kerbosch.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "github.com/gonum/graph" - "github.com/gonum/graph/internal" -) - -// VertexOrdering returns the vertex ordering and the k-cores of -// the undirected graph g. 
-func VertexOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { - nodes := g.Nodes() - - // The algorithm used here is essentially as described at - // http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710 - - // Initialize an output list L. - var l []graph.Node - - // Compute a number d_v for each vertex v in G, - // the number of neighbors of v that are not already in L. - // Initially, these numbers are just the degrees of the vertices. - dv := make(map[int]int, len(nodes)) - var ( - maxDegree int - neighbours = make(map[int][]graph.Node) - ) - for _, n := range nodes { - adj := g.From(n) - neighbours[n.ID()] = adj - dv[n.ID()] = len(adj) - if len(adj) > maxDegree { - maxDegree = len(adj) - } - } - - // Initialize an array D such that D[i] contains a list of the - // vertices v that are not already in L for which d_v = i. - d := make([][]graph.Node, maxDegree+1) - for _, n := range nodes { - deg := dv[n.ID()] - d[deg] = append(d[deg], n) - } - - // Initialize k to 0. - k := 0 - // Repeat n times: - s := []int{0} - for _ = range nodes { // TODO(kortschak): Remove blank assignment when go1.3.3 is no longer supported. - // Scan the array cells D[0], D[1], ... until - // finding an i for which D[i] is nonempty. - var ( - i int - di []graph.Node - ) - for i, di = range d { - if len(di) != 0 { - break - } - } - - // Set k to max(k,i). - if i > k { - k = i - s = append(s, make([]int, k-len(s)+1)...) - } - - // Select a vertex v from D[i]. Add v to the - // beginning of L and remove it from D[i]. - var v graph.Node - v, d[i] = di[len(di)-1], di[:len(di)-1] - l = append(l, v) - s[k]++ - delete(dv, v.ID()) - - // For each neighbor w of v not already in L, - // subtract one from d_w and move w to the - // cell of D corresponding to the new value of d_w. 
- for _, w := range neighbours[v.ID()] { - dw, ok := dv[w.ID()] - if !ok { - continue - } - for i, n := range d[dw] { - if n.ID() == w.ID() { - d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1] - dw-- - d[dw] = append(d[dw], w) - break - } - } - dv[w.ID()] = dw - } - } - - for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 { - l[i], l[j] = l[j], l[i] - } - cores = make([][]graph.Node, len(s)) - offset := len(l) - for i, n := range s { - cores[i] = l[offset-n : offset] - offset -= n - } - return l, cores -} - -// BronKerbosch returns the set of maximal cliques of the undirected graph g. -func BronKerbosch(g graph.Undirected) [][]graph.Node { - nodes := g.Nodes() - - // The algorithm used here is essentially BronKerbosch3 as described at - // http://en.wikipedia.org/w/index.php?title=Bron%E2%80%93Kerbosch_algorithm&oldid=656805858 - - p := make(internal.Set, len(nodes)) - for _, n := range nodes { - p.Add(n) - } - x := make(internal.Set) - var bk bronKerbosch - order, _ := VertexOrdering(g) - for _, v := range order { - neighbours := g.From(v) - nv := make(internal.Set, len(neighbours)) - for _, n := range neighbours { - nv.Add(n) - } - bk.maximalCliquePivot(g, []graph.Node{v}, make(internal.Set).Intersect(p, nv), make(internal.Set).Intersect(x, nv)) - p.Remove(v) - x.Add(v) - } - return bk -} - -type bronKerbosch [][]graph.Node - -func (bk *bronKerbosch) maximalCliquePivot(g graph.Undirected, r []graph.Node, p, x internal.Set) { - if len(p) == 0 && len(x) == 0 { - *bk = append(*bk, r) - return - } - - neighbours := bk.choosePivotFrom(g, p, x) - nu := make(internal.Set, len(neighbours)) - for _, n := range neighbours { - nu.Add(n) - } - for _, v := range p { - if nu.Has(v) { - continue - } - neighbours := g.From(v) - nv := make(internal.Set, len(neighbours)) - for _, n := range neighbours { - nv.Add(n) - } - - var found bool - for _, n := range r { - if n.ID() == v.ID() { - found = true - break - } - } - var sr []graph.Node - if !found { - sr = 
append(r[:len(r):len(r)], v) - } - - bk.maximalCliquePivot(g, sr, make(internal.Set).Intersect(p, nv), make(internal.Set).Intersect(x, nv)) - p.Remove(v) - x.Add(v) - } -} - -func (*bronKerbosch) choosePivotFrom(g graph.Undirected, p, x internal.Set) (neighbors []graph.Node) { - // TODO(kortschak): Investigate the impact of pivot choice that maximises - // |p ⋂ neighbours(u)| as a function of input size. Until then, leave as - // compile time option. - if !tomitaTanakaTakahashi { - for _, n := range p { - return g.From(n) - } - for _, n := range x { - return g.From(n) - } - panic("bronKerbosch: empty set") - } - - var ( - max = -1 - pivot graph.Node - ) - maxNeighbors := func(s internal.Set) { - outer: - for _, u := range s { - nb := g.From(u) - c := len(nb) - if c <= max { - continue - } - for n := range nb { - if _, ok := p[n]; ok { - continue - } - c-- - if c <= max { - continue outer - } - } - max = c - pivot = u - neighbors = nb - } - } - maxNeighbors(p) - maxNeighbors(x) - if pivot == nil { - panic("bronKerbosch: empty set") - } - return neighbors -} diff --git a/vendor/github.com/gonum/graph/topo/johnson_cycles.go b/vendor/github.com/gonum/graph/topo/johnson_cycles.go deleted file mode 100644 index 36d4cbd0a..000000000 --- a/vendor/github.com/gonum/graph/topo/johnson_cycles.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "sort" - - "github.com/gonum/graph" - "github.com/gonum/graph/internal" -) - -// johnson implements Johnson's "Finding all the elementary -// circuits of a directed graph" algorithm. SIAM J. Comput. 4(1):1975. -// -// Comments in the johnson methods are kept in sync with the comments -// and labels from the paper. -type johnson struct { - adjacent johnsonGraph // SCC adjacency list. - b []internal.IntSet // Johnson's "B-list". 
- blocked []bool - s int - - stack []graph.Node - - result [][]graph.Node -} - -// CyclesIn returns the set of elementary cycles in the graph g. -func CyclesIn(g graph.Directed) [][]graph.Node { - jg := johnsonGraphFrom(g) - j := johnson{ - adjacent: jg, - b: make([]internal.IntSet, len(jg.orig)), - blocked: make([]bool, len(jg.orig)), - } - - // len(j.nodes) is the order of g. - for j.s < len(j.adjacent.orig)-1 { - // We use the previous SCC adjacency to reduce the work needed. - sccs := TarjanSCC(j.adjacent.subgraph(j.s)) - // A_k = adjacency structure of strong component K with least - // vertex in subgraph of G induced by {s, s+1, ... ,n}. - j.adjacent = j.adjacent.sccSubGraph(sccs, 2) // Only allow SCCs with >= 2 vertices. - if j.adjacent.order() == 0 { - break - } - - // s = least vertex in V_k - if s := j.adjacent.leastVertexIndex(); s < j.s { - j.s = s - } - for i, v := range j.adjacent.orig { - if !j.adjacent.nodes.Has(v.ID()) { - continue - } - if len(j.adjacent.succ[v.ID()]) > 0 { - j.blocked[i] = false - j.b[i] = make(internal.IntSet) - } - } - //L3: - _ = j.circuit(j.s) - j.s++ - } - - return j.result -} - -// circuit is the CIRCUIT sub-procedure in the paper. -func (j *johnson) circuit(v int) bool { - f := false - n := j.adjacent.orig[v] - j.stack = append(j.stack, n) - j.blocked[v] = true - - //L1: - for w := range j.adjacent.succ[n.ID()] { - w = j.adjacent.indexOf(w) - if w == j.s { - // Output circuit composed of stack followed by s. - r := make([]graph.Node, len(j.stack)+1) - copy(r, j.stack) - r[len(r)-1] = j.adjacent.orig[j.s] - j.result = append(j.result, r) - f = true - } else if !j.blocked[w] { - if j.circuit(w) { - f = true - } - } - } - - //L2: - if f { - j.unblock(v) - } else { - for w := range j.adjacent.succ[n.ID()] { - j.b[j.adjacent.indexOf(w)].Add(v) - } - } - j.stack = j.stack[:len(j.stack)-1] - - return f -} - -// unblock is the UNBLOCK sub-procedure in the paper. 
-func (j *johnson) unblock(u int) { - j.blocked[u] = false - for w := range j.b[u] { - j.b[u].Remove(w) - if j.blocked[w] { - j.unblock(w) - } - } -} - -// johnsonGraph is an edge list representation of a graph with helpers -// necessary for Johnson's algorithm -type johnsonGraph struct { - // Keep the original graph nodes and a - // look-up to into the non-sparse - // collection of potentially sparse IDs. - orig []graph.Node - index map[int]int - - nodes internal.IntSet - succ map[int]internal.IntSet -} - -// johnsonGraphFrom returns a deep copy of the graph g. -func johnsonGraphFrom(g graph.Directed) johnsonGraph { - nodes := g.Nodes() - sort.Sort(byID(nodes)) - c := johnsonGraph{ - orig: nodes, - index: make(map[int]int, len(nodes)), - - nodes: make(internal.IntSet, len(nodes)), - succ: make(map[int]internal.IntSet), - } - for i, u := range nodes { - c.index[u.ID()] = i - for _, v := range g.From(u) { - if c.succ[u.ID()] == nil { - c.succ[u.ID()] = make(internal.IntSet) - c.nodes.Add(u.ID()) - } - c.nodes.Add(v.ID()) - c.succ[u.ID()].Add(v.ID()) - } - } - return c -} - -type byID []graph.Node - -func (n byID) Len() int { return len(n) } -func (n byID) Less(i, j int) bool { return n[i].ID() < n[j].ID() } -func (n byID) Swap(i, j int) { n[i], n[j] = n[j], n[i] } - -// order returns the order of the graph. -func (g johnsonGraph) order() int { return g.nodes.Count() } - -// indexOf returns the index of the retained node for the given node ID. -func (g johnsonGraph) indexOf(id int) int { - return g.index[id] -} - -// leastVertexIndex returns the index into orig of the least vertex. -func (g johnsonGraph) leastVertexIndex() int { - for _, v := range g.orig { - if g.nodes.Has(v.ID()) { - return g.indexOf(v.ID()) - } - } - panic("johnsonCycles: empty set") -} - -// subgraph returns a subgraph of g induced by {s, s+1, ... , n}. The -// subgraph is destructively generated in g. 
-func (g johnsonGraph) subgraph(s int) johnsonGraph { - sn := g.orig[s].ID() - for u, e := range g.succ { - if u < sn { - g.nodes.Remove(u) - delete(g.succ, u) - continue - } - for v := range e { - if v < sn { - g.succ[u].Remove(v) - } - } - } - return g -} - -// sccSubGraph returns the graph of the tarjan's strongly connected -// components with each SCC containing at least min vertices. -// sccSubGraph returns nil if there is no SCC with at least min -// members. -func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { - if len(g.nodes) == 0 { - g.nodes = nil - g.succ = nil - return g - } - sub := johnsonGraph{ - orig: g.orig, - index: g.index, - nodes: make(internal.IntSet), - succ: make(map[int]internal.IntSet), - } - - var n int - for _, scc := range sccs { - if len(scc) < min { - continue - } - n++ - for _, u := range scc { - for _, v := range scc { - if _, ok := g.succ[u.ID()][v.ID()]; ok { - if sub.succ[u.ID()] == nil { - sub.succ[u.ID()] = make(internal.IntSet) - sub.nodes.Add(u.ID()) - } - sub.nodes.Add(v.ID()) - sub.succ[u.ID()].Add(v.ID()) - } - } - } - } - if n == 0 { - g.nodes = nil - g.succ = nil - return g - } - - return sub -} - -// Nodes is required to satisfy Tarjan. -func (g johnsonGraph) Nodes() []graph.Node { - n := make([]graph.Node, 0, len(g.nodes)) - for id := range g.nodes { - n = append(n, johnsonGraphNode(id)) - } - return n -} - -// Successors is required to satisfy Tarjan. 
-func (g johnsonGraph) From(n graph.Node) []graph.Node { - adj := g.succ[n.ID()] - if len(adj) == 0 { - return nil - } - succ := make([]graph.Node, 0, len(adj)) - for n := range adj { - succ = append(succ, johnsonGraphNode(n)) - } - return succ -} - -func (johnsonGraph) Has(graph.Node) bool { - panic("search: unintended use of johnsonGraph") -} -func (johnsonGraph) HasEdge(_, _ graph.Node) bool { - panic("search: unintended use of johnsonGraph") -} -func (johnsonGraph) Edge(_, _ graph.Node) graph.Edge { - panic("search: unintended use of johnsonGraph") -} -func (johnsonGraph) HasEdgeFromTo(_, _ graph.Node) bool { - panic("search: unintended use of johnsonGraph") -} -func (johnsonGraph) To(graph.Node) []graph.Node { - panic("search: unintended use of johnsonGraph") -} - -type johnsonGraphNode int - -func (n johnsonGraphNode) ID() int { return int(n) } diff --git a/vendor/github.com/gonum/graph/topo/non_tomita_choice.go b/vendor/github.com/gonum/graph/topo/non_tomita_choice.go deleted file mode 100644 index de09ebd88..000000000 --- a/vendor/github.com/gonum/graph/topo/non_tomita_choice.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !tomita - -package topo - -const tomitaTanakaTakahashi = false diff --git a/vendor/github.com/gonum/graph/topo/tarjan.go b/vendor/github.com/gonum/graph/topo/tarjan.go deleted file mode 100644 index 908358cd4..000000000 --- a/vendor/github.com/gonum/graph/topo/tarjan.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "fmt" - "sort" - - "github.com/gonum/graph" - "github.com/gonum/graph/internal" -) - -// Unorderable is an error containing sets of unorderable graph.Nodes. 
-type Unorderable [][]graph.Node - -// Error satisfies the error interface. -func (e Unorderable) Error() string { - const maxNodes = 10 - var n int - for _, c := range e { - n += len(c) - } - if n > maxNodes { - // Don't return errors that are too long. - return fmt.Sprintf("topo: no topological ordering: %d nodes in %d cyclic components", n, len(e)) - } - return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) -} - -// Sort performs a topological sort of the directed graph g returning the 'from' to 'to' -// sort order. If a topological ordering is not possible, an Unorderable error is returned -// listing cyclic components in g with each cyclic component's members sorted by ID. When -// an Unorderable error is returned, each cyclic component's topological position within -// the sorted nodes is marked with a nil graph.Node. -func Sort(g graph.Directed) (sorted []graph.Node, err error) { - sccs := TarjanSCC(g) - sorted = make([]graph.Node, 0, len(sccs)) - var sc Unorderable - for _, s := range sccs { - if len(s) != 1 { - sort.Sort(byID(s)) - sc = append(sc, s) - sorted = append(sorted, nil) - continue - } - sorted = append(sorted, s[0]) - } - if sc != nil { - for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { - sc[i], sc[j] = sc[j], sc[i] - } - err = sc - } - reverse(sorted) - return sorted, err -} - -func reverse(p []graph.Node) { - for i, j := 0, len(p)-1; i < j; i, j = i+1, j-1 { - p[i], p[j] = p[j], p[i] - } -} - -// TarjanSCC returns the strongly connected components of the graph g using Tarjan's algorithm. -// -// A strongly connected component of a graph is a set of vertices where it's possible to reach any -// vertex in the set from any other (meaning there's a cycle between them.) -// -// Generally speaking, a directed graph where the number of strongly connected components is equal -// to the number of nodes is acyclic, unless you count reflexive edges as a cycle (which requires -// only a little extra testing.) 
-// -func TarjanSCC(g graph.Directed) [][]graph.Node { - nodes := g.Nodes() - t := tarjan{ - succ: g.From, - - indexTable: make(map[int]int, len(nodes)), - lowLink: make(map[int]int, len(nodes)), - onStack: make(internal.IntSet, len(nodes)), - } - for _, v := range nodes { - if t.indexTable[v.ID()] == 0 { - t.strongconnect(v) - } - } - return t.sccs -} - -// tarjan implements Tarjan's strongly connected component finding -// algorithm. The implementation is from the pseudocode at -// -// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm?oldid=642744644 -// -type tarjan struct { - succ func(graph.Node) []graph.Node - - index int - indexTable map[int]int - lowLink map[int]int - onStack internal.IntSet - - stack []graph.Node - - sccs [][]graph.Node -} - -// strongconnect is the strongconnect function described in the -// wikipedia article. -func (t *tarjan) strongconnect(v graph.Node) { - vID := v.ID() - - // Set the depth index for v to the smallest unused index. - t.index++ - t.indexTable[vID] = t.index - t.lowLink[vID] = t.index - t.stack = append(t.stack, v) - t.onStack.Add(vID) - - // Consider successors of v. - for _, w := range t.succ(v) { - wID := w.ID() - if t.indexTable[wID] == 0 { - // Successor w has not yet been visited; recur on it. - t.strongconnect(w) - t.lowLink[vID] = min(t.lowLink[vID], t.lowLink[wID]) - } else if t.onStack.Has(wID) { - // Successor w is in stack s and hence in the current SCC. - t.lowLink[vID] = min(t.lowLink[vID], t.indexTable[wID]) - } - } - - // If v is a root node, pop the stack and generate an SCC. - if t.lowLink[vID] == t.indexTable[vID] { - // Start a new strongly connected component. - var ( - scc []graph.Node - w graph.Node - ) - for { - w, t.stack = t.stack[len(t.stack)-1], t.stack[:len(t.stack)-1] - t.onStack.Remove(w.ID()) - // Add w to current strongly connected component. - scc = append(scc, w) - if w.ID() == vID { - break - } - } - // Output the current strongly connected component. 
- t.sccs = append(t.sccs, scc) - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/gonum/graph/topo/tomita_choice.go b/vendor/github.com/gonum/graph/topo/tomita_choice.go deleted file mode 100644 index d4eca6256..000000000 --- a/vendor/github.com/gonum/graph/topo/tomita_choice.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build tomita - -package topo - -const tomitaTanakaTakahashi = true diff --git a/vendor/github.com/gonum/graph/topo/topo.go b/vendor/github.com/gonum/graph/topo/topo.go deleted file mode 100644 index f4c3a2a1f..000000000 --- a/vendor/github.com/gonum/graph/topo/topo.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package topo - -import ( - "github.com/gonum/graph" - "github.com/gonum/graph/traverse" -) - -// IsPathIn returns whether path is a path in g. -// -// As special cases, IsPathIn returns true for a zero length path or for -// a path of length 1 when the node in path exists in the graph. -func IsPathIn(g graph.Graph, path []graph.Node) bool { - switch len(path) { - case 0: - return true - case 1: - return g.Has(path[0]) - default: - var canReach func(u, v graph.Node) bool - switch g := g.(type) { - case graph.Directed: - canReach = g.HasEdgeFromTo - default: - canReach = g.HasEdge - } - - for i, u := range path[:len(path)-1] { - if !canReach(u, path[i+1]) { - return false - } - } - return true - } -} - -// ConnectedComponents returns the connected components of the undirected graph g. 
-func ConnectedComponents(g graph.Undirected) [][]graph.Node { - var ( - w traverse.DepthFirst - c []graph.Node - cc [][]graph.Node - ) - during := func(n graph.Node) { - c = append(c, n) - } - after := func() { - cc = append(cc, []graph.Node(nil)) - cc[len(cc)-1] = append(cc[len(cc)-1], c...) - c = c[:0] - } - w.WalkAll(g, nil, after, during) - - return cc -} diff --git a/vendor/github.com/gonum/graph/traverse/traverse.go b/vendor/github.com/gonum/graph/traverse/traverse.go deleted file mode 100644 index bb0fdad1b..000000000 --- a/vendor/github.com/gonum/graph/traverse/traverse.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package traverse provides basic graph traversal primitives. -package traverse - -import ( - "github.com/gonum/graph" - "github.com/gonum/graph/internal" -) - -// BreadthFirst implements stateful breadth-first graph traversal. -type BreadthFirst struct { - EdgeFilter func(graph.Edge) bool - Visit func(u, v graph.Node) - queue internal.NodeQueue - visited internal.IntSet -} - -// Walk performs a breadth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The -// traversal follows edges for which EdgeFilter(edge) is true and returns the first node -// for which until(node, depth) is true. During the traversal, if the Visit field is -// non-nil, it is called with the nodes joined by each followed edge. 
-func (b *BreadthFirst) Walk(g graph.Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node { - if b.visited == nil { - b.visited = make(internal.IntSet) - } - b.queue.Enqueue(from) - b.visited.Add(from.ID()) - - var ( - depth int - children int - untilNext = 1 - ) - for b.queue.Len() > 0 { - t := b.queue.Dequeue() - if until != nil && until(t, depth) { - return t - } - for _, n := range g.From(t) { - if b.EdgeFilter != nil && !b.EdgeFilter(g.Edge(t, n)) { - continue - } - if b.visited.Has(n.ID()) { - continue - } - if b.Visit != nil { - b.Visit(t, n) - } - b.visited.Add(n.ID()) - children++ - b.queue.Enqueue(n) - } - if untilNext--; untilNext == 0 { - depth++ - untilNext = children - children = 0 - } - } - - return nil -} - -// WalkAll calls Walk for each unvisited node of the graph g using edges independent -// of their direction. The functions before and after are called prior to commencing -// and after completing each walk if they are non-nil respectively. The function -// during is called on each node as it is traversed. -func (b *BreadthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) { - b.Reset() - for _, from := range g.Nodes() { - if b.Visited(from) { - continue - } - if before != nil { - before() - } - b.Walk(g, from, func(n graph.Node, _ int) bool { - if during != nil { - during(n) - } - return false - }) - if after != nil { - after() - } - } -} - -// Visited returned whether the node n was visited during a traverse. -func (b *BreadthFirst) Visited(n graph.Node) bool { - _, ok := b.visited[n.ID()] - return ok -} - -// Reset resets the state of the traverser for reuse. -func (b *BreadthFirst) Reset() { - b.queue.Reset() - b.visited = nil -} - -// DepthFirst implements stateful depth-first graph traversal. 
-type DepthFirst struct { - EdgeFilter func(graph.Edge) bool - Visit func(u, v graph.Node) - stack internal.NodeStack - visited internal.IntSet -} - -// Walk performs a depth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The -// traversal follows edges for which EdgeFilter(edge) is true and returns the first node -// for which until(node) is true. During the traversal, if the Visit field is non-nil, it -// is called with the nodes joined by each followed edge. -func (d *DepthFirst) Walk(g graph.Graph, from graph.Node, until func(graph.Node) bool) graph.Node { - if d.visited == nil { - d.visited = make(internal.IntSet) - } - d.stack.Push(from) - d.visited.Add(from.ID()) - - for d.stack.Len() > 0 { - t := d.stack.Pop() - if until != nil && until(t) { - return t - } - for _, n := range g.From(t) { - if d.EdgeFilter != nil && !d.EdgeFilter(g.Edge(t, n)) { - continue - } - if d.visited.Has(n.ID()) { - continue - } - if d.Visit != nil { - d.Visit(t, n) - } - d.visited.Add(n.ID()) - d.stack.Push(n) - } - } - - return nil -} - -// WalkAll calls Walk for each unvisited node of the graph g using edges independent -// of their direction. The functions before and after are called prior to commencing -// and after completing each walk if they are non-nil respectively. The function -// during is called on each node as it is traversed. -func (d *DepthFirst) WalkAll(g graph.Undirected, before, after func(), during func(graph.Node)) { - d.Reset() - for _, from := range g.Nodes() { - if d.Visited(from) { - continue - } - if before != nil { - before() - } - d.Walk(g, from, func(n graph.Node) bool { - if during != nil { - during(n) - } - return false - }) - if after != nil { - after() - } - } -} - -// Visited returned whether the node n was visited during a traverse. 
-func (d *DepthFirst) Visited(n graph.Node) bool { - _, ok := d.visited[n.ID()] - return ok -} - -// Reset resets the state of the traverser for reuse. -func (d *DepthFirst) Reset() { - d.stack = d.stack[:0] - d.visited = nil -} diff --git a/vendor/github.com/gonum/internal/asm/caxpy.go b/vendor/github.com/gonum/internal/asm/caxpy.go deleted file mode 100644 index 80d802adf..000000000 --- a/vendor/github.com/gonum/internal/asm/caxpy.go +++ /dev/null @@ -1,22 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -// The extra z parameter is needed because of floats.AddScaledTo -func CaxpyUnitary(alpha complex64, x, y, z []complex64) { - for i, v := range x { - z[i] = alpha*v + y[i] - } -} - -func CaxpyInc(alpha complex64, x, y []complex64, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} diff --git a/vendor/github.com/gonum/internal/asm/cdotc.go b/vendor/github.com/gonum/internal/asm/cdotc.go deleted file mode 100644 index ed999e5fa..000000000 --- a/vendor/github.com/gonum/internal/asm/cdotc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package asm - -func CdotcUnitary(x, y []complex64) (sum complex64) { - for i, v := range x { - sum += y[i] * conj(v) - } - return -} - -func CdotcInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { - for i := 0; i < int(n); i++ { - sum += y[iy] * conj(x[ix]) - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/internal/asm/cdotu.go b/vendor/github.com/gonum/internal/asm/cdotu.go deleted file mode 100644 index 3392ee251..000000000 --- a/vendor/github.com/gonum/internal/asm/cdotu.go +++ /dev/null @@ -1,23 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -func CdotuUnitary(x, y []complex64) (sum complex64) { - for i, v := range x { - sum += y[i] * v - } - return -} - -func CdotuInc(x, y []complex64, n, incX, incY, ix, iy uintptr) (sum complex64) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/internal/asm/complex b/vendor/github.com/gonum/internal/asm/complex deleted file mode 100755 index b26e4e601..000000000 --- a/vendor/github.com/gonum/internal/asm/complex +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -# Copyright ©2015 The gonum Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -echo Generating zdotu.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > zdotu.go -cat ddot.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> complex128' \ -| sed 's/Ddot/Zdotu/' \ ->> zdotu.go - -echo Generating zdotc.go -echo -e '// Generated code do not edit. 
Run `go generate`.\n' > zdotc.go -cat ddot.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> complex128' \ -| gofmt -r 'y[i] * v -> y[i] * cmplx.Conj(v)' \ -| sed 's/Ddot/Zdotc/' \ -| goimports \ ->> zdotc.go - -echo Generating zaxpy.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > zaxpy.go -cat daxpy.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> complex128' \ -| sed 's/Daxpy/Zaxpy/' \ ->> zaxpy.go - -echo Generating cdotu.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > cdotu.go -cat ddot.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> complex64' \ -| sed 's/Ddot/Cdotu/' \ ->> cdotu.go - -echo Generating cdotc.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > cdotc.go -cat ddot.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> complex64' \ -| gofmt -r 'y[i] * v -> y[i] * conj(v)' \ -| sed 's/Ddot/Cdotc/' \ -| goimports \ ->> cdotc.go - -echo Generating caxpy.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > caxpy.go -cat daxpy.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> complex64' \ -| sed 's/Daxpy/Caxpy/' \ ->> caxpy.go - diff --git a/vendor/github.com/gonum/internal/asm/conj.go b/vendor/github.com/gonum/internal/asm/conj.go deleted file mode 100644 index 1cadb2a5c..000000000 --- a/vendor/github.com/gonum/internal/asm/conj.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -func conj(c complex64) complex64 { return complex(real(c), -imag(c)) } diff --git a/vendor/github.com/gonum/internal/asm/daxpy.go b/vendor/github.com/gonum/internal/asm/daxpy.go deleted file mode 100644 index 24979fc64..000000000 --- a/vendor/github.com/gonum/internal/asm/daxpy.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !amd64 noasm - -package asm - -// The extra z parameter is needed because of floats.AddScaledTo -func DaxpyUnitary(alpha float64, x, y, z []float64) { - for i, v := range x { - z[i] = alpha*v + y[i] - } -} - -func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} diff --git a/vendor/github.com/gonum/internal/asm/daxpy_amd64.go b/vendor/github.com/gonum/internal/asm/daxpy_amd64.go deleted file mode 100644 index d1aeacfed..000000000 --- a/vendor/github.com/gonum/internal/asm/daxpy_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !noasm - -package asm - -// The extra z parameter is needed because of floats.AddScaledTo -func DaxpyUnitary(alpha float64, x, y, z []float64) - -func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) diff --git a/vendor/github.com/gonum/internal/asm/daxpy_amd64.s b/vendor/github.com/gonum/internal/asm/daxpy_amd64.s deleted file mode 100644 index 18f2d3c7f..000000000 --- a/vendor/github.com/gonum/internal/asm/daxpy_amd64.s +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -//+build !noasm - -// TODO(fhs): use textflag.h after we drop Go 1.3 support -//#include "textflag.h" -// Don't insert stack check preamble. -#define NOSPLIT 4 - - -// func DaxpyUnitary(alpha float64, x, y, z []float64) -// This function assumes len(y) >= len(x). 
-TEXT ·DaxpyUnitary(SB),NOSPLIT,$0 - MOVHPD alpha+0(FP), X7 - MOVLPD alpha+0(FP), X7 - MOVQ x_len+16(FP), DI // n = len(x) - MOVQ x+8(FP), R8 - MOVQ y+32(FP), R9 - MOVQ z+56(FP), R10 - - MOVQ $0, SI // i = 0 - SUBQ $2, DI // n -= 2 - JL V1 // if n < 0 goto V1 - -U1: // n >= 0 - // y[i] += alpha * x[i] unrolled 2x. - MOVUPD 0(R8)(SI*8), X0 - MOVUPD 0(R9)(SI*8), X1 - MULPD X7, X0 - ADDPD X0, X1 - MOVUPD X1, 0(R10)(SI*8) - - ADDQ $2, SI // i += 2 - SUBQ $2, DI // n -= 2 - JGE U1 // if n >= 0 goto U1 - -V1: - ADDQ $2, DI // n += 2 - JLE E1 // if n <= 0 goto E1 - - // y[i] += alpha * x[i] for last iteration if n is odd. - MOVSD 0(R8)(SI*8), X0 - MOVSD 0(R9)(SI*8), X1 - MULSD X7, X0 - ADDSD X0, X1 - MOVSD X1, 0(R10)(SI*8) - -E1: - RET - - -// func DaxpyInc(alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) -TEXT ·DaxpyInc(SB),NOSPLIT,$0 - MOVHPD alpha+0(FP), X7 - MOVLPD alpha+0(FP), X7 - MOVQ x+8(FP), R8 - MOVQ y+32(FP), R9 - MOVQ n+56(FP), CX - MOVQ incX+64(FP), R11 - MOVQ incY+72(FP), R12 - MOVQ ix+80(FP), SI - MOVQ iy+88(FP), DI - - MOVQ SI, AX // nextX = ix - MOVQ DI, BX // nextY = iy - ADDQ R11, AX // nextX += incX - ADDQ R12, BX // nextY += incX - SHLQ $1, R11 // indX *= 2 - SHLQ $1, R12 // indY *= 2 - - SUBQ $2, CX // n -= 2 - JL V2 // if n < 0 goto V2 - -U2: // n >= 0 - // y[i] += alpha * x[i] unrolled 2x. - MOVHPD 0(R8)(SI*8), X0 - MOVHPD 0(R9)(DI*8), X1 - MOVLPD 0(R8)(AX*8), X0 - MOVLPD 0(R9)(BX*8), X1 - - MULPD X7, X0 - ADDPD X0, X1 - MOVHPD X1, 0(R9)(DI*8) - MOVLPD X1, 0(R9)(BX*8) - - ADDQ R11, SI // ix += incX - ADDQ R12, DI // iy += incY - ADDQ R11, AX // nextX += incX - ADDQ R12, BX // nextY += incY - - SUBQ $2, CX // n -= 2 - JGE U2 // if n >= 0 goto U2 - -V2: - ADDQ $2, CX // n += 2 - JLE E2 // if n <= 0 goto E2 - - // y[i] += alpha * x[i] for the last iteration if n is odd. 
- MOVSD 0(R8)(SI*8), X0 - MOVSD 0(R9)(DI*8), X1 - MULSD X7, X0 - ADDSD X0, X1 - MOVSD X1, 0(R9)(DI*8) - -E2: - RET diff --git a/vendor/github.com/gonum/internal/asm/ddot.go b/vendor/github.com/gonum/internal/asm/ddot.go deleted file mode 100644 index 7e6995799..000000000 --- a/vendor/github.com/gonum/internal/asm/ddot.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !amd64 noasm - -package asm - -func DdotUnitary(x, y []float64) (sum float64) { - for i, v := range x { - sum += y[i] * v - } - return -} - -func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/internal/asm/ddot_amd64.go b/vendor/github.com/gonum/internal/asm/ddot_amd64.go deleted file mode 100644 index 7fa634a6f..000000000 --- a/vendor/github.com/gonum/internal/asm/ddot_amd64.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !noasm - -package asm - -func DdotUnitary(x, y []float64) (sum float64) -func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) diff --git a/vendor/github.com/gonum/internal/asm/ddot_amd64.s b/vendor/github.com/gonum/internal/asm/ddot_amd64.s deleted file mode 100644 index a898bbba0..000000000 --- a/vendor/github.com/gonum/internal/asm/ddot_amd64.s +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-// -// Some of the loop unrolling code is copied from: -// http://golang.org/src/math/big/arith_amd64.s -// which is distributed under these terms: -// -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -//+build !noasm - -// TODO(fhs): use textflag.h after we drop Go 1.3 support -//#include "textflag.h" -// Don't insert stack check preamble. -#define NOSPLIT 4 - - -// func DdotUnitary(x, y []float64) (sum float64) -// This function assumes len(y) >= len(x). 
-TEXT ·DdotUnitary(SB),NOSPLIT,$0 - MOVQ x_len+8(FP), DI // n = len(x) - MOVQ x+0(FP), R8 - MOVQ y+24(FP), R9 - - MOVQ $0, SI // i = 0 - MOVSD $(0.0), X7 // sum = 0 - - SUBQ $2, DI // n -= 2 - JL V1 // if n < 0 goto V1 - -U1: // n >= 0 - // sum += x[i] * y[i] unrolled 2x. - MOVUPD 0(R8)(SI*8), X0 - MOVUPD 0(R9)(SI*8), X1 - MULPD X1, X0 - ADDPD X0, X7 - - ADDQ $2, SI // i += 2 - SUBQ $2, DI // n -= 2 - JGE U1 // if n >= 0 goto U1 - -V1: // n > 0 - ADDQ $2, DI // n += 2 - JLE E1 // if n <= 0 goto E1 - - // sum += x[i] * y[i] for last iteration if n is odd. - MOVSD 0(R8)(SI*8), X0 - MOVSD 0(R9)(SI*8), X1 - MULSD X1, X0 - ADDSD X0, X7 - -E1: - // Add the two sums together. - MOVSD X7, X0 - UNPCKHPD X7, X7 - ADDSD X0, X7 - MOVSD X7, sum+48(FP) // return final sum - RET - - -// func DdotInc(x, y []float64, n, incX, incY, ix, iy uintptr) (sum float64) -TEXT ·DdotInc(SB),NOSPLIT,$0 - MOVQ x+0(FP), R8 - MOVQ y+24(FP), R9 - MOVQ n+48(FP), CX - MOVQ incX+56(FP), R11 - MOVQ incY+64(FP), R12 - MOVQ ix+72(FP), R13 - MOVQ iy+80(FP), R14 - - MOVSD $(0.0), X7 // sum = 0 - LEAQ (R8)(R13*8), SI // p = &x[ix] - LEAQ (R9)(R14*8), DI // q = &y[ix] - SHLQ $3, R11 // incX *= sizeof(float64) - SHLQ $3, R12 // indY *= sizeof(float64) - - SUBQ $2, CX // n -= 2 - JL V2 // if n < 0 goto V2 - -U2: // n >= 0 - // sum += *p * *q unrolled 2x. - MOVHPD (SI), X0 - MOVHPD (DI), X1 - ADDQ R11, SI // p += incX - ADDQ R12, DI // q += incY - MOVLPD (SI), X0 - MOVLPD (DI), X1 - ADDQ R11, SI // p += incX - ADDQ R12, DI // q += incY - - MULPD X1, X0 - ADDPD X0, X7 - - SUBQ $2, CX // n -= 2 - JGE U2 // if n >= 0 goto U2 - -V2: - ADDQ $2, CX // n += 2 - JLE E2 // if n <= 0 goto E2 - - // sum += *p * *q for the last iteration if n is odd. - MOVSD (SI), X0 - MULSD (DI), X0 - ADDSD X0, X7 - -E2: - // Add the two sums together. 
- MOVSD X7, X0 - UNPCKHPD X7, X7 - ADDSD X0, X7 - MOVSD X7, sum+88(FP) // return final sum - RET - \ No newline at end of file diff --git a/vendor/github.com/gonum/internal/asm/dsdot.go b/vendor/github.com/gonum/internal/asm/dsdot.go deleted file mode 100644 index 84506890d..000000000 --- a/vendor/github.com/gonum/internal/asm/dsdot.go +++ /dev/null @@ -1,23 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -func DsdotUnitary(x, y []float32) (sum float64) { - for i, v := range x { - sum += float64(y[i]) * float64(v) - } - return -} - -func DsdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float64) { - for i := 0; i < int(n); i++ { - sum += float64(y[iy]) * float64(x[ix]) - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/internal/asm/generate.go b/vendor/github.com/gonum/internal/asm/generate.go deleted file mode 100644 index e25214051..000000000 --- a/vendor/github.com/gonum/internal/asm/generate.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate ./single_precision -//go:generate ./complex - -package asm diff --git a/vendor/github.com/gonum/internal/asm/saxpy.go b/vendor/github.com/gonum/internal/asm/saxpy.go deleted file mode 100644 index 3ef767f26..000000000 --- a/vendor/github.com/gonum/internal/asm/saxpy.go +++ /dev/null @@ -1,22 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package asm - -// The extra z parameter is needed because of floats.AddScaledTo -func SaxpyUnitary(alpha float32, x, y, z []float32) { - for i, v := range x { - z[i] = alpha*v + y[i] - } -} - -func SaxpyInc(alpha float32, x, y []float32, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} diff --git a/vendor/github.com/gonum/internal/asm/sdot.go b/vendor/github.com/gonum/internal/asm/sdot.go deleted file mode 100644 index 0cef5de4a..000000000 --- a/vendor/github.com/gonum/internal/asm/sdot.go +++ /dev/null @@ -1,23 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -func SdotUnitary(x, y []float32) (sum float32) { - for i, v := range x { - sum += y[i] * v - } - return -} - -func SdotInc(x, y []float32, n, incX, incY, ix, iy uintptr) (sum float32) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/internal/asm/single_precision b/vendor/github.com/gonum/internal/asm/single_precision deleted file mode 100755 index a937a9771..000000000 --- a/vendor/github.com/gonum/internal/asm/single_precision +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright ©2015 The gonum Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -echo Generating dsdot.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > dsdot.go -cat ddot.go \ -| grep -v '//+build' \ -| gofmt -r '[]float64 -> []float32' \ -| gofmt -r 'a * b -> float64(a) * float64(b)' \ -| sed 's/Ddot/Dsdot/' \ ->> dsdot.go - -echo Generating sdot.go -echo -e '// Generated code do not edit. 
Run `go generate`.\n' > sdot.go -cat ddot.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> float32' \ -| sed 's/Ddot/Sdot/' \ ->> sdot.go - -echo Generating saxpy.go -echo -e '// Generated code do not edit. Run `go generate`.\n' > saxpy.go -cat daxpy.go \ -| grep -v '//+build' \ -| gofmt -r 'float64 -> float32' \ -| sed 's/Daxpy/Saxpy/' \ ->> saxpy.go diff --git a/vendor/github.com/gonum/internal/asm/zaxpy.go b/vendor/github.com/gonum/internal/asm/zaxpy.go deleted file mode 100644 index 9478f2572..000000000 --- a/vendor/github.com/gonum/internal/asm/zaxpy.go +++ /dev/null @@ -1,22 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -// The extra z parameter is needed because of floats.AddScaledTo -func ZaxpyUnitary(alpha complex128, x, y, z []complex128) { - for i, v := range x { - z[i] = alpha*v + y[i] - } -} - -func ZaxpyInc(alpha complex128, x, y []complex128, n, incX, incY, ix, iy uintptr) { - for i := 0; i < int(n); i++ { - y[iy] += alpha * x[ix] - ix += incX - iy += incY - } -} diff --git a/vendor/github.com/gonum/internal/asm/zdotc.go b/vendor/github.com/gonum/internal/asm/zdotc.go deleted file mode 100644 index 7b8febcc0..000000000 --- a/vendor/github.com/gonum/internal/asm/zdotc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package asm - -import "math/cmplx" - -func ZdotcUnitary(x, y []complex128) (sum complex128) { - for i, v := range x { - sum += y[i] * cmplx.Conj(v) - } - return -} - -func ZdotcInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { - for i := 0; i < int(n); i++ { - sum += y[iy] * cmplx.Conj(x[ix]) - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/internal/asm/zdotu.go b/vendor/github.com/gonum/internal/asm/zdotu.go deleted file mode 100644 index 82c1fe2c0..000000000 --- a/vendor/github.com/gonum/internal/asm/zdotu.go +++ /dev/null @@ -1,23 +0,0 @@ -// Generated code do not edit. Run `go generate`. - -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asm - -func ZdotuUnitary(x, y []complex128) (sum complex128) { - for i, v := range x { - sum += y[i] * v - } - return -} - -func ZdotuInc(x, y []complex128, n, incX, incY, ix, iy uintptr) (sum complex128) { - for i := 0; i < int(n); i++ { - sum += y[iy] * x[ix] - ix += incX - iy += incY - } - return -} diff --git a/vendor/github.com/gonum/lapack/.gitignore b/vendor/github.com/gonum/lapack/.gitignore deleted file mode 100644 index cfb89652b..000000000 --- a/vendor/github.com/gonum/lapack/.gitignore +++ /dev/null @@ -1 +0,0 @@ -clapack/lapack.go diff --git a/vendor/github.com/gonum/lapack/.travis.yml b/vendor/github.com/gonum/lapack/.travis.yml deleted file mode 100644 index 543581720..000000000 --- a/vendor/github.com/gonum/lapack/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -sudo: required - -language: go - -env: - matrix: - - BLAS_LIB=OpenBLAS - - BLAS_LIB=gonum - # Does not currently link correctly. Note that there is an issue with drotgm in ATLAS. - # - BLAS_LIB=ATLAS - # If we can get multiarch builds on travis. - # There are some issues with the Accellerate implementation. 
- #- BLAS_LIB=Accellerate - -# Versions of go that are explicitly supported by gonum. -go: - - 1.5beta1 - - 1.3.3 - - 1.4.2 - -# Required for coverage. -before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - -# Install the appropriate BLAS library. -install: - - bash .travis/$TRAVIS_OS_NAME/$BLAS_LIB/install.sh - -# Get deps, build, test, and ensure the code is gofmt'ed. -# If we are building as gonum, then we have access to the coveralls api key, so we can run coverage as well. -script: - - if [[ "$BLAS_LIB" == "gonum" ]]; then pushd native; fi - - go get -d -t -v ./... - - go build -v ./... - - go test -v ./... - - diff <(gofmt -d *.go) <("") - - if [[ $TRAVIS_SECURE_ENV_VARS = "true" ]]; then bash -c "${TRAVIS_BUILD_DIR}/.travis/test-coverage.sh"; fi diff --git a/vendor/github.com/gonum/lapack/README.md b/vendor/github.com/gonum/lapack/README.md deleted file mode 100644 index b2fc7b211..000000000 --- a/vendor/github.com/gonum/lapack/README.md +++ /dev/null @@ -1,58 +0,0 @@ -Gonum LAPACK [![Build Status](https://travis-ci.org/gonum/lapack.svg?branch=master)](https://travis-ci.org/gonum/lapack) [![Coverage Status](https://img.shields.io/coveralls/gonum/lapack.svg)](https://coveralls.io/r/gonum/lapack) -====== - -A collection of packages to provide LAPACK functionality for the Go programming -language (http://golang.org). This provides a partial implementation in native go -and a wrapper using cgo to a c-based implementation. 
- -## Installation - -``` - go get github.com/gonum/blas -``` - - -Install OpenBLAS: -``` - git clone https://github.com/xianyi/OpenBLAS - cd OpenBLAS - make -``` - -Then install the lapack/cgo package: -```sh - CGO_LDFLAGS="-L/path/to/OpenBLAS -lopenblas" go install github.com/gonum/lapack/cgo -``` - -For Windows you can download binary packages for OpenBLAS at -http://sourceforge.net/projects/openblas/files/ - -If you want to use a different BLAS package such as the Intel MKL you can -adjust the `CGO_LDFLAGS` variable: -```sh - CGO_LDFLAGS="-lmkl_rt" go install github.com/gonum/lapack/cgo -``` - -## Packages - -### lapack - -Defines the LAPACK API based on http://www.netlib.org/lapack/lapacke.html - -### lapack/clapack - -Binding to a C implementation of the lapacke interface (e.g. OpenBLAS or intel MKL) - -The linker flags (i.e. path to the BLAS library and library name) might have to be adapted. - -The recommended (free) option for good performance on both linux and darwin is OpenBLAS. - -## Issues - -If you find any bugs, feel free to file an issue on the github issue tracker. Discussions on API changes, added features, code review, or similar requests are preferred on the gonum-dev Google Group. - -https://groups.google.com/forum/#!forum/gonum-dev - -## License - -Please see github.com/gonum/license for general license information, contributors, authors, etc on the Gonum suite of packages. diff --git a/vendor/github.com/gonum/lapack/lapack.go b/vendor/github.com/gonum/lapack/lapack.go deleted file mode 100644 index f6fbb511e..000000000 --- a/vendor/github.com/gonum/lapack/lapack.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package lapack - -import "github.com/gonum/blas" - -const None = 'N' - -type Job byte - -// CompSV determines if the singular values are to be computed in compact form. -type CompSV byte - -const ( - Compact CompSV = 'P' - Explicit CompSV = 'I' -) - -// Complex128 defines the public complex128 LAPACK API supported by gonum/lapack. -type Complex128 interface{} - -// Float64 defines the public float64 LAPACK API supported by gonum/lapack. -type Float64 interface { - Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) -} - -// Direct specifies the direction of the multiplication for the Householder matrix. -type Direct byte - -const ( - Forward Direct = 'F' // Reflectors are right-multiplied, H_1 * H_2 * ... * H_k - Backward Direct = 'B' // Reflectors are left-multiplied, H_k * ... * H_2 * H_1 -) - -// StoreV indicates the storage direction of elementary reflectors. -type StoreV byte - -const ( - ColumnWise StoreV = 'C' // Reflector stored in a column of the matrix. - RowWise StoreV = 'R' // Reflector stored in a row of the matrix. -) - -// MatrixNorm represents the kind of matrix norm to compute. -type MatrixNorm byte - -const ( - MaxAbs MatrixNorm = 'M' // max(abs(A(i,j))) ('M') - MaxColumnSum MatrixNorm = 'O' // Maximum column sum (one norm) ('1', 'O') - MaxRowSum MatrixNorm = 'I' // Maximum row sum (infinity norm) ('I', 'i') - NormFrob MatrixNorm = 'F' // Frobenium norm (sqrt of sum of squares) ('F', 'f', E, 'e') -) - -// MatrixType represents the kind of matrix represented in the data. -type MatrixType byte - -const ( - General MatrixType = 'G' // A dense matrix (like blas64.General). -) diff --git a/vendor/github.com/gonum/lapack/lapack64/lapack64.go b/vendor/github.com/gonum/lapack/lapack64/lapack64.go deleted file mode 100644 index fe13d587e..000000000 --- a/vendor/github.com/gonum/lapack/lapack64/lapack64.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lapack64 provides a set of convenient wrapper functions for LAPACK -// calls, as specified in the netlib standard (www.netlib.org). -// -// The native Go routines are used by default, and the Use function can be used -// to set an alternate implementation. -// -// If the type of matrix (General, Symmetric, etc.) is known and fixed, it is -// used in the wrapper signature. In many cases, however, the type of the matrix -// changes during the call to the routine, for example the matrix is symmetric on -// entry and is triangular on exit. In these cases the correct types should be checked -// in the documentation. -// -// The full set of Lapack functions is very large, and it is not clear that a -// full implementation is desirable, let alone feasible. Please open up an issue -// if there is a specific function you need and/or are willing to implement. -package lapack64 - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" - "github.com/gonum/lapack" - "github.com/gonum/lapack/native" -) - -var lapack64 lapack.Float64 = native.Implementation{} - -// Use sets the LAPACK float64 implementation to be used by subsequent BLAS calls. -// The default implementation is native.Implementation. -func Use(l lapack.Float64) { - lapack64 = l -} - -// Potrf computes the cholesky factorization of a. -// A = U^T * U if ul == blas.Upper -// A = L * L^T if ul == blas.Lower -// The underlying data between the input matrix and output matrix is shared. 
-func Potrf(a blas64.Symmetric) (t blas64.Triangular, ok bool) { - ok = lapack64.Dpotrf(a.Uplo, a.N, a.Data, a.Stride) - t.Uplo = a.Uplo - t.N = a.N - t.Data = a.Data - t.Stride = a.Stride - t.Diag = blas.NonUnit - return -} diff --git a/vendor/github.com/gonum/lapack/native/dgelq2.go b/vendor/github.com/gonum/lapack/native/dgelq2.go deleted file mode 100644 index 184769d71..000000000 --- a/vendor/github.com/gonum/lapack/native/dgelq2.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import "github.com/gonum/blas" - -// Dgelq2 computes the LQ factorization of the m×n matrix a. -// -// During Dgelq2, a is modified to contain the information to construct Q and L. -// The lower triangle of a contains the matrix L. The upper triangular elements -// (not including the diagonal) contain the elementary reflectors. Tau is modified -// to contain the reflector scales. Tau must have length of at least k = min(m,n) -// and this function will panic otherwise. -// -// See Dgeqr2 for a description of the elementary reflectors and orthonormal -// matrix Q. Q is constructed as a product of these elementary reflectors, -// Q = H_k ... H_2*H_1. -// -// Work is temporary storage of length at least m and this function will panic otherwise. 
-func (impl Implementation) Dgelq2(m, n int, a []float64, lda int, tau, work []float64) { - checkMatrix(m, n, a, lda) - k := min(m, n) - if len(tau) < k { - panic(badTau) - } - if len(work) < m { - panic(badWork) - } - for i := 0; i < k; i++ { - a[i*lda+i], tau[i] = impl.Dlarfg(n-i, a[i*lda+i], a[i*lda+min(i+1, n-1):], 1) - if i < m-1 { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(blas.Right, m-i-1, n-i, - a[i*lda+i:], 1, - tau[i], - a[(i+1)*lda+i:], lda, - work) - a[i*lda+i] = aii - } - } -} diff --git a/vendor/github.com/gonum/lapack/native/dgelqf.go b/vendor/github.com/gonum/lapack/native/dgelqf.go deleted file mode 100644 index bc52f3b06..000000000 --- a/vendor/github.com/gonum/lapack/native/dgelqf.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/lapack" -) - -// Dgelqf computes the LQ factorization of the m×n matrix a using a blocked -// algorithm. Please see the documentation for Dgelq2 for a description of the -// parameters at entry and exit. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m, and this function will panic otherwise. -// Dgelqf is a blocked LQ factorization, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dgelqf, -// the optimal work length will be stored into work[0]. -// -// tau must have length at least min(m,n), and this function will panic otherwise. 
-func (impl Implementation) Dgelqf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { - nb := impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) - lworkopt := m * max(nb, 1) - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - checkMatrix(m, n, a, lda) - if len(work) < lwork { - panic(shortWork) - } - if lwork < m { - panic(badWork) - } - k := min(m, n) - if len(tau) < k { - panic(badTau) - } - if k == 0 { - return - } - // Find the optimal blocking size based on the size of available memory - // and optimal machine parameters. - nbmin := 2 - var nx int - iws := m - ldwork := nb - if nb > 1 && k > nb { - nx = max(0, impl.Ilaenv(3, "DGELQF", " ", m, n, -1, -1)) - if nx < k { - iws = m * nb - if lwork < iws { - nb = lwork / m - nbmin = max(2, impl.Ilaenv(2, "DGELQF", " ", m, n, -1, -1)) - } - } - } - // Computed blocked LQ factorization. - var i int - if nb >= nbmin && nb < k && nx < k { - for i = 0; i < k-nx; i += nb { - ib := min(k-i, nb) - impl.Dgelq2(ib, n-i, a[i*lda+i:], lda, tau[i:], work) - if i+ib < m { - impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - work, ldwork) - impl.Dlarfb(blas.Right, blas.NoTrans, lapack.Forward, lapack.RowWise, - m-i-ib, n-i, ib, - a[i*lda+i:], lda, - work, ldwork, - a[(i+ib)*lda+i:], lda, - work[ib*ldwork:], ldwork) - } - } - } - // Perform unblocked LQ factorization on the remainder. - if i < k { - impl.Dgelq2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) - } -} diff --git a/vendor/github.com/gonum/lapack/native/dgels.go b/vendor/github.com/gonum/lapack/native/dgels.go deleted file mode 100644 index 7759561bc..000000000 --- a/vendor/github.com/gonum/lapack/native/dgels.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/lapack" -) - -// Dgels finds a minimum-norm solution based on the matrices a and b using the -// QR or LQ factorization. Dgels returns false if the matrix -// A is singular, and true if this solution was successfully found. -// -// The minimization problem solved depends on the input parameters. -// -// 1. If m >= n and trans == blas.NoTrans, Dgels finds X such that || A*X - B||_2 -// is minimized. -// 2. If m < n and trans == blas.NoTrans, Dgels finds the minimum norm solution of -// A * X = B. -// 3. If m >= n and trans == blas.Trans, Dgels finds the minimum norm solution of -// A^T * X = B. -// 4. If m < n and trans == blas.Trans, Dgels finds X such that || A*X - B||_2 -// is minimized. -// Note that the least-squares solutions (cases 1 and 3) perform the minimization -// per column of B. This is not the same as finding the minimum-norm matrix. -// -// The matrix a is a general matrix of size m×n and is modified during this call. -// The input matrix b is of size max(m,n)×nrhs, and serves two purposes. On entry, -// the elements of b specify the input matrix B. B has size m×nrhs if -// trans == blas.NoTrans, and n×nrhs if trans == blas.Trans. On exit, the -// leading submatrix of b contains the solution vectors X. If trans == blas.NoTrans, -// this submatrix is of size n×nrhs, and of size m×nrhs otherwise. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= max(m,n) + max(m,n,nrhs), and this function will panic -// otherwise. A longer work will enable blocked algorithms to be called. -// In the special case that lwork == -1, work[0] will be set to the optimal working -// length. 
-func (impl Implementation) Dgels(trans blas.Transpose, m, n, nrhs int, a []float64, lda int, b []float64, ldb int, work []float64, lwork int) bool { - notran := trans == blas.NoTrans - checkMatrix(m, n, a, lda) - mn := min(m, n) - checkMatrix(mn, nrhs, b, ldb) - - // Find optimal block size. - tpsd := true - if notran { - tpsd = false - } - var nb int - if m >= n { - nb = impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) - if tpsd { - nb = max(nb, impl.Ilaenv(1, "DORMQR", "LN", m, nrhs, n, -1)) - } else { - nb = max(nb, impl.Ilaenv(1, "DORMQR", "LT", m, nrhs, n, -1)) - } - } else { - nb = impl.Ilaenv(1, "DGELQF", " ", m, n, -1, -1) - if tpsd { - nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LT", n, nrhs, m, -1)) - } else { - nb = max(nb, impl.Ilaenv(1, "DORMLQ", "LN", n, nrhs, m, -1)) - } - } - if lwork == -1 { - work[0] = float64(max(1, mn+max(mn, nrhs)*nb)) - return true - } - - if len(work) < lwork { - panic(shortWork) - } - if lwork < mn+max(mn, nrhs) { - panic(badWork) - } - if m == 0 || n == 0 || nrhs == 0 { - impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) - return true - } - - // Scale the input matrices if they contain extreme values. 
- smlnum := dlamchS / dlamchP - bignum := 1 / smlnum - anrm := impl.Dlange(lapack.MaxAbs, m, n, a, lda, nil) - var iascl int - if anrm > 0 && anrm < smlnum { - impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, m, n, a, lda) - iascl = 1 - } else if anrm > bignum { - impl.Dlascl(lapack.General, 0, 0, anrm, bignum, m, n, a, lda) - } else if anrm == 0 { - // Matrix all zeros - impl.Dlaset(blas.All, max(m, n), nrhs, 0, 0, b, ldb) - return true - } - brow := m - if tpsd { - brow = n - } - bnrm := impl.Dlange(lapack.MaxAbs, brow, nrhs, b, ldb, nil) - ibscl := 0 - if bnrm > 0 && bnrm < smlnum { - impl.Dlascl(lapack.General, 0, 0, bnrm, smlnum, brow, nrhs, b, ldb) - ibscl = 1 - } else if bnrm > bignum { - impl.Dlascl(lapack.General, 0, 0, bnrm, bignum, brow, nrhs, b, ldb) - ibscl = 2 - } - - // Solve the minimization problem using a QR or an LQ decomposition. - var scllen int - if m >= n { - impl.Dgeqrf(m, n, a, lda, work, work[mn:], lwork-mn) - if !tpsd { - impl.Dormqr(blas.Left, blas.Trans, m, nrhs, n, - a, lda, - work, - b, ldb, - work[mn:], lwork-mn) - ok := impl.Dtrtrs(blas.Upper, blas.NoTrans, blas.NonUnit, n, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - scllen = n - } else { - ok := impl.Dtrtrs(blas.Upper, blas.Trans, blas.NonUnit, n, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - for i := n; i < m; i++ { - for j := 0; j < nrhs; j++ { - b[i*ldb+j] = 0 - } - } - impl.Dormqr(blas.Left, blas.NoTrans, m, nrhs, n, - a, lda, - work, - b, ldb, - work[mn:], lwork-mn) - scllen = m - } - } else { - impl.Dgelqf(m, n, a, lda, work, work[mn:], lwork-mn) - if !tpsd { - ok := impl.Dtrtrs(blas.Lower, blas.NoTrans, blas.NonUnit, - m, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - for i := m; i < n; i++ { - for j := 0; j < nrhs; j++ { - b[i*ldb+j] = 0 - } - } - impl.Dormlq(blas.Left, blas.Trans, n, nrhs, m, - a, lda, - work, - b, ldb, - work[mn:], lwork-mn) - scllen = n - } else { - impl.Dormlq(blas.Left, blas.NoTrans, n, nrhs, m, - a, lda, - work, - 
b, ldb, - work[mn:], lwork-mn) - ok := impl.Dtrtrs(blas.Lower, blas.Trans, blas.NonUnit, - m, nrhs, - a, lda, - b, ldb) - if !ok { - return false - } - } - } - - // Adjust answer vector based on scaling. - if iascl == 1 { - impl.Dlascl(lapack.General, 0, 0, anrm, smlnum, scllen, nrhs, b, ldb) - } - if iascl == 2 { - impl.Dlascl(lapack.General, 0, 0, anrm, bignum, scllen, nrhs, b, ldb) - } - if ibscl == 1 { - impl.Dlascl(lapack.General, 0, 0, smlnum, bnrm, scllen, nrhs, b, ldb) - } - if ibscl == 2 { - impl.Dlascl(lapack.General, 0, 0, bignum, bnrm, scllen, nrhs, b, ldb) - } - return true -} diff --git a/vendor/github.com/gonum/lapack/native/dgeqr2.go b/vendor/github.com/gonum/lapack/native/dgeqr2.go deleted file mode 100644 index efae4a775..000000000 --- a/vendor/github.com/gonum/lapack/native/dgeqr2.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import "github.com/gonum/blas" - -// Dgeqr2 computes a QR factorization of the m×n matrix a. -// -// In a QR factorization, Q is an m×m orthonormal matrix, and R is an -// upper triangular m×n matrix. -// -// During Dgeqr2, a is modified to contain the information to construct Q and R. -// The upper triangle of a contains the matrix R. The lower triangular elements -// (not including the diagonal) contain the elementary reflectors. Tau is modified -// to contain the reflector scales. Tau must have length at least k = min(m,n), and -// this function will panic otherwise. -// -// The ith elementary reflector can be explicitly constructed by first extracting -// the -// v[j] = 0 j < i -// v[j] = i j == i -// v[j] = a[i*lda+j] j > i -// and computing h_i = I - tau[i] * v * v^T. -// -// The orthonormal matrix Q can be constucted from a product of these elementary -// reflectors, Q = H_1*H_2 ... H_k, where k = min(m,n). 
-// -// Work is temporary storage of length at least n and this function will panic otherwise. -func (impl Implementation) Dgeqr2(m, n int, a []float64, lda int, tau, work []float64) { - // TODO(btracey): This is oriented such that columns of a are eliminated. - // This likely could be re-arranged to take better advantage of row-major - // storage. - checkMatrix(m, n, a, lda) - if len(work) < n { - panic(badWork) - } - k := min(m, n) - if len(tau) < k { - panic(badTau) - } - for i := 0; i < k; i++ { - // Generate elementary reflector H(i). - a[i*lda+i], tau[i] = impl.Dlarfg(m-i, a[i*lda+i], a[min((i+1), m-1)*lda+i:], lda) - if i < n-1 { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(blas.Left, m-i, n-i-1, - a[i*lda+i:], lda, - tau[i], - a[i*lda+i+1:], lda, - work) - a[i*lda+i] = aii - } - } -} diff --git a/vendor/github.com/gonum/lapack/native/dgeqrf.go b/vendor/github.com/gonum/lapack/native/dgeqrf.go deleted file mode 100644 index e7c056010..000000000 --- a/vendor/github.com/gonum/lapack/native/dgeqrf.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/lapack" -) - -// Dgeqrf computes the QR factorization of the m×n matrix a using a blocked -// algorithm. Please see the documentation for Dgeqr2 for a description of the -// parameters at entry and exit. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m and this function will panic otherwise. -// Dgeqrf is a blocked LQ factorization, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dgelqf, -// the optimal work length will be stored into work[0]. -// -// tau must be at least len min(m,n), and this function will panic otherwise. 
-func (impl Implementation) Dgeqrf(m, n int, a []float64, lda int, tau, work []float64, lwork int) { - // TODO(btracey): This algorithm is oriented for column-major storage. - // Consider modifying the algorithm to better suit row-major storage. - - // nb is the optimal blocksize, i.e. the number of columns transformed at a time. - nb := impl.Ilaenv(1, "DGEQRF", " ", m, n, -1, -1) - lworkopt := n * max(nb, 1) - lworkopt = max(n, lworkopt) - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - checkMatrix(m, n, a, lda) - if len(work) < lwork { - panic(shortWork) - } - if lwork < n { - panic(badWork) - } - k := min(m, n) - if len(tau) < k { - panic(badTau) - } - if k == 0 { - return - } - nbmin := 2 // Minimal number of blocks - var nx int // Use unblocked (unless changed in the next for loop) - iws := n - ldwork := nb - // Only consider blocked if the suggested number of blocks is > 1 and the - // number of columns is sufficiently large. - if nb > 1 && k > nb { - // nx is the crossover point. Above this value the blocked routine should be used. - nx = max(0, impl.Ilaenv(3, "DGEQRF", " ", m, n, -1, -1)) - if k > nx { - iws = ldwork * n - if lwork < iws { - // Not enough workspace to use the optimal number of blocks. Instead, - // get the maximum allowable number of blocks. - nb = lwork / n - nbmin = max(2, impl.Ilaenv(2, "DGEQRF", " ", m, n, -1, -1)) - } - } - } - for i := range work { - work[i] = 0 - } - // Compute QR using a blocked algorithm. - var i int - if nb >= nbmin && nb < k && nx < k { - for i = 0; i < k-nx; i += nb { - ib := min(k-i, nb) - // Compute the QR factorization of the current block. - impl.Dgeqr2(m-i, ib, a[i*lda+i:], lda, tau[i:], work) - if i+ib < n { - // Form the triangular factor of the block reflector and apply H^T - // In Dlarft, work becomes the T matrix. 
- impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - work, ldwork) - impl.Dlarfb(blas.Left, blas.Trans, lapack.Forward, lapack.ColumnWise, - m-i, n-i-ib, ib, - a[i*lda+i:], lda, - work, ldwork, - a[i*lda+i+ib:], lda, - work[ib*ldwork:], ldwork) - } - } - } - // Call unblocked code on the remaining columns. - if i < k { - impl.Dgeqr2(m-i, n-i, a[i*lda+i:], lda, tau[i:], work) - } -} diff --git a/vendor/github.com/gonum/lapack/native/dlange.go b/vendor/github.com/gonum/lapack/native/dlange.go deleted file mode 100644 index fb8259a2e..000000000 --- a/vendor/github.com/gonum/lapack/native/dlange.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "math" - - "github.com/gonum/lapack" -) - -// Dlange computes the matrix norm of the general m×n matrix a. The input norm -// specifies the norm computed. -// lapack.MaxAbs: the maximum absolute value of an element. -// lapack.MaxColumnSum: the maximum column sum of the absolute values of the entries. -// lapack.MaxRowSum: the maximum row sum of the absolute values of the entries. -// lapack.Frobenius: the square root of the sum of the squares of the entries. -// If norm == lapack.MaxColumnSum, work must be of length n, and this function will panic otherwise. -// There are no restrictions on work for the other matrix norms. -func (impl Implementation) Dlange(norm lapack.MatrixNorm, m, n int, a []float64, lda int, work []float64) float64 { - // TODO(btracey): These should probably be refactored to use BLAS calls. 
- checkMatrix(m, n, a, lda) - if m == 0 && n == 0 { - return 0 - } - if norm == lapack.MaxAbs { - var value float64 - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - value = math.Max(value, math.Abs(a[i*lda+j])) - } - } - return value - } - if norm == lapack.MaxColumnSum { - if len(work) < n { - panic(badWork) - } - for i := 0; i < n; i++ { - work[i] = 0 - } - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - work[j] += math.Abs(a[i*lda+j]) - } - } - var value float64 - for i := 0; i < n; i++ { - value = math.Max(value, work[i]) - } - return value - } - if norm == lapack.MaxRowSum { - var value float64 - for i := 0; i < m; i++ { - var sum float64 - for j := 0; j < n; j++ { - sum += math.Abs(a[i*lda+j]) - } - value = math.Max(value, sum) - } - return value - } - if norm == lapack.NormFrob { - var value float64 - scale := 0.0 - sum := 1.0 - for i := 0; i < m; i++ { - scale, sum = impl.Dlassq(n, a[i*lda:], 1, scale, sum) - } - value = scale * math.Sqrt(sum) - return value - } - panic("lapack: bad matrix norm") -} diff --git a/vendor/github.com/gonum/lapack/native/dlapy2.go b/vendor/github.com/gonum/lapack/native/dlapy2.go deleted file mode 100644 index 172a1174e..000000000 --- a/vendor/github.com/gonum/lapack/native/dlapy2.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import "math" - -// Dlapy2 is the LAPACK version of math.Hypot. -func (Implementation) Dlapy2(x, y float64) float64 { - return math.Hypot(x, y) -} diff --git a/vendor/github.com/gonum/lapack/native/dlarf.go b/vendor/github.com/gonum/lapack/native/dlarf.go deleted file mode 100644 index ad473fbe4..000000000 --- a/vendor/github.com/gonum/lapack/native/dlarf.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -// Dlarf applies an elementary reflector to a general rectangular matrix c. -// This computes -// c = h * c if side == Left -// c = c * h if side == right -// where -// h = 1 - tau * v * v^T -// and c is an m * n matrix. -// - -// Work is temporary storage of length at least m if side == Left and at least -// n if side == Right. This function will panic if this length requirement is not met. -func (impl Implementation) Dlarf(side blas.Side, m, n int, v []float64, incv int, tau float64, c []float64, ldc int, work []float64) { - applyleft := side == blas.Left - if (applyleft && len(work) < n) || (!applyleft && len(work) < m) { - panic(badWork) - } - checkMatrix(m, n, c, ldc) - - // v has length m if applyleft and n otherwise. - lenV := n - if applyleft { - lenV = m - } - - checkVector(lenV, v, incv) - - lastv := 0 // last non-zero element of v - lastc := 0 // last non-zero row/column of c - if tau != 0 { - var i int - if applyleft { - lastv = m - 1 - } else { - lastv = n - 1 - } - if incv > 0 { - i = lastv * incv - } - - // Look for the last non-zero row in v. - for lastv >= 0 && v[i] == 0 { - lastv-- - i -= incv - } - if applyleft { - // Scan for the last non-zero column in C[0:lastv, :] - lastc = impl.Iladlc(lastv+1, n, c, ldc) - } else { - // Scan for the last non-zero row in C[:, 0:lastv] - lastc = impl.Iladlr(m, lastv+1, c, ldc) - } - } - if lastv == -1 || lastc == -1 { - return - } - // Sometimes 1-indexing is nicer ... - bi := blas64.Implementation() - if applyleft { - // Form H * C - // w[0:lastc+1] = c[1:lastv+1, 1:lastc+1]^T * v[1:lastv+1,1] - bi.Dgemv(blas.Trans, lastv+1, lastc+1, 1, c, ldc, v, incv, 0, work, 1) - // c[0: lastv, 0: lastc] = c[...] 
- w[0:lastv, 1] * v[1:lastc, 1]^T - bi.Dger(lastv+1, lastc+1, -tau, v, incv, work, 1, c, ldc) - return - } - // Form C*H - // w[0:lastc+1,1] := c[0:lastc+1,0:lastv+1] * v[0:lastv+1,1] - bi.Dgemv(blas.NoTrans, lastc+1, lastv+1, 1, c, ldc, v, incv, 0, work, 1) - // c[0:lastc+1,0:lastv+1] = c[...] - w[0:lastc+1,0] * v[0:lastv+1,0]^T - bi.Dger(lastc+1, lastv+1, -tau, work, 1, v, incv, c, ldc) -} diff --git a/vendor/github.com/gonum/lapack/native/dlarfb.go b/vendor/github.com/gonum/lapack/native/dlarfb.go deleted file mode 100644 index 927dd9a56..000000000 --- a/vendor/github.com/gonum/lapack/native/dlarfb.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" - "github.com/gonum/lapack" -) - -// Dlarfb applies a block reflector to a matrix. -// -// In the call to Dlarfb, the mxn c is multiplied by the implicitly defined matrix h as follows: -// c = h * c if side == Left and trans == NoTrans -// c = c * h if side == Right and trans == NoTrans -// c = h^T * c if side == Left and trans == Trans -// c = c * h^t if side == Right and trans == Trans -// h is a product of elementary reflectors. direct sets the direction of multiplication -// h = h_1 * h_2 * ... * h_k if direct == Forward -// h = h_k * h_k-1 * ... * h_1 if direct == Backward -// The combination of direct and store defines the orientation of the elementary -// reflectors. In all cases the ones on the diagonal are implicitly represented. 
-// -// If direct == lapack.Forward and store == lapack.ColumnWise -// V = ( 1 ) -// ( v1 1 ) -// ( v1 v2 1 ) -// ( v1 v2 v3 ) -// ( v1 v2 v3 ) -// If direct == lapack.Forward and store == lapack.RowWise -// V = ( 1 v1 v1 v1 v1 ) -// ( 1 v2 v2 v2 ) -// ( 1 v3 v3 ) -// If direct == lapack.Backward and store == lapack.ColumnWise -// V = ( v1 v2 v3 ) -// ( v1 v2 v3 ) -// ( 1 v2 v3 ) -// ( 1 v3 ) -// ( 1 ) -// If direct == lapack.Backward and store == lapack.RowWise -// V = ( v1 v1 1 ) -// ( v2 v2 v2 1 ) -// ( v3 v3 v3 v3 1 ) -// An elementary reflector can be explicitly constructed by extracting the -// corresponding elements of v, placing a 1 where the diagonal would be, and -// placing zeros in the remaining elements. -// -// t is a k×k matrix containing the block reflector, and this function will panic -// if t is not of sufficient size. See Dlarft for more information. -// -// Work is a temporary storage matrix with stride ldwork. -// Work must be of size at least n×k side == Left and m×k if side == Right, and -// this function will panic if this size is not met. 
-func (Implementation) Dlarfb(side blas.Side, trans blas.Transpose, direct lapack.Direct, - store lapack.StoreV, m, n, k int, v []float64, ldv int, t []float64, ldt int, - c []float64, ldc int, work []float64, ldwork int) { - - checkMatrix(m, n, c, ldc) - if m == 0 || n == 0 { - return - } - if k < 0 { - panic("lapack: negative number of transforms") - } - if side != blas.Left && side != blas.Right { - panic(badSide) - } - if trans != blas.Trans && trans != blas.NoTrans { - panic(badTrans) - } - if direct != lapack.Forward && direct != lapack.Backward { - panic(badDirect) - } - if store != lapack.ColumnWise && store != lapack.RowWise { - panic(badStore) - } - - rowsWork := n - if side == blas.Right { - rowsWork = m - } - checkMatrix(rowsWork, k, work, ldwork) - - bi := blas64.Implementation() - - transt := blas.Trans - if trans == blas.Trans { - transt = blas.NoTrans - } - // TODO(btracey): This follows the original Lapack code where the - // elements are copied into the columns of the working array. The - // loops should go in the other direction so the data is written - // into the rows of work so the copy is not strided. A bigger change - // would be to replace work with work^T, but benchmarks would be - // needed to see if the change is merited. - if store == lapack.ColumnWise { - if direct == lapack.Forward { - // V1 is the first k rows of C. V2 is the remaining rows. - if side == blas.Left { - // W = C^T V = C1^T V1 + C2^T V2 (stored in work). - - // W = C1. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) - } - // W = W * V1. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, - n, k, 1, - v, ldv, - work, ldwork) - if m > k { - // W = W + C2^T V2. - bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, - 1, c[k*ldc:], ldc, v[k*ldv:], ldv, - 1, work, ldwork) - } - // W = W * T^T or W * T. - bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V * W^T. - if m > k { - // C2 -= V2 * W^T. 
- bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, - -1, v[k*ldv:], ldv, work, ldwork, - 1, c[k*ldc:], ldc) - } - // W *= V1^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, - 1, v, ldv, - work, ldwork) - // C1 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[j*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C = C * H or C * H^T, where C = (C1 C2). - - // W = C1. - for i := 0; i < k; i++ { - bi.Dcopy(m, c[i:], ldc, work[i:], ldwork) - } - // W *= V1. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, - 1, c[k:], ldc, v[k*ldv:], ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, - -1, work, ldwork, v[k*ldv:], ldv, - 1, c[k:], ldc) - } - // C -= W * V^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - // C -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+j] -= work[i*ldwork+j] - } - } - return - } - // V = (V1) - // = (V2) (last k rows) - // Where V2 is unit upper triangular. - if side == blas.Left { - // Form H * C or - // W = C^T V. - - // W = C2^T. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) - } - // W *= V2. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, - 1, v[(m-k)*ldv:], ldv, - work, ldwork) - if m > k { - // W += C1^T * V1. - bi.Dgemm(blas.Trans, blas.NoTrans, n, k, m-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V * W^T. 
- if m > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m-k, n, k, - -1, v, ldv, work, ldwork, - 1, c, ldc) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, - 1, v[(m-k)*ldv:], ldv, - work, ldwork) - // C2 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[(m-k+j)*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C * H or C * H^T where C = (C1 C2). - // W = C * V. - - // W = C2. - for j := 0; j < k; j++ { - bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) - } - - // W = W * V2. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, - 1, v[(n-k)*ldv:], ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, k, n-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - // C -= W * V^T. - if n > k { - // C1 -= W * V1^T. - bi.Dgemm(blas.NoTrans, blas.Trans, m, n-k, k, - -1, work, ldwork, v, ldv, - 1, c, ldc) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, - 1, v[(n-k)*ldv:], ldv, - work, ldwork) - // C2 -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+n-k+j] -= work[i*ldwork+j] - } - } - return - } - // Store = Rowwise. - if direct == lapack.Forward { - // V = (V1 V2) where v1 is unit upper triangular. - if side == blas.Left { - // Form H * C or H^T * C where C = (C1; C2). - // W = C^T * V^T. - - // W = C1^T. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[j*ldc:], 1, work[j:], ldwork) - } - // W *= V1^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, n, k, - 1, v, ldv, - work, ldwork) - if m > k { - bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, - 1, c[k*ldc:], ldc, v[k:], ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Upper, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V^T * W^T. 
- if m > k { - bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, - -1, v[k:], ldv, work, ldwork, - 1, c[k*ldc:], ldc) - } - // W *= V1. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, n, k, - 1, v, ldv, - work, ldwork) - // C1 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[j*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C * H or C * H^T where C = (C1 C2). - // W = C * V^T. - - // W = C1. - for j := 0; j < k; j++ { - bi.Dcopy(m, c[j:], ldc, work[j:], ldwork) - } - // W *= V1^T. - bi.Dtrmm(blas.Right, blas.Upper, blas.Trans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, - 1, c[k:], ldc, v[k:], ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Upper, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - // C -= W * V. - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, - -1, work, ldwork, v[k:], ldv, - 1, c[k:], ldc) - } - // W *= V1. - bi.Dtrmm(blas.Right, blas.Upper, blas.NoTrans, blas.Unit, m, k, - 1, v, ldv, - work, ldwork) - // C1 -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+j] -= work[i*ldwork+j] - } - } - return - } - // V = (V1 V2) where V2 is the last k columns and is lower unit triangular. - if side == blas.Left { - // Form H * C or H^T C where C = (C1 ; C2). - // W = C^T * V^T. - - // W = C2^T. - for j := 0; j < k; j++ { - bi.Dcopy(n, c[(m-k+j)*ldc:], 1, work[j:], ldwork) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, n, k, - 1, v[m-k:], ldv, - work, ldwork) - if m > k { - bi.Dgemm(blas.Trans, blas.Trans, n, k, m-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, transt, blas.NonUnit, n, k, - 1, t, ldt, - work, ldwork) - // C -= V^T * W^T. 
- if m > k { - bi.Dgemm(blas.Trans, blas.Trans, m-k, n, k, - -1, v, ldv, work, ldwork, - 1, c, ldc) - } - // W *= V2. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, n, k, - 1, v[m-k:], ldv, - work, ldwork) - // C2 -= W^T. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < n; i++ { - for j := 0; j < k; j++ { - c[(m-k+j)*ldc+i] -= work[i*ldwork+j] - } - } - return - } - // Form C * H or C * H^T where C = (C1 C2). - // W = C * V^T. - // W = C2. - for j := 0; j < k; j++ { - bi.Dcopy(m, c[n-k+j:], ldc, work[j:], ldwork) - } - // W *= V2^T. - bi.Dtrmm(blas.Right, blas.Lower, blas.Trans, blas.Unit, m, k, - 1, v[n-k:], ldv, - work, ldwork) - if n > k { - bi.Dgemm(blas.NoTrans, blas.Trans, m, k, n-k, - 1, c, ldc, v, ldv, - 1, work, ldwork) - } - // W *= T or T^T. - bi.Dtrmm(blas.Right, blas.Lower, trans, blas.NonUnit, m, k, - 1, t, ldt, - work, ldwork) - // C -= W * V. - if n > k { - bi.Dgemm(blas.NoTrans, blas.NoTrans, m, n-k, k, - -1, work, ldwork, v, ldv, - 1, c, ldc) - } - // W *= V2. - bi.Dtrmm(blas.Right, blas.Lower, blas.NoTrans, blas.Unit, m, k, - 1, v[n-k:], ldv, - work, ldwork) - // C1 -= W. - // TODO(btracey): This should use blas.Axpy. - for i := 0; i < m; i++ { - for j := 0; j < k; j++ { - c[i*ldc+n-k+j] -= work[i*ldwork+j] - } - } -} diff --git a/vendor/github.com/gonum/lapack/native/dlarfg.go b/vendor/github.com/gonum/lapack/native/dlarfg.go deleted file mode 100644 index bd1edf6e3..000000000 --- a/vendor/github.com/gonum/lapack/native/dlarfg.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "math" - - "github.com/gonum/blas/blas64" -) - -// Dlarfg generates an elementary reflector for a Householder matrix. 
It creates -// a real elementary reflector of order n such that -// H * (alpha) = (beta) -// ( x) ( 0) -// H^T * H = I -// H is represented in the form -// H = 1 - tau * (1; v) * (1 v^T) -// where tau is a real scalar. -// -// On entry, x contains the vector x, on exit it contains v. -func (impl Implementation) Dlarfg(n int, alpha float64, x []float64, incX int) (beta, tau float64) { - if n < 0 { - panic(nLT0) - } - if n <= 1 { - return alpha, 0 - } - checkVector(n-1, x, incX) - bi := blas64.Implementation() - xnorm := bi.Dnrm2(n-1, x, incX) - if xnorm == 0 { - return alpha, 0 - } - beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) - safmin := dlamchS / dlamchE - knt := 0 - if math.Abs(beta) < safmin { - // xnorm and beta may be innacurate, scale x and recompute. - rsafmn := 1 / safmin - for { - knt++ - bi.Dscal(n-1, rsafmn, x, incX) - beta *= rsafmn - alpha *= rsafmn - if math.Abs(beta) >= safmin { - break - } - } - xnorm = bi.Dnrm2(n-1, x, incX) - beta = -math.Copysign(impl.Dlapy2(alpha, xnorm), alpha) - } - tau = (beta - alpha) / beta - bi.Dscal(n-1, 1/(alpha-beta), x, incX) - for j := 0; j < knt; j++ { - beta *= safmin - } - return beta, tau -} diff --git a/vendor/github.com/gonum/lapack/native/dlarft.go b/vendor/github.com/gonum/lapack/native/dlarft.go deleted file mode 100644 index f99d43d15..000000000 --- a/vendor/github.com/gonum/lapack/native/dlarft.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" - "github.com/gonum/lapack" -) - -// Dlarft forms the triangular factor t of a block reflector, storing the answer -// in t. 
-// H = 1 - V * T * V^T if store == lapack.ColumnWise -// H = 1 - V^T * T * V if store == lapack.RowWise -// H is defined by a product of the elementary reflectors where -// H = H_1 * H_2 * ... * H_k if direct == lapack.Forward -// H = H_k * H_k-1 * ... * H_1 if direct == lapack.Backward -// -// t is a k×k triangular matrix. t is upper triangular if direct = lapack.Forward -// and lower triangular otherwise. This function will panic if t is not of -// sufficient size. -// -// store describes the storage of the elementary reflectors in v. Please see -// Dlarfb for a description of layout. -// -// tau contains the scalar factor of the elementary reflectors h. -func (Implementation) Dlarft(direct lapack.Direct, store lapack.StoreV, n, k int, - v []float64, ldv int, tau []float64, t []float64, ldt int) { - if n == 0 { - return - } - if n < 0 || k < 0 { - panic(negDimension) - } - if direct != lapack.Forward && direct != lapack.Backward { - panic(badDirect) - } - if store != lapack.RowWise && store != lapack.ColumnWise { - panic(badStore) - } - if len(tau) < k { - panic(badTau) - } - checkMatrix(k, k, t, ldt) - bi := blas64.Implementation() - // TODO(btracey): There are a number of minor obvious loop optimizations here. - // TODO(btracey): It may be possible to rearrange some of the code so that - // index of 1 is more common in the Dgemv. 
- if direct == lapack.Forward { - prevlastv := n - 1 - for i := 0; i < k; i++ { - prevlastv = max(i, prevlastv) - if tau[i] == 0 { - for j := 0; j <= i; j++ { - t[j*ldt+i] = 0 - } - continue - } - var lastv int - if store == lapack.ColumnWise { - // skip trailing zeros - for lastv = n - 1; lastv >= i+1; lastv-- { - if v[lastv*ldv+i] != 0 { - break - } - } - for j := 0; j < i; j++ { - t[j*ldt+i] = -tau[i] * v[i*ldv+j] - } - j := min(lastv, prevlastv) - bi.Dgemv(blas.Trans, j-i, i, - -tau[i], v[(i+1)*ldv:], ldv, v[(i+1)*ldv+i:], ldv, - 1, t[i:], ldt) - } else { - for lastv = n - 1; lastv >= i+1; lastv-- { - if v[i*ldv+lastv] != 0 { - break - } - } - for j := 0; j < i; j++ { - t[j*ldt+i] = -tau[i] * v[j*ldv+i] - } - j := min(lastv, prevlastv) - bi.Dgemv(blas.NoTrans, i, j-i, - -tau[i], v[i+1:], ldv, v[i*ldv+i+1:], 1, - 1, t[i:], ldt) - } - bi.Dtrmv(blas.Upper, blas.NoTrans, blas.NonUnit, i, t, ldt, t[i:], ldt) - t[i*ldt+i] = tau[i] - if i > 1 { - prevlastv = max(prevlastv, lastv) - } else { - prevlastv = lastv - } - } - return - } - prevlastv := 0 - for i := k - 1; i >= 0; i-- { - if tau[i] == 0 { - for j := i; j < k; j++ { - t[j*ldt+i] = 0 - } - continue - } - var lastv int - if i < k-1 { - if store == lapack.ColumnWise { - for lastv = 0; lastv < i; lastv++ { - if v[lastv*ldv+i] != 0 { - break - } - } - for j := i + 1; j < k; j++ { - t[j*ldt+i] = -tau[i] * v[(n-k+i)*ldv+j] - } - j := max(lastv, prevlastv) - bi.Dgemv(blas.Trans, n-k+i-j, k-i-1, - -tau[i], v[j*ldv+i+1:], ldv, v[j*ldv+i:], ldv, - 1, t[(i+1)*ldt+i:], ldt) - } else { - for lastv := 0; lastv < i; lastv++ { - if v[i*ldv+lastv] != 0 { - break - } - } - for j := i + 1; j < k; j++ { - t[j*ldt+i] = -tau[i] * v[j*ldv+n-k+i] - } - j := max(lastv, prevlastv) - bi.Dgemv(blas.NoTrans, k-i-1, n-k+i-j, - -tau[i], v[(i+1)*ldv+j:], ldv, v[i*ldv+j:], 1, - 1, t[(i+1)*ldt+i:], ldt) - } - bi.Dtrmv(blas.Lower, blas.NoTrans, blas.NonUnit, k-i-1, - t[(i+1)*ldt+i+1:], ldt, - t[(i+1)*ldt+i:], ldt) - if i > 0 { - prevlastv = 
min(prevlastv, lastv) - } else { - prevlastv = lastv - } - } - t[i*ldt+i] = tau[i] - } -} diff --git a/vendor/github.com/gonum/lapack/native/dlascl.go b/vendor/github.com/gonum/lapack/native/dlascl.go deleted file mode 100644 index 93245723c..000000000 --- a/vendor/github.com/gonum/lapack/native/dlascl.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "math" - - "github.com/gonum/lapack" -) - -// Dlascl multiplies a rectangular matrix by a scalar. -func (impl Implementation) Dlascl(kind lapack.MatrixType, kl, ku int, cfrom, cto float64, m, n int, a []float64, lda int) { - checkMatrix(m, n, a, lda) - if cfrom == 0 { - panic("dlascl: zero divisor") - } - if math.IsNaN(cfrom) || math.IsNaN(cto) { - panic("dlascl: NaN scale factor") - } - if n == 0 || m == 0 { - return - } - smlnum := dlamchS - bignum := 1 / smlnum - cfromc := cfrom - ctoc := cto - cfrom1 := cfromc * smlnum - for { - var done bool - var mul, ctol float64 - if cfrom1 == cfromc { - // cfromc is inf - mul = ctoc / cfromc - done = true - ctol = ctoc - } else { - ctol = ctoc / bignum - if ctol == ctoc { - // ctoc is either 0 or inf. 
- mul = ctoc - done = true - cfromc = 1 - } else if math.Abs(cfrom1) > math.Abs(ctoc) && ctoc != 0 { - mul = smlnum - done = false - cfromc = cfrom1 - } else if math.Abs(ctol) > math.Abs(cfromc) { - mul = bignum - done = false - ctoc = ctol - } else { - mul = ctoc / cfromc - done = true - } - } - switch kind { - default: - panic("lapack: not implemented") - case lapack.General: - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = a[i*lda+j] * mul - } - } - } - if done { - break - } - } -} diff --git a/vendor/github.com/gonum/lapack/native/dlaset.go b/vendor/github.com/gonum/lapack/native/dlaset.go deleted file mode 100644 index 33cfc438f..000000000 --- a/vendor/github.com/gonum/lapack/native/dlaset.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import "github.com/gonum/blas" - -// Dlaset sets the off-diagonal elements of a to alpha, and the diagonal elements -// of a to beta. If uplo == blas.Upper, only the upper diagonal elements are set. -// If uplo == blas.Lower, only the lower diagonal elements are set. If uplo is -// otherwise, all of the elements of a are set. 
-func (impl Implementation) Dlaset(uplo blas.Uplo, m, n int, alpha, beta float64, a []float64, lda int) { - checkMatrix(m, n, a, lda) - if uplo == blas.Upper { - for i := 0; i < m; i++ { - for j := i + 1; j < n; j++ { - a[i*lda+j] = alpha - } - } - } else if uplo == blas.Lower { - for i := 0; i < m; i++ { - for j := 0; j < i; j++ { - a[i*lda+j] = alpha - } - } - } else { - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - a[i*lda+j] = alpha - } - } - } - for i := 0; i < min(m, n); i++ { - a[i*lda+i] = beta - } -} diff --git a/vendor/github.com/gonum/lapack/native/dlassq.go b/vendor/github.com/gonum/lapack/native/dlassq.go deleted file mode 100644 index 7b98c7005..000000000 --- a/vendor/github.com/gonum/lapack/native/dlassq.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import "math" - -// Dlassq updates a sum of squares in scaled form. The input parameters scale and -// sumsq represent the current scale and total sum of squares. These values are -// updated with the information in the first n elements of the vector specified -// by x and incX. -func (impl Implementation) Dlassq(n int, x []float64, incx int, scale float64, sumsq float64) (scl, smsq float64) { - if n <= 0 { - return scale, sumsq - } - for ix := 0; ix <= (n-1)*incx; ix += incx { - absxi := math.Abs(x[ix]) - if absxi > 0 || math.IsNaN(absxi) { - if scale < absxi { - sumsq = 1 + sumsq*(scale/absxi)*(scale/absxi) - scale = absxi - } else { - sumsq += (absxi / scale) * (absxi / scale) - } - } - } - return scale, sumsq -} diff --git a/vendor/github.com/gonum/lapack/native/doc.go b/vendor/github.com/gonum/lapack/native/doc.go deleted file mode 100644 index d622dc504..000000000 --- a/vendor/github.com/gonum/lapack/native/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright ©2015 The gonum Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package native is a pure-go implementation of the LAPACK API. The LAPACK API defines -// a set of algorithms for advanced matrix operations. -// -// The function definitions and implementations follow that of the netlib reference -// implementation. Please see http://www.netlib.org/lapack/explore-html/ for more -// information, and http://www.netlib.org/lapack/explore-html/d4/de1/_l_i_c_e_n_s_e_source.html -// for more license information. -// -// Slice function arguments frequently represent vectors and matrices. The data -// layout is identical to that found in https://godoc.org/github.com/gonum/blas/native. -// -// Most LAPACK functions are built on top the routines defined in the BLAS API, -// and as such the computation time for many LAPACK functions is -// dominated by BLAS calls. Here, BLAS is accessed through the -// the blas64 package (https://godoc.org/github.com/gonum/blas/blas64). In particular, -// this implies that an external BLAS library will be used if it is -// registered in blas64. -// -// The full LAPACK capability has not been implemented at present. The full -// API is very large, containing approximately 200 functions for double precision -// alone. Future additions will be focused on supporting the gonum matrix -// package (https://godoc.org/github.com/gonum/matrix/mat64), though pull requests -// with implementations and tests for LAPACK function are encouraged. -package native diff --git a/vendor/github.com/gonum/lapack/native/dorm2r.go b/vendor/github.com/gonum/lapack/native/dorm2r.go deleted file mode 100644 index 031480a76..000000000 --- a/vendor/github.com/gonum/lapack/native/dorm2r.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package native - -import "github.com/gonum/blas" - -// Dorm2r multiplies a general matrix c by an orthogonal matrix from a QR factorization -// determined by Dgeqrf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of size m×k, and if side == blas.Right -// a is of size n×k. -// -// Tau contains the householder factors and is of length at least k and this function -// will panic otherwise. -// -// Work is temporary storage of length at least n if side == blas.Left -// and at least m if side == blas.Right and this function will panic otherwise. -func (impl Implementation) Dorm2r(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { - if side != blas.Left && side != blas.Right { - panic(badSide) - } - if trans != blas.Trans && trans != blas.NoTrans { - panic(badTrans) - } - - left := side == blas.Left - notran := trans == blas.NoTrans - if left { - // Q is m x m - checkMatrix(m, k, a, lda) - if len(work) < n { - panic(badWork) - } - } else { - // Q is n x n - checkMatrix(n, k, a, lda) - if len(work) < m { - panic(badWork) - } - } - checkMatrix(m, n, c, ldc) - if m == 0 || n == 0 || k == 0 { - return - } - if len(tau) < k { - panic(badTau) - } - if left { - if notran { - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - return - } - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], lda, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - return - } - if notran { - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) - 
a[i*lda+i] = aii - } - return - } - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], lda, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } -} diff --git a/vendor/github.com/gonum/lapack/native/dorml2.go b/vendor/github.com/gonum/lapack/native/dorml2.go deleted file mode 100644 index 9d1585cda..000000000 --- a/vendor/github.com/gonum/lapack/native/dorml2.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import "github.com/gonum/blas" - -// Dorml2 multiplies a general matrix c by an orthogonal matrix from an LQ factorization -// determined by Dgelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right -// a is of size k×n. -// -// -// Tau contains the householder factors and is of length at least k and this function will -// panic otherwise. -// -// Work is temporary storage of length at least n if side == blas.Left -// and at least m if side == blas.Right and this function will panic otherwise. 
-func (impl Implementation) Dorml2(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64) { - if side != blas.Left && side != blas.Right { - panic(badSide) - } - if trans != blas.Trans && trans != blas.NoTrans { - panic(badTrans) - } - - left := side == blas.Left - notran := trans == blas.NoTrans - if left { - checkMatrix(k, m, a, lda) - if len(work) < n { - panic(badWork) - } - } else { - checkMatrix(k, n, a, lda) - if len(work) < m { - panic(badWork) - } - } - checkMatrix(m, n, c, ldc) - if m == 0 || n == 0 || k == 0 { - return - } - switch { - case left && notran: - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - return - case left && !notran: - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m-i, n, a[i*lda+i:], 1, tau[i], c[i*ldc:], ldc, work) - a[i*lda+i] = aii - } - return - case !left && notran: - for i := k - 1; i >= 0; i-- { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } - return - case !left && !notran: - for i := 0; i < k; i++ { - aii := a[i*lda+i] - a[i*lda+i] = 1 - impl.Dlarf(side, m, n-i, a[i*lda+i:], 1, tau[i], c[i:], ldc, work) - a[i*lda+i] = aii - } - return - } -} diff --git a/vendor/github.com/gonum/lapack/native/dormlq.go b/vendor/github.com/gonum/lapack/native/dormlq.go deleted file mode 100644 index 4d3cedf6a..000000000 --- a/vendor/github.com/gonum/lapack/native/dormlq.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/lapack" -) - -// Dormlq multiplies the matrix c by the othogonal matrix q defined by the -// slices a and tau. A and tau are as returned from Dgelqf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right -// a is of size k×n. This uses a blocked algorithm. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, -// and this function will panic otherwise. -// Dormlq uses a block algorithm, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dormlq, -// the optimal work length will be stored into work[0]. -// -// Tau contains the householder scales and must have length at least k, and -// this function will panic otherwise. 
-func (impl Implementation) Dormlq(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { - if side != blas.Left && side != blas.Right { - panic(badSide) - } - if trans != blas.Trans && trans != blas.NoTrans { - panic(badTrans) - } - left := side == blas.Left - notran := trans == blas.NoTrans - if left { - checkMatrix(k, m, a, lda) - } else { - checkMatrix(k, n, a, lda) - } - checkMatrix(m, n, c, ldc) - if len(tau) < k { - panic(badTau) - } - - const nbmax = 64 - nw := n - if !left { - nw = m - } - opts := string(side) + string(trans) - nb := min(nbmax, impl.Ilaenv(1, "DORMLQ", opts, m, n, k, -1)) - lworkopt := max(1, nw) * nb - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - if left { - if lwork < n { - panic(badWork) - } - } else { - if lwork < m { - panic(badWork) - } - } - - if m == 0 || n == 0 || k == 0 { - return - } - nbmin := 2 - - ldwork := nb - if nb > 1 && nb < k { - iws := nw * nb - if lwork < iws { - nb = lwork / nw - nbmin = max(2, impl.Ilaenv(2, "DORMLQ", opts, m, n, k, -1)) - } - } - if nb < nbmin || nb >= k { - // Call unblocked code - impl.Dorml2(side, trans, m, n, k, a, lda, tau, c, ldc, work) - return - } - ldt := nb - t := make([]float64, nb*ldt) - - transt := blas.NoTrans - if notran { - transt = blas.Trans - } - - switch { - case left && notran: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, - a[i*lda+i:], lda, - t, ldt, - c[i*ldc:], ldc, - work, ldwork) - } - return - case left && !notran: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m-i, n, ib, - a[i*lda+i:], lda, - t, ldt, - c[i*ldc:], ldc, - work, 
ldwork) - } - return - case !left && notran: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, - a[i*lda+i:], lda, - t, ldt, - c[i:], ldc, - work, ldwork) - } - return - case !left && !notran: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.RowWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, transt, lapack.Forward, lapack.RowWise, m, n-i, ib, - a[i*lda+i:], lda, - t, ldt, - c[i:], ldc, - work, ldwork) - } - return - } -} diff --git a/vendor/github.com/gonum/lapack/native/dormqr.go b/vendor/github.com/gonum/lapack/native/dormqr.go deleted file mode 100644 index 5d005f118..000000000 --- a/vendor/github.com/gonum/lapack/native/dormqr.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/lapack" -) - -// Dormqr multiplies the matrix c by the othogonal matrix q defined by the -// slices a and tau. A and tau are as returned from Dgeqrf. -// C = Q * C if side == blas.Left and trans == blas.NoTrans -// C = Q^T * C if side == blas.Left and trans == blas.Trans -// C = C * Q if side == blas.Right and trans == blas.NoTrans -// C = C * Q^T if side == blas.Right and trans == blas.Trans -// If side == blas.Left, a is a matrix of side k×m, and if side == blas.Right -// a is of size k×n. This uses a blocked algorithm. -// -// Work is temporary storage, and lwork specifies the usable memory length. -// At minimum, lwork >= m if side == blas.Left and lwork >= n if side == blas.Right, -// and this function will panic otherwise. 
-// Dormqr uses a block algorithm, but the block size is limited -// by the temporary space available. If lwork == -1, instead of performing Dormqr, -// the optimal work length will be stored into work[0]. -// -// Tau contains the householder scales and must have length at least k, and -// this function will panic otherwise. -func (impl Implementation) Dormqr(side blas.Side, trans blas.Transpose, m, n, k int, a []float64, lda int, tau, c []float64, ldc int, work []float64, lwork int) { - left := side == blas.Left - notran := trans == blas.NoTrans - if left { - checkMatrix(m, k, a, lda) - } else { - checkMatrix(n, k, a, lda) - } - checkMatrix(m, n, c, ldc) - - const nbmax = 64 - nw := n - if side == blas.Right { - nw = m - } - opts := string(side) + string(trans) - nb := min(nbmax, impl.Ilaenv(1, "DORMQR", opts, m, n, k, -1)) - lworkopt := max(1, nw) * nb - if lwork == -1 { - work[0] = float64(lworkopt) - return - } - if left { - if lwork < n { - panic(badWork) - } - } else { - if lwork < m { - panic(badWork) - } - } - if m == 0 || n == 0 || k == 0 { - return - } - nbmin := 2 - - ldwork := nb - if nb > 1 && nb < k { - iws := nw * nb - if lwork < iws { - nb = lwork / nw - nbmin = max(2, impl.Ilaenv(2, "DORMQR", opts, m, n, k, -1)) - } - } - if nb < nbmin || nb >= k { - // Call unblocked code - impl.Dorm2r(side, trans, m, n, k, a, lda, tau, c, ldc, work) - return - } - ldt := nb - t := make([]float64, nb*ldt) - switch { - case left && notran: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m-i, n, ib, - a[i*lda+i:], lda, - t, ldt, - c[i*ldc:], ldc, - work, ldwork) - } - return - case left && !notran: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, m-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, trans, 
lapack.Forward, lapack.ColumnWise, m-i, n, ib, - a[i*lda+i:], lda, - t, ldt, - c[i*ldc:], ldc, - work, ldwork) - } - return - case !left && notran: - for i := 0; i < k; i += nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, - a[i*lda+i:], lda, - t, ldt, - c[i:], ldc, - work, ldwork) - } - return - case !left && !notran: - for i := ((k - 1) / nb) * nb; i >= 0; i -= nb { - ib := min(nb, k-i) - impl.Dlarft(lapack.Forward, lapack.ColumnWise, n-i, ib, - a[i*lda+i:], lda, - tau[i:], - t, ldt) - impl.Dlarfb(side, trans, lapack.Forward, lapack.ColumnWise, m, n-i, ib, - a[i*lda+i:], lda, - t, ldt, - c[i:], ldc, - work, ldwork) - } - return - } -} diff --git a/vendor/github.com/gonum/lapack/native/dpotf2.go b/vendor/github.com/gonum/lapack/native/dpotf2.go deleted file mode 100644 index 9ad612efc..000000000 --- a/vendor/github.com/gonum/lapack/native/dpotf2.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "math" - - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -// Dpotf2 computes the cholesky decomposition of the symmetric positive definite -// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, -// and a = U^T U is stored in place into a. If ul == blas.Lower, then a = L L^T -// is computed and stored in-place into a. If a is not positive definite, false -// is returned. This is the unblocked version of the algorithm. 
-func (Implementation) Dpotf2(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { - if ul != blas.Upper && ul != blas.Lower { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if lda < n { - panic(badLdA) - } - if n == 0 { - return true - } - bi := blas64.Implementation() - if ul == blas.Upper { - for j := 0; j < n; j++ { - ajj := a[j*lda+j] - if j != 0 { - ajj -= bi.Ddot(j, a[j:], lda, a[j:], lda) - } - if ajj <= 0 || math.IsNaN(ajj) { - a[j*lda+j] = ajj - return false - } - ajj = math.Sqrt(ajj) - a[j*lda+j] = ajj - if j < n-1 { - bi.Dgemv(blas.Trans, j, n-j-1, - -1, a[j+1:], lda, a[j:], lda, - 1, a[j*lda+j+1:], 1) - bi.Dscal(n-j-1, 1/ajj, a[j*lda+j+1:], 1) - } - } - return true - } - for j := 0; j < n; j++ { - ajj := a[j*lda+j] - if j != 0 { - ajj -= bi.Ddot(j, a[j*lda:], 1, a[j*lda:], 1) - } - if ajj <= 0 || math.IsNaN(ajj) { - a[j*lda+j] = ajj - return false - } - ajj = math.Sqrt(ajj) - a[j*lda+j] = ajj - if j < n-1 { - bi.Dgemv(blas.NoTrans, n-j-1, j, - -1, a[(j+1)*lda:], lda, a[j*lda:], 1, - 1, a[(j+1)*lda+j:], lda) - bi.Dscal(n-j-1, 1/ajj, a[(j+1)*lda+j:], lda) - } - } - return true -} diff --git a/vendor/github.com/gonum/lapack/native/dpotrf.go b/vendor/github.com/gonum/lapack/native/dpotrf.go deleted file mode 100644 index 4062b9027..000000000 --- a/vendor/github.com/gonum/lapack/native/dpotrf.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -// Dpotrf computes the cholesky decomposition of the symmetric positive definite -// matrix a. If ul == blas.Upper, then a is stored as an upper-triangular matrix, -// and a = U U^T is stored in place into a. If ul == blas.Lower, then a = L L^T -// is computed and stored in-place into a. If a is not positive definite, false -// is returned. 
This is the blocked version of the algorithm. -func (impl Implementation) Dpotrf(ul blas.Uplo, n int, a []float64, lda int) (ok bool) { - bi := blas64.Implementation() - if ul != blas.Upper && ul != blas.Lower { - panic(badUplo) - } - if n < 0 { - panic(nLT0) - } - if lda < n { - panic(badLdA) - } - if n == 0 { - return true - } - nb := impl.Ilaenv(1, "DPOTRF", string(ul), n, -1, -1, -1) - if n <= nb { - return impl.Dpotf2(ul, n, a, lda) - } - if ul == blas.Upper { - for j := 0; j < n; j += nb { - jb := min(nb, n-j) - bi.Dsyrk(blas.Upper, blas.Trans, jb, j, - -1, a[j:], lda, - 1, a[j*lda+j:], lda) - ok = impl.Dpotf2(blas.Upper, jb, a[j*lda+j:], lda) - if !ok { - return ok - } - if j+jb < n { - bi.Dgemm(blas.Trans, blas.NoTrans, jb, n-j-jb, j, - -1, a[j:], lda, a[j+jb:], lda, - 1, a[j*lda+j+jb:], lda) - bi.Dtrsm(blas.Left, blas.Upper, blas.Trans, blas.NonUnit, jb, n-j-jb, - 1, a[j*lda+j:], lda, - a[j*lda+j+jb:], lda) - } - } - return true - } - for j := 0; j < n; j += nb { - jb := min(nb, n-j) - bi.Dsyrk(blas.Lower, blas.NoTrans, jb, j, - -1, a[j*lda:], lda, - 1, a[j*lda+j:], lda) - ok := impl.Dpotf2(blas.Lower, jb, a[j*lda+j:], lda) - if !ok { - return ok - } - if j+jb < n { - bi.Dgemm(blas.NoTrans, blas.Trans, n-j-jb, jb, j, - -1, a[(j+jb)*lda:], lda, a[j*lda:], lda, - 1, a[(j+jb)*lda+j:], lda) - bi.Dtrsm(blas.Right, blas.Lower, blas.Trans, blas.NonUnit, n-j-jb, jb, - 1, a[j*lda+j:], lda, - a[(j+jb)*lda+j:], lda) - } - } - return true -} diff --git a/vendor/github.com/gonum/lapack/native/dtrtrs.go b/vendor/github.com/gonum/lapack/native/dtrtrs.go deleted file mode 100644 index d6cb8d2d8..000000000 --- a/vendor/github.com/gonum/lapack/native/dtrtrs.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package native - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -// Dtrtrs solves a triangular system of the form a * x = b or a^T * x = b. Dtrtrs -// checks for singularity in a. If a is singular, false is returned and no solve -// is performed. True is returned otherwise. -func (impl Implementation) Dtrtrs(uplo blas.Uplo, trans blas.Transpose, diag blas.Diag, n, nrhs int, a []float64, lda int, b []float64, ldb int) (ok bool) { - nounit := diag == blas.NonUnit - if n == 0 { - return false - } - // Check for singularity. - if nounit { - for i := 0; i < n; i++ { - if a[i*lda+i] == 0 { - return false - } - } - } - bi := blas64.Implementation() - bi.Dtrsm(blas.Left, uplo, trans, diag, n, nrhs, 1, a, lda, b, ldb) - return true -} diff --git a/vendor/github.com/gonum/lapack/native/general.go b/vendor/github.com/gonum/lapack/native/general.go deleted file mode 100644 index e42b9a2fa..000000000 --- a/vendor/github.com/gonum/lapack/native/general.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -import ( - "math" - - "github.com/gonum/lapack" -) - -// Implementation is the native Go implementation of LAPACK routines. It -// is built on top of calls to the return of blas64.Implementation(), so while -// this code is in pure Go, the underlying BLAS implementation may not be. 
-type Implementation struct{} - -var _ lapack.Float64 = Implementation{} - -const ( - badDirect = "lapack: bad direct" - badLdA = "lapack: index of a out of range" - badSide = "lapack: bad side" - badStore = "lapack: bad store" - badTau = "lapack: tau has insufficient length" - badTrans = "lapack: bad trans" - badUplo = "lapack: illegal triangle" - badWork = "lapack: insufficient working memory" - badWorkStride = "lapack: insufficient working array stride" - negDimension = "lapack: negative matrix dimension" - nLT0 = "lapack: n < 0" - shortWork = "lapack: working array shorter than declared" -) - -// checkMatrix verifies the parameters of a matrix input. -func checkMatrix(m, n int, a []float64, lda int) { - if m < 0 { - panic("lapack: has negative number of rows") - } - if m < 0 { - panic("lapack: has negative number of columns") - } - if lda < n { - panic("lapack: stride less than number of columns") - } - if len(a) < (m-1)*lda+n { - panic("lapack: insufficient matrix slice length") - } -} - -func checkVector(n int, v []float64, inc int) { - if n < 0 { - panic("lapack: negative matrix length") - } - if (inc > 0 && (n-1)*inc >= len(v)) || (inc < 0 && (1-n)*inc >= len(v)) { - panic("lapack: insufficient vector slice length") - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// dlamch is a function in fortran, but since go forces IEEE-754 these are all -// fixed values. Probably a way to get them as constants. 
-// TODO(btracey): Is there a better way to find the smallest number such that 1+E > 1 - -var dlamchE, dlamchS, dlamchP float64 - -func init() { - onePlusEps := math.Nextafter(1, math.Inf(1)) - eps := (math.Nextafter(1, math.Inf(1)) - 1) * 0.5 - dlamchE = eps - sfmin := math.SmallestNonzeroFloat64 - small := 1 / math.MaxFloat64 - if small >= sfmin { - sfmin = small * onePlusEps - } - dlamchS = sfmin - radix := 2.0 - dlamchP = radix * eps -} diff --git a/vendor/github.com/gonum/lapack/native/iladlc.go b/vendor/github.com/gonum/lapack/native/iladlc.go deleted file mode 100644 index 75b8caf0d..000000000 --- a/vendor/github.com/gonum/lapack/native/iladlc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -// Iladlc scans a matrix for its last non-zero column. Returns -1 if the matrix -// is all zeros. -func (Implementation) Iladlc(m, n int, a []float64, lda int) int { - if n == 0 || m == 0 { - return n - 1 - } - checkMatrix(m, n, a, lda) - - // Test common case where corner is non-zero. - if a[n-1] != 0 || a[(m-1)*lda+(n-1)] != 0 { - return n - 1 - } - - // Scan each row tracking the highest column seen. - highest := -1 - for i := 0; i < m; i++ { - for j := n - 1; j >= 0; j-- { - if a[i*lda+j] != 0 { - highest = max(highest, j) - break - } - } - } - return highest -} diff --git a/vendor/github.com/gonum/lapack/native/iladlr.go b/vendor/github.com/gonum/lapack/native/iladlr.go deleted file mode 100644 index 6862e158a..000000000 --- a/vendor/github.com/gonum/lapack/native/iladlr.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -// Iladlr scans a matrix for its last non-zero row. 
Returns -1 if the matrix -// is all zeros. -func (Implementation) Iladlr(m, n int, a []float64, lda int) int { - if m == 0 { - return m - 1 - } - - checkMatrix(m, n, a, lda) - - // Check the common case where the corner is non-zero - if a[(m-1)*lda] != 0 || a[(m-1)*lda+n-1] != 0 { - return m - 1 - } - for i := m - 1; i >= 0; i-- { - for j := 0; j < n; j++ { - if a[i*lda+j] != 0 { - return i - } - } - } - return -1 -} diff --git a/vendor/github.com/gonum/lapack/native/ilaenv.go b/vendor/github.com/gonum/lapack/native/ilaenv.go deleted file mode 100644 index b420dbf14..000000000 --- a/vendor/github.com/gonum/lapack/native/ilaenv.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package native - -// Ilaenv returns algorithm tuning parameters for the algorithm given by the -// input string. ispec specifies the parameter to return. -// 1: The optimal block size -// 2: The minimum block size for which the algorithm should be used. -// 3: The crossover point below which an unblocked routine should be used. -// 4: The number of shifts. -// 5: The minumum column dimension for blocking to be used. -// 6: The crossover point for SVD (to use QR factorization or not). -// 7: The number of processors. -// 8: The crossover point for multishift in QR and QZ methods for nonsymmetric eigenvalue problems. -// 9: Maximum size of the subproblems in divide-and-conquer algorithms. -// 10: ieee NaN arithmetic can be trusted not to trap. -// 11: infinity arithmetic can be trusted not to trap. -func (Implementation) Ilaenv(ispec int, s string, opts string, n1, n2, n3, n4 int) int { - // TODO(btracey): Replace this with a constant lookup? A list of constants? - // TODO: What is the difference between 2 and 3? 
- sname := s[0] == 'S' || s[0] == 'D' - cname := s[0] == 'C' || s[0] == 'Z' - if !sname && !cname { - panic("lapack: bad name") - } - c2 := s[1:3] - c3 := s[3:6] - c4 := c3[1:3] - - switch ispec { - default: - panic("lapack: bad ispec") - case 1: - switch c2 { - default: - panic("lapack: bad function name") - case "GE": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - if sname { - return 64 - } - return 64 - case "QRF", "RQF", "LQF", "QLF": - if sname { - return 32 - } - return 32 - case "HRD": - if sname { - return 32 - } - return 32 - case "BRD": - if sname { - return 32 - } - return 32 - case "TRI": - if sname { - return 64 - } - return 64 - } - case "PO": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - if sname { - return 64 - } - return 64 - } - case "SY": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - if sname { - return 64 - } - return 64 - case "TRD": - return 32 - case "GST": - return 64 - } - case "HE": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - return 64 - case "TRD": - return 32 - case "GST": - return 64 - } - case "OR": - switch c3[0] { - default: - panic("lapack: bad function name") - case 'G': - switch c3[1:] { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - case 'M': - switch c3[1:] { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - } - case "UN": - switch c3[0] { - default: - panic("lapack: bad function name") - case 'G': - switch c3[1:] { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - case 'M': - switch c3[1:] { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 32 - } - } - case "GB": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - if sname { 
- if n4 <= 64 { - return 1 - } - return 32 - } - if n4 <= 64 { - return 1 - } - return 32 - } - case "PB": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - if sname { - if n4 <= 64 { - return 1 - } - return 32 - } - if n4 <= 64 { - return 1 - } - return 32 - } - case "TR": - switch c3 { - default: - panic("lapack: bad function name") - case "TRI": - if sname { - return 64 - } - return 64 - } - case "LA": - switch c3 { - default: - panic("lapack: bad function name") - case "UUM": - if sname { - return 64 - } - return 64 - } - case "ST": - if sname && c3 == "EBZ" { - return 1 - } - panic("lapack: bad function name") - } - case 2: - switch c2 { - default: - panic("lapack: bad function name") - case "GE": - switch c3 { - default: - panic("lapack: bad function name") - case "QRF", "RQF", "LQF", "QLF": - if sname { - return 2 - } - return 2 - case "HRD": - if sname { - return 2 - } - return 2 - case "BRD": - if sname { - return 2 - } - return 2 - case "TRI": - if sname { - return 2 - } - return 2 - } - case "SY": - switch c3 { - default: - panic("lapack: bad function name") - case "TRF": - if sname { - return 8 - } - return 8 - case "TRD": - if sname { - return 2 - } - panic("lapack: bad function name") - } - case "HE": - if c3 == "TRD" { - return 2 - } - panic("lapack: bad function name") - case "OR": - if !sname { - panic("lapack: bad function name") - } - switch c3[0] { - default: - panic("lapack: bad function name") - case 'G': - switch c4 { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - case 'M': - switch c4 { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - } - case "UN": - switch c3[0] { - default: - panic("lapack: bad function name") - case 'G': - switch c4 { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - case 'M': - switch c4 { - default: 
- panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 2 - } - } - } - case 3: - switch c2 { - default: - panic("lapack: bad function name") - case "GE": - switch c3 { - default: - panic("lapack: bad function name") - case "QRF", "RQF", "LQF", "QLF": - if sname { - return 128 - } - return 128 - case "HRD": - if sname { - return 128 - } - return 128 - case "BRD": - if sname { - return 128 - } - return 128 - } - case "SY": - if sname && c3 == "TRD" { - return 32 - } - panic("lapack: bad function name") - case "HE": - if c3 == "TRD" { - return 32 - } - panic("lapack: bad function name") - case "OR": - switch c3[0] { - default: - panic("lapack: bad function name") - case 'G': - switch c4 { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 128 - } - } - case "UN": - switch c3[0] { - default: - panic("lapack: bad function name") - case 'G': - switch c4 { - default: - panic("lapack: bad function name") - case "QR", "RQ", "LQ", "QL", "HR", "TR", "BR": - return 128 - } - } - } - case 4: - // Used by xHSEQR - return 6 - case 5: - // Not used - return 2 - case 6: - // Used by xGELSS and xGESVD - return min(n1, n2) * 1e6 - case 7: - // Not used - return 1 - case 8: - // Used by xHSEQR - return 50 - case 9: - // used by xGELSD and xGESDD - return 25 - case 10: - // Go guarantees ieee - return 1 - case 11: - // Go guarantees ieee - return 1 - } -} diff --git a/vendor/github.com/gonum/matrix/mat64/cholesky.go b/vendor/github.com/gonum/matrix/mat64/cholesky.go deleted file mode 100644 index 1b69b05ae..000000000 --- a/vendor/github.com/gonum/matrix/mat64/cholesky.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on the CholeskyDecomposition class from Jama 1.0.3. 
- -package mat64 - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" - "github.com/gonum/lapack/lapack64" -) - -const badTriangle = "mat64: invalid triangle" - -// Cholesky calculates the Cholesky decomposition of the matrix A and returns -// whether the matrix is positive definite. The returned matrix is either a -// lower triangular matrix such that A = L * L^T or an upper triangular matrix -// such that A = U^T * U depending on the upper parameter. -func (t *TriDense) Cholesky(a Symmetric, upper bool) (ok bool) { - n := a.Symmetric() - if t.isZero() { - t.mat = blas64.Triangular{ - N: n, - Stride: n, - Diag: blas.NonUnit, - Data: use(t.mat.Data, n*n), - } - if upper { - t.mat.Uplo = blas.Upper - } else { - t.mat.Uplo = blas.Lower - } - } else { - if n != t.mat.N { - panic(ErrShape) - } - if (upper && t.mat.Uplo != blas.Upper) || (!upper && t.mat.Uplo != blas.Lower) { - panic(ErrTriangle) - } - } - copySymIntoTriangle(t, a) - - // Potrf modifies the data in place - _, ok = lapack64.Potrf( - blas64.Symmetric{ - N: t.mat.N, - Stride: t.mat.Stride, - Data: t.mat.Data, - Uplo: t.mat.Uplo, - }) - return ok -} - -// SolveCholesky finds the matrix m that solves A * m = b where A = L * L^T or -// A = U^T * U, and U or L are represented by t, placing the result in the -// receiver. -func (m *Dense) SolveCholesky(t Triangular, b Matrix) { - _, n := t.Dims() - bm, bn := b.Dims() - if n != bm { - panic(ErrShape) - } - - m.reuseAs(bm, bn) - if b != m { - m.Copy(b) - } - - // TODO(btracey): Implement an algorithm that doesn't require a copy into - // a blas64.Triangular. 
- ta := getBlasTriangular(t) - - switch ta.Uplo { - case blas.Upper: - blas64.Trsm(blas.Left, blas.Trans, 1, ta, m.mat) - blas64.Trsm(blas.Left, blas.NoTrans, 1, ta, m.mat) - case blas.Lower: - blas64.Trsm(blas.Left, blas.NoTrans, 1, ta, m.mat) - blas64.Trsm(blas.Left, blas.Trans, 1, ta, m.mat) - default: - panic(badTriangle) - } -} - -// SolveCholeskyVec finds the vector v that solves A * v = b where A = L * L^T or -// A = U^T * U, and U or L are represented by t, placing the result in the -// receiver. -func (v *Vector) SolveCholeskyVec(t Triangular, b *Vector) { - _, n := t.Dims() - vn := b.Len() - if vn != n { - panic(ErrShape) - } - v.reuseAs(n) - if v != b { - v.CopyVec(b) - } - ta := getBlasTriangular(t) - switch ta.Uplo { - case blas.Upper: - blas64.Trsv(blas.Trans, ta, v.mat) - blas64.Trsv(blas.NoTrans, ta, v.mat) - case blas.Lower: - blas64.Trsv(blas.NoTrans, ta, v.mat) - blas64.Trsv(blas.Trans, ta, v.mat) - default: - panic(badTriangle) - } -} - -// SolveTri finds the matrix x that solves op(A) * X = B where A is a triangular -// matrix and op is specified by trans. -func (m *Dense) SolveTri(a Triangular, trans bool, b Matrix) { - n, _ := a.Triangle() - bm, bn := b.Dims() - if n != bm { - panic(ErrShape) - } - - m.reuseAs(bm, bn) - if b != m { - m.Copy(b) - } - - // TODO(btracey): Implement an algorithm that doesn't require a copy into - // a blas64.Triangular. - ta := getBlasTriangular(a) - - t := blas.NoTrans - if trans { - t = blas.Trans - } - switch ta.Uplo { - case blas.Upper, blas.Lower: - blas64.Trsm(blas.Left, t, 1, ta, m.mat) - default: - panic(badTriangle) - } -} diff --git a/vendor/github.com/gonum/matrix/mat64/dense.go b/vendor/github.com/gonum/matrix/mat64/dense.go deleted file mode 100644 index 50ed79d28..000000000 --- a/vendor/github.com/gonum/matrix/mat64/dense.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "bytes" - "encoding/binary" - - "github.com/gonum/blas/blas64" -) - -var ( - matrix *Dense - - _ Matrix = matrix - _ Mutable = matrix - _ Vectorer = matrix - _ VectorSetter = matrix - - _ Cloner = matrix - _ Viewer = matrix - _ RowViewer = matrix - _ ColViewer = matrix - _ RawRowViewer = matrix - _ Grower = matrix - - _ Adder = matrix - _ Suber = matrix - _ Muler = matrix - _ Dotter = matrix - _ ElemMuler = matrix - _ ElemDiver = matrix - _ Exper = matrix - - _ Scaler = matrix - _ Applyer = matrix - - _ TransposeCopier = matrix - // _ TransposeViewer = matrix - - _ Tracer = matrix - _ Normer = matrix - _ Sumer = matrix - - _ Uer = matrix - _ Ler = matrix - - _ Stacker = matrix - _ Augmenter = matrix - - _ Equaler = matrix - _ ApproxEqualer = matrix - - _ RawMatrixSetter = matrix - _ RawMatrixer = matrix - - _ Reseter = matrix -) - -// Dense is a dense matrix representation. -type Dense struct { - mat blas64.General - - capRows, capCols int -} - -// NewDense creates a new matrix of type Dense with dimensions r and c. -// If the mat argument is nil, a new data slice is allocated. -// -// The data must be arranged in row-major order, i.e. the (i*c + j)-th -// element in mat is the {i, j}-th element in the matrix. -func NewDense(r, c int, mat []float64) *Dense { - if mat != nil && r*c != len(mat) { - panic(ErrShape) - } - if mat == nil { - mat = make([]float64, r*c) - } - return &Dense{ - mat: blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: mat, - }, - capRows: r, - capCols: c, - } -} - -// reuseAs resizes an empty matrix to a r×c matrix, -// or checks that a non-empty matrix is r×c. -func (m *Dense) reuseAs(r, c int) { - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat64.Error. 
- panic("mat64: caps not correctly set") - } - if m.isZero() { - m.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: use(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - return - } - if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } -} - -func (m *Dense) isZero() bool { - // It must be the case that m.Dims() returns - // zeros in this case. See comment in Reset(). - return m.mat.Stride == 0 -} - -// DenseCopyOf returns a newly allocated copy of the elements of a. -func DenseCopyOf(a Matrix) *Dense { - d := &Dense{} - d.Clone(a) - return d -} - -// SetRawMatrix sets the underlying blas64.General used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in b. -func (m *Dense) SetRawMatrix(b blas64.General) { - m.capRows, m.capCols = b.Rows, b.Cols - m.mat = b -} - -// RawMatrix returns the underlying blas64.General used by the receiver. -// Changes to elements in the receiver following the call will be reflected -// in returned blas64.General. -func (m *Dense) RawMatrix() blas64.General { return m.mat } - -// Dims returns the number of rows and columns in the matrix. -func (m *Dense) Dims() (r, c int) { return m.mat.Rows, m.mat.Cols } - -// Caps returns the number of rows and columns in the backing matrix. -func (m *Dense) Caps() (r, c int) { return m.capRows, m.capCols } - -// Col copies the elements in the jth column of the matrix into the slice dst. -// If the provided slice is nil, a new slice is first allocated. -// -// See the Vectorer interface for more information. 
-func (m *Dense) Col(dst []float64, j int) []float64 { - if j >= m.mat.Cols || j < 0 { - panic(ErrColAccess) - } - - if dst == nil { - dst = make([]float64, m.mat.Rows) - } - dst = dst[:min(len(dst), m.mat.Rows)] - blas64.Copy(len(dst), - blas64.Vector{Inc: m.mat.Stride, Data: m.mat.Data[j:]}, - blas64.Vector{Inc: 1, Data: dst}, - ) - - return dst -} - -// ColView returns a Vector reflecting col j, backed by the matrix data. -// -// See ColViewer for more information. -func (m *Dense) ColView(j int) *Vector { - if j >= m.mat.Cols || j < 0 { - panic(ErrColAccess) - } - return &Vector{ - mat: blas64.Vector{ - Inc: m.mat.Stride, - Data: m.mat.Data[j : (m.mat.Rows-1)*m.mat.Stride+j+1], - }, - n: m.mat.Rows, - } -} - -// SetCol sets the elements of the matrix in the specified column to the values -// of src. -// -// See the VectorSetter interface for more information. -func (m *Dense) SetCol(j int, src []float64) int { - if j >= m.mat.Cols || j < 0 { - panic(ErrColAccess) - } - - blas64.Copy(min(len(src), m.mat.Rows), - blas64.Vector{Inc: 1, Data: src}, - blas64.Vector{Inc: m.mat.Stride, Data: m.mat.Data[j:]}, - ) - - return min(len(src), m.mat.Rows) -} - -// Row copies the elements in the ith row of the matrix into the slice dst. -// If the provided slice is nil, a new slice is first allocated. -// -// See the Vectorer interface for more information. -func (m *Dense) Row(dst []float64, i int) []float64 { - if i >= m.mat.Rows || i < 0 { - panic(ErrRowAccess) - } - - if dst == nil { - dst = make([]float64, m.mat.Cols) - } - copy(dst, m.rowView(i)) - - return dst -} - -// SetRow sets the elements of the matrix in the specified row to the values of -// src. -// -// See the VectorSetter interface for more information. -func (m *Dense) SetRow(i int, src []float64) int { - if i >= m.mat.Rows || i < 0 { - panic(ErrRowAccess) - } - - copy(m.rowView(i), src) - - return min(len(src), m.mat.Cols) -} - -// RowView returns a Vector reflecting row i, backed by the matrix data. 
-// -// See RowViewer for more information. -func (m *Dense) RowView(i int) *Vector { - if i >= m.mat.Rows || i < 0 { - panic(ErrRowAccess) - } - return &Vector{ - mat: blas64.Vector{ - Inc: 1, - Data: m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+m.mat.Cols], - }, - n: m.mat.Cols, - } -} - -// RawRowView returns a slice backed by the same array as backing the -// receiver. -func (m *Dense) RawRowView(i int) []float64 { - if i >= m.mat.Rows || i < 0 { - panic(ErrRowAccess) - } - return m.rowView(i) -} - -func (m *Dense) rowView(r int) []float64 { - return m.mat.Data[r*m.mat.Stride : r*m.mat.Stride+m.mat.Cols] -} - -// View returns a new Matrix that shares backing data with the receiver. -// The new matrix is located from row i, column j extending r rows and c -// columns. -func (m *Dense) View(i, j, r, c int) Matrix { - mr, mc := m.Dims() - if i < 0 || i >= mr || j < 0 || j >= mc || r <= 0 || i+r > mr || c <= 0 || j+c > mc { - panic(ErrIndexOutOfRange) - } - t := *m - t.mat.Data = t.mat.Data[i*t.mat.Stride+j : (i+r-1)*t.mat.Stride+(j+c)] - t.mat.Rows = r - t.mat.Cols = c - t.capRows -= i - t.capCols -= j - return &t -} - -// Grow returns an expanded copy of the receiver. The copy is expanded -// by r rows and c columns. If the dimensions of the new copy are outside -// the caps of the receiver a new allocation is made, otherwise not. -func (m *Dense) Grow(r, c int) Matrix { - if r < 0 || c < 0 { - panic(ErrIndexOutOfRange) - } - if r == 0 && c == 0 { - return m - } - - r += m.mat.Rows - c += m.mat.Cols - - var t Dense - switch { - case m.mat.Rows == 0 || m.mat.Cols == 0: - t.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - // We zero because we don't know how the matrix will be used. - // In other places, the mat is immediately filled with a result; - // this is not the case here. 
- Data: useZeroed(m.mat.Data, r*c), - } - case r > m.capRows || c > m.capCols: - cr := max(r, m.capRows) - cc := max(c, m.capCols) - t.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: cc, - Data: make([]float64, cr*cc), - } - t.capRows = cr - t.capCols = cc - // Copy the complete matrix over to the new matrix. - // Including elements not currently visible. - r, c, m.mat.Rows, m.mat.Cols = m.mat.Rows, m.mat.Cols, m.capRows, m.capCols - t.Copy(m) - m.mat.Rows, m.mat.Cols = r, c - return &t - default: - t.mat = blas64.General{ - Data: m.mat.Data[:(r-1)*m.mat.Stride+c], - Rows: r, - Cols: c, - Stride: m.mat.Stride, - } - } - t.capRows = r - t.capCols = c - return &t -} - -// Reset zeros the dimensions of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (m *Dense) Reset() { - // No change of Stride, Rows and Cols to 0 - // may be made unless all are set to 0. - m.mat.Rows, m.mat.Cols, m.mat.Stride = 0, 0, 0 - m.capRows, m.capCols = 0, 0 - m.mat.Data = m.mat.Data[:0] -} - -// Clone makes a copy of a into the receiver, overwriting the previous value of -// the receiver. The clone operation does not make any restriction on shape. -// -// See the Cloner interface for more information. 
-func (m *Dense) Clone(a Matrix) { - r, c := a.Dims() - mat := blas64.General{ - Rows: r, - Cols: c, - Stride: c, - } - m.capRows, m.capCols = r, c - switch a := a.(type) { - case RawMatrixer: - amat := a.RawMatrix() - mat.Data = make([]float64, r*c) - for i := 0; i < r; i++ { - copy(mat.Data[i*c:(i+1)*c], amat.Data[i*amat.Stride:i*amat.Stride+c]) - } - case Vectorer: - mat.Data = use(m.mat.Data, r*c) - for i := 0; i < r; i++ { - a.Row(mat.Data[i*c:(i+1)*c], i) - } - default: - mat.Data = use(m.mat.Data, r*c) - m.mat = mat - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - m.set(i, j, a.At(i, j)) - } - } - return - } - m.mat = mat -} - -// Copy makes a copy of elements of a into the receiver. It is similar to the -// built-in copy; it copies as much as the overlap between the two matrices and -// returns the number of rows and columns it copied. -// -// See the Copier interface for more information. -func (m *Dense) Copy(a Matrix) (r, c int) { - r, c = a.Dims() - r = min(r, m.mat.Rows) - c = min(c, m.mat.Cols) - - switch a := a.(type) { - case RawMatrixer: - amat := a.RawMatrix() - for i := 0; i < r; i++ { - copy(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], amat.Data[i*amat.Stride:i*amat.Stride+c]) - } - case Vectorer: - for i := 0; i < r; i++ { - a.Row(m.mat.Data[i*m.mat.Stride:i*m.mat.Stride+c], i) - } - default: - for i := 0; i < r; i++ { - for j := 0; j < c; j++ { - m.set(r, c, a.At(r, c)) - } - } - } - - return r, c -} - -// U places the upper triangular matrix of a in the receiver. -// -// See the Uer interface for more information. 
-func (m *Dense) U(a Matrix) { - ar, ac := a.Dims() - if ar != ac { - panic(ErrSquare) - } - - if m == a { - m.zeroLower() - return - } - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - amat := a.RawMatrix() - copy(m.mat.Data[:ac], amat.Data[:ac]) - for j, ja, jm := 1, amat.Stride, m.mat.Stride; ja < ar*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { - zero(m.mat.Data[jm : jm+j]) - copy(m.mat.Data[jm+j:jm+ac], amat.Data[ja+j:ja+ac]) - } - return - } - - if a, ok := a.(Vectorer); ok { - row := make([]float64, ac) - copy(m.mat.Data[:m.mat.Cols], a.Row(row, 0)) - for r := 1; r < ar; r++ { - zero(m.mat.Data[r*m.mat.Stride : r*(m.mat.Stride+1)]) - copy(m.mat.Data[r*(m.mat.Stride+1):r*m.mat.Stride+m.mat.Cols], a.Row(row, r)) - } - return - } - - m.zeroLower() - for r := 0; r < ar; r++ { - for c := r; c < ac; c++ { - m.set(r, c, a.At(r, c)) - } - } -} - -func (m *Dense) zeroLower() { - for i := 1; i < m.mat.Rows; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+i]) - } -} - -// L places the lower triangular matrix of a in the receiver. -// -// See the Ler interface for more information. 
-func (m *Dense) L(a Matrix) { - ar, ac := a.Dims() - if ar != ac { - panic(ErrSquare) - } - - if m == a { - m.zeroUpper() - return - } - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - amat := a.RawMatrix() - copy(m.mat.Data[:ar], amat.Data[:ar]) - for j, ja, jm := 1, amat.Stride, m.mat.Stride; ja < ac*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { - zero(m.mat.Data[jm : jm+j]) - copy(m.mat.Data[jm+j:jm+ar], amat.Data[ja+j:ja+ar]) - } - return - } - - if a, ok := a.(Vectorer); ok { - row := make([]float64, ac) - for r := 0; r < ar; r++ { - a.Row(row[:r+1], r) - m.SetRow(r, row) - } - return - } - - m.zeroUpper() - for c := 0; c < ac; c++ { - for r := c; r < ar; r++ { - m.set(r, c, a.At(r, c)) - } - } -} - -func (m *Dense) zeroUpper() { - for i := 0; i < m.mat.Rows-1; i++ { - zero(m.mat.Data[i*m.mat.Stride+i+1 : (i+1)*m.mat.Stride]) - } -} - -// TCopy makes a copy of the transpose the matrix represented by a, placing the -// result into the receiver. -// -// See the TransposeCopier interface for more information. -func (m *Dense) TCopy(a Matrix) { - ar, ac := a.Dims() - - var w Dense - if m != a { - w = *m - } - w.reuseAs(ac, ar) - - switch a := a.(type) { - case *Dense: - for i := 0; i < ac; i++ { - for j := 0; j < ar; j++ { - w.set(i, j, a.at(j, i)) - } - } - default: - for i := 0; i < ac; i++ { - for j := 0; j < ar; j++ { - w.set(i, j, a.At(j, i)) - } - } - } - *m = w -} - -// Stack appends the rows of b onto the rows of a, placing the result into the -// receiver. -// -// See the Stacker interface for more information. -func (m *Dense) Stack(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ac != bc || m == a || m == b { - panic(ErrShape) - } - - m.reuseAs(ar+br, ac) - - m.Copy(a) - w := m.View(ar, 0, br, bc).(*Dense) - w.Copy(b) -} - -// Augment creates the augmented matrix of a and b, where b is placed in the -// greater indexed columns. -// -// See the Augmenter interface for more information. 
-func (m *Dense) Augment(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - if ar != br || m == a || m == b { - panic(ErrShape) - } - - m.reuseAs(ar, ac+bc) - - m.Copy(a) - w := m.View(0, ac, br, bc).(*Dense) - w.Copy(b) -} - -// MarshalBinary encodes the receiver into a binary form and returns the result. -// -// Dense is little-endian encoded as follows: -// 0 - 8 number of rows (int64) -// 8 - 16 number of columns (int64) -// 16 - .. matrix data elements (float64) -// [0,0] [0,1] ... [0,ncols-1] -// [1,0] [1,1] ... [1,ncols-1] -// ... -// [nrows-1,0] ... [nrows-1,ncols-1] -func (m Dense) MarshalBinary() ([]byte, error) { - buf := bytes.NewBuffer(make([]byte, 0, m.mat.Rows*m.mat.Cols*sizeFloat64+2*sizeInt64)) - err := binary.Write(buf, defaultEndian, int64(m.mat.Rows)) - if err != nil { - return nil, err - } - err = binary.Write(buf, defaultEndian, int64(m.mat.Cols)) - if err != nil { - return nil, err - } - - for i := 0; i < m.mat.Rows; i++ { - for _, v := range m.rowView(i) { - err = binary.Write(buf, defaultEndian, v) - if err != nil { - return nil, err - } - } - } - return buf.Bytes(), err -} - -// UnmarshalBinary decodes the binary form into the receiver. -// It panics if the receiver is a non-zero Dense matrix. -// -// See MarshalBinary for the on-disk layout. 
-func (m *Dense) UnmarshalBinary(data []byte) error { - if !m.isZero() { - panic("mat64: unmarshal into non-zero matrix") - } - - buf := bytes.NewReader(data) - var rows int64 - err := binary.Read(buf, defaultEndian, &rows) - if err != nil { - return err - } - var cols int64 - err = binary.Read(buf, defaultEndian, &cols) - if err != nil { - return err - } - - m.mat.Rows = int(rows) - m.mat.Cols = int(cols) - m.mat.Stride = int(cols) - m.capRows = int(rows) - m.capCols = int(cols) - m.mat.Data = use(m.mat.Data, m.mat.Rows*m.mat.Cols) - - for i := range m.mat.Data { - err = binary.Read(buf, defaultEndian, &m.mat.Data[i]) - if err != nil { - return err - } - } - - return err -} diff --git a/vendor/github.com/gonum/matrix/mat64/dense_arithmetic.go b/vendor/github.com/gonum/matrix/mat64/dense_arithmetic.go deleted file mode 100644 index 8d0f596d3..000000000 --- a/vendor/github.com/gonum/matrix/mat64/dense_arithmetic.go +++ /dev/null @@ -1,975 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "math" - - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -// Min returns the smallest element value of the receiver. -func (m *Dense) Min() float64 { - min := m.mat.Data[0] - for k := 0; k < m.mat.Rows; k++ { - for _, v := range m.rowView(k) { - min = math.Min(min, v) - } - } - return min -} - -// Max returns the largest element value of the receiver. -func (m *Dense) Max() float64 { - max := m.mat.Data[0] - for k := 0; k < m.mat.Rows; k++ { - for _, v := range m.rowView(k) { - max = math.Max(max, v) - } - } - return max -} - -// Trace returns the trace of the matrix. -// -// See the Tracer interface for more information. 
-func (m *Dense) Trace() float64 { - if m.mat.Rows != m.mat.Cols { - panic(ErrSquare) - } - var t float64 - for i := 0; i < len(m.mat.Data); i += m.mat.Stride + 1 { - t += m.mat.Data[i] - } - return t -} - -var inf = math.Inf(1) - -const ( - epsilon = 2.2204e-16 - small = math.SmallestNonzeroFloat64 -) - -// Norm returns the specified matrix p-norm of the receiver. -// -// See the Normer interface for more information. -func (m *Dense) Norm(ord float64) float64 { - var n float64 - switch { - case ord == 1: - col := make([]float64, m.mat.Rows) - for i := 0; i < m.mat.Cols; i++ { - var s float64 - for _, e := range m.Col(col, i) { - s += math.Abs(e) - } - n = math.Max(s, n) - } - case math.IsInf(ord, +1): - row := make([]float64, m.mat.Cols) - for i := 0; i < m.mat.Rows; i++ { - var s float64 - for _, e := range m.Row(row, i) { - s += math.Abs(e) - } - n = math.Max(s, n) - } - case ord == -1: - n = math.MaxFloat64 - col := make([]float64, m.mat.Rows) - for i := 0; i < m.mat.Cols; i++ { - var s float64 - for _, e := range m.Col(col, i) { - s += math.Abs(e) - } - n = math.Min(s, n) - } - case math.IsInf(ord, -1): - n = math.MaxFloat64 - row := make([]float64, m.mat.Cols) - for i := 0; i < m.mat.Rows; i++ { - var s float64 - for _, e := range m.Row(row, i) { - s += math.Abs(e) - } - n = math.Min(s, n) - } - case ord == 0: - for i := 0; i < len(m.mat.Data); i += m.mat.Stride { - for _, v := range m.mat.Data[i : i+m.mat.Cols] { - n = math.Hypot(n, v) - } - } - return n - case ord == 2, ord == -2: - s := SVD(m, epsilon, small, false, false).Sigma - if ord == 2 { - return s[0] - } - return s[len(s)-1] - default: - panic(ErrNormOrder) - } - - return n -} - -// Add adds a and b element-wise, placing the result in the receiver. -// -// See the Adder interface for more information. 
-func (m *Dense) Add(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - - if ar != br || ac != bc { - panic(ErrShape) - } - - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - if b, ok := b.(RawMatrixer); ok { - amat, bmat := a.RawMatrix(), b.RawMatrix() - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v + bmat.Data[i+jb] - } - } - return - } - } - - if a, ok := a.(Vectorer); ok { - if b, ok := b.(Vectorer); ok { - rowa := make([]float64, ac) - rowb := make([]float64, bc) - for r := 0; r < ar; r++ { - a.Row(rowa, r) - for i, v := range b.Row(rowb, r) { - rowa[i] += v - } - copy(m.rowView(r), rowa) - } - return - } - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)+b.At(r, c)) - } - } -} - -// Sub subtracts the matrix b from a, placing the result in the receiver. -// -// See the Suber interface for more information. -func (m *Dense) Sub(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - - if ar != br || ac != bc { - panic(ErrShape) - } - - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - if b, ok := b.(RawMatrixer); ok { - amat, bmat := a.RawMatrix(), b.RawMatrix() - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v - bmat.Data[i+jb] - } - } - return - } - } - - if a, ok := a.(Vectorer); ok { - if b, ok := b.(Vectorer); ok { - rowa := make([]float64, ac) - rowb := make([]float64, bc) - for r := 0; r < ar; r++ { - a.Row(rowa, r) - for i, v := range b.Row(rowb, r) { - rowa[i] -= v - } - copy(m.rowView(r), rowa) - } - return - } - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)-b.At(r, c)) - } - } -} - -// MulElem performs element-wise multiplication of a and b, placing the result -// in the receiver. 
-// -// See the ElemMuler interface for more information. -func (m *Dense) MulElem(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - - if ar != br || ac != bc { - panic(ErrShape) - } - - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - if b, ok := b.(RawMatrixer); ok { - amat, bmat := a.RawMatrix(), b.RawMatrix() - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v * bmat.Data[i+jb] - } - } - return - } - } - - if a, ok := a.(Vectorer); ok { - if b, ok := b.(Vectorer); ok { - rowa := make([]float64, ac) - rowb := make([]float64, bc) - for r := 0; r < ar; r++ { - a.Row(rowa, r) - for i, v := range b.Row(rowb, r) { - rowa[i] *= v - } - copy(m.rowView(r), rowa) - } - return - } - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)*b.At(r, c)) - } - } -} - -// DivElem performs element-wise division of a by b, placing the result -// in the receiver. -// -// See the ElemDiver interface for more information. 
-func (m *Dense) DivElem(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - - if ar != br || ac != bc { - panic(ErrShape) - } - - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - if b, ok := b.(RawMatrixer); ok { - amat, bmat := a.RawMatrix(), b.RawMatrix() - for ja, jb, jm := 0, 0, 0; ja < ar*amat.Stride; ja, jb, jm = ja+amat.Stride, jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v / bmat.Data[i+jb] - } - } - return - } - } - - if a, ok := a.(Vectorer); ok { - if b, ok := b.(Vectorer); ok { - rowa := make([]float64, ac) - rowb := make([]float64, bc) - for r := 0; r < ar; r++ { - a.Row(rowa, r) - for i, v := range b.Row(rowb, r) { - rowa[i] /= v - } - copy(m.rowView(r), rowa) - } - return - } - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, a.At(r, c)/b.At(r, c)) - } - } -} - -// Dot returns the sum of the element-wise products of the elements of the -// receiver and b. -// -// See the Dotter interface for more information. -func (m *Dense) Dot(b Matrix) float64 { - mr, mc := m.Dims() - br, bc := b.Dims() - - if mr != br || mc != bc { - panic(ErrShape) - } - - var d float64 - - if b, ok := b.(RawMatrixer); ok { - bmat := b.RawMatrix() - for jm, jb := 0, 0; jm < mr*m.mat.Stride; jm, jb = jm+m.mat.Stride, jb+bmat.Stride { - for i, v := range m.mat.Data[jm : jm+mc] { - d += v * bmat.Data[i+jb] - } - } - return d - } - - if b, ok := b.(Vectorer); ok { - row := make([]float64, bc) - for r := 0; r < br; r++ { - for i, v := range b.Row(row, r) { - d += m.mat.Data[r*m.mat.Stride+i] * v - } - } - return d - } - - for r := 0; r < mr; r++ { - for c := 0; c < mc; c++ { - d += m.At(r, c) * b.At(r, c) - } - } - return d -} - -// Mul takes the matrix product of a and b, placing the result in the receiver. -// -// See the Muler interface for more information. 
-func (m *Dense) Mul(a, b Matrix) { - ar, ac := a.Dims() - br, bc := b.Dims() - - if ac != br { - panic(ErrShape) - } - - m.reuseAs(ar, bc) - var w *Dense - if m != a && m != b { - w = m - } else { - w = getWorkspace(ar, bc, false) - defer func() { - m.Copy(w) - putWorkspace(w) - }() - } - - if a, ok := a.(RawMatrixer); ok { - if b, ok := b.(RawMatrixer); ok { - amat, bmat := a.RawMatrix(), b.RawMatrix() - blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, amat, bmat, 0, w.mat) - return - } - } - - if a, ok := a.(Vectorer); ok { - if b, ok := b.(Vectorer); ok { - row := make([]float64, ac) - col := make([]float64, br) - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for c := 0; c < bc; c++ { - dataTmp[c] = blas64.Dot(ac, - blas64.Vector{Inc: 1, Data: a.Row(row, r)}, - blas64.Vector{Inc: 1, Data: b.Col(col, c)}, - ) - } - } - return - } - } - - row := make([]float64, ac) - for r := 0; r < ar; r++ { - for i := range row { - row[i] = a.At(r, i) - } - for c := 0; c < bc; c++ { - var v float64 - for i, e := range row { - v += e * b.At(i, c) - } - w.mat.Data[r*w.mat.Stride+c] = v - } - } -} - -// MulTrans takes the matrix product of a and b, optionally transposing each, -// and placing the result in the receiver. -// -// See the MulTranser interface for more information. 
-func (m *Dense) MulTrans(a Matrix, aTrans bool, b Matrix, bTrans bool) { - ar, ac := a.Dims() - if aTrans { - ar, ac = ac, ar - } - - br, bc := b.Dims() - if bTrans { - br, bc = bc, br - } - - if ac != br { - panic(ErrShape) - } - - m.reuseAs(ar, bc) - var w *Dense - if m != a && m != b { - w = m - } else { - w = getWorkspace(ar, bc, false) - defer func() { - m.Copy(w) - putWorkspace(w) - }() - } - - if a, ok := a.(RawMatrixer); ok { - if b, ok := b.(RawMatrixer); ok { - amat := a.RawMatrix() - if a == b && aTrans != bTrans { - var op blas.Transpose - if aTrans { - op = blas.Trans - } else { - op = blas.NoTrans - } - blas64.Syrk(op, 1, amat, 0, blas64.Symmetric{N: w.mat.Rows, Stride: w.mat.Stride, Data: w.mat.Data, Uplo: blas.Upper}) - - // Fill lower matrix with result. - // TODO(kortschak): Investigate whether using blas64.Copy improves the performance of this significantly. - for i := 0; i < w.mat.Rows; i++ { - for j := i + 1; j < w.mat.Cols; j++ { - w.set(j, i, w.at(i, j)) - } - } - } else { - var aOp, bOp blas.Transpose - if aTrans { - aOp = blas.Trans - } else { - aOp = blas.NoTrans - } - if bTrans { - bOp = blas.Trans - } else { - bOp = blas.NoTrans - } - bmat := b.RawMatrix() - blas64.Gemm(aOp, bOp, 1, amat, bmat, 0, w.mat) - } - return - } - } - - if a, ok := a.(Vectorer); ok { - if b, ok := b.(Vectorer); ok { - row := make([]float64, ac) - col := make([]float64, br) - if aTrans { - if bTrans { - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for c := 0; c < bc; c++ { - dataTmp[c] = blas64.Dot(ac, - blas64.Vector{Inc: 1, Data: a.Col(row, r)}, - blas64.Vector{Inc: 1, Data: b.Row(col, c)}, - ) - } - } - return - } - // TODO(jonlawlor): determine if (b*a)' is more efficient - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for c := 0; c < bc; c++ { - dataTmp[c] = blas64.Dot(ac, - blas64.Vector{Inc: 1, Data: a.Col(row, r)}, - blas64.Vector{Inc: 1, Data: b.Col(col, c)}, - ) - } - 
} - return - } - if bTrans { - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for c := 0; c < bc; c++ { - dataTmp[c] = blas64.Dot(ac, - blas64.Vector{Inc: 1, Data: a.Row(row, r)}, - blas64.Vector{Inc: 1, Data: b.Row(col, c)}, - ) - } - } - return - } - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for c := 0; c < bc; c++ { - dataTmp[c] = blas64.Dot(ac, - blas64.Vector{Inc: 1, Data: a.Row(row, r)}, - blas64.Vector{Inc: 1, Data: b.Col(col, c)}, - ) - } - } - return - } - } - - row := make([]float64, ac) - if aTrans { - if bTrans { - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for i := range row { - row[i] = a.At(i, r) - } - for c := 0; c < bc; c++ { - var v float64 - for i, e := range row { - v += e * b.At(c, i) - } - dataTmp[c] = v - } - } - return - } - - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for i := range row { - row[i] = a.At(i, r) - } - for c := 0; c < bc; c++ { - var v float64 - for i, e := range row { - v += e * b.At(i, c) - } - dataTmp[c] = v - } - } - return - } - if bTrans { - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for i := range row { - row[i] = a.At(r, i) - } - for c := 0; c < bc; c++ { - var v float64 - for i, e := range row { - v += e * b.At(c, i) - } - dataTmp[c] = v - } - } - return - } - for r := 0; r < ar; r++ { - dataTmp := w.mat.Data[r*w.mat.Stride : r*w.mat.Stride+bc] - for i := range row { - row[i] = a.At(r, i) - } - for c := 0; c < bc; c++ { - var v float64 - for i, e := range row { - v += e * b.At(i, c) - } - dataTmp[c] = v - } - } -} - -// Exp calculates the exponential of the matrix a, e^a, placing the result -// in the receiver. -// -// See the Exper interface for more information. -// -// Exp uses the scaling and squaring method described in section 3 of -// http://www.cs.cornell.edu/cv/researchpdf/19ways+.pdf. 
-func (m *Dense) Exp(a Matrix) { - r, c := a.Dims() - if r != c { - panic(ErrShape) - } - - var w *Dense - switch { - case m.isZero(): - m.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: useZeroed(m.mat.Data, r*r), - } - m.capRows = r - m.capCols = c - for i := 0; i < r*r; i += r + 1 { - m.mat.Data[i] = 1 - } - w = m - case r == m.mat.Rows && c == m.mat.Cols: - w = getWorkspace(r, r, true) - for i := 0; i < r; i++ { - w.mat.Data[i*w.mat.Stride+i] = 1 - } - default: - panic(ErrShape) - } - - const ( - terms = 10 - scaling = 4 - ) - - small := getWorkspace(r, r, false) - small.Scale(math.Pow(2, -scaling), a) - power := getWorkspace(r, r, false) - power.Copy(small) - - var ( - tmp = getWorkspace(r, r, false) - factI = 1. - ) - for i := 1.; i < terms; i++ { - factI *= i - - // This is OK to do because power and tmp are - // new Dense values so all rows are contiguous. - // TODO(kortschak) Make this explicit in the NewDense doc comment. - for j, v := range power.mat.Data { - tmp.mat.Data[j] = v / factI - } - - w.Add(w, tmp) - if i < terms-1 { - tmp.Mul(power, small) - tmp, power = power, tmp - } - } - putWorkspace(small) - putWorkspace(power) - for i := 0; i < scaling; i++ { - tmp.Mul(w, w) - tmp, w = w, tmp - } - putWorkspace(tmp) - - if w != m { - m.Copy(w) - putWorkspace(w) - } -} - -// Pow calculates the integral power of the matrix a to n, placing the result -// in the receiver. -// -// See the Power interface for more information. -func (m *Dense) Pow(a Matrix, n int) { - if n < 0 { - panic("matrix: illegal power") - } - r, c := a.Dims() - if r != c { - panic(ErrShape) - } - - m.reuseAs(r, c) - - // Take possible fast paths. - switch n { - case 0: - for i := 0; i < r; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) - m.mat.Data[i*m.mat.Stride+i] = 1 - } - return - case 1: - m.Copy(a) - return - case 2: - m.Mul(a, a) - return - } - - // Perform iterative exponentiation by squaring in work space. 
- w := getWorkspace(r, r, false) - w.Copy(a) - s := getWorkspace(r, r, false) - s.Copy(a) - x := getWorkspace(r, r, false) - for n--; n > 0; n >>= 1 { - if n&1 != 0 { - x.Mul(w, s) - w, x = x, w - } - if n != 1 { - x.Mul(s, s) - s, x = x, s - } - } - m.Copy(w) - putWorkspace(w) - putWorkspace(s) - putWorkspace(x) -} - -// Scale multiplies the elements of a by f, placing the result in the receiver. -// -// See the Scaler interface for more information. -func (m *Dense) Scale(f float64, a Matrix) { - ar, ac := a.Dims() - - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - amat := a.RawMatrix() - for ja, jm := 0, 0; ja < ar*amat.Stride; ja, jm = ja+amat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = v * f - } - } - return - } - - if a, ok := a.(Vectorer); ok { - row := make([]float64, ac) - for r := 0; r < ar; r++ { - for i, v := range a.Row(row, r) { - row[i] = f * v - } - copy(m.rowView(r), row) - } - return - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, f*a.At(r, c)) - } - } -} - -// Apply applies the function f to each of the elements of a, placing the -// resulting matrix in the receiver. -// -// See the Applyer interface for more information. -func (m *Dense) Apply(f ApplyFunc, a Matrix) { - ar, ac := a.Dims() - - m.reuseAs(ar, ac) - - if a, ok := a.(RawMatrixer); ok { - amat := a.RawMatrix() - for j, ja, jm := 0, 0, 0; ja < ar*amat.Stride; j, ja, jm = j+1, ja+amat.Stride, jm+m.mat.Stride { - for i, v := range amat.Data[ja : ja+ac] { - m.mat.Data[i+jm] = f(j, i, v) - } - } - return - } - - if a, ok := a.(Vectorer); ok { - row := make([]float64, ac) - for r := 0; r < ar; r++ { - for i, v := range a.Row(row, r) { - row[i] = f(r, i, v) - } - copy(m.rowView(r), row) - } - return - } - - for r := 0; r < ar; r++ { - for c := 0; c < ac; c++ { - m.set(r, c, f(r, c, a.At(r, c))) - } - } -} - -// Sum returns the sum of the elements of the matrix. 
-// -// See the Sumer interface for more information. -func (m *Dense) Sum() float64 { - l := m.mat.Cols - var s float64 - for i := 0; i < len(m.mat.Data); i += m.mat.Stride { - for _, v := range m.mat.Data[i : i+l] { - s += v - } - } - return s -} - -// Equals returns true if b and the receiver have the same size and contain all -// equal elements. -// -// See the Equaler interface for more information. -func (m *Dense) Equals(b Matrix) bool { - br, bc := b.Dims() - if br != m.mat.Rows || bc != m.mat.Cols { - return false - } - - if b, ok := b.(RawMatrixer); ok { - bmat := b.RawMatrix() - for jb, jm := 0, 0; jm < br*m.mat.Stride; jb, jm = jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range m.mat.Data[jm : jm+bc] { - if v != bmat.Data[i+jb] { - return false - } - } - } - return true - } - - if b, ok := b.(Vectorer); ok { - rowb := make([]float64, bc) - for r := 0; r < br; r++ { - rowm := m.mat.Data[r*m.mat.Stride : r*m.mat.Stride+m.mat.Cols] - for i, v := range b.Row(rowb, r) { - if rowm[i] != v { - return false - } - } - } - return true - } - - for r := 0; r < br; r++ { - for c := 0; c < bc; c++ { - if m.At(r, c) != b.At(r, c) { - return false - } - } - } - return true -} - -// EqualsApprox compares the matrices represented by b and the receiver, with -// tolerance for element-wise equality specified by epsilon. -// -// See the ApproxEqualer interface for more information. 
-func (m *Dense) EqualsApprox(b Matrix, epsilon float64) bool { - br, bc := b.Dims() - if br != m.mat.Rows || bc != m.mat.Cols { - return false - } - - if b, ok := b.(RawMatrixer); ok { - bmat := b.RawMatrix() - for jb, jm := 0, 0; jm < br*m.mat.Stride; jb, jm = jb+bmat.Stride, jm+m.mat.Stride { - for i, v := range m.mat.Data[jm : jm+bc] { - if math.Abs(v-bmat.Data[i+jb]) > epsilon { - return false - } - } - } - return true - } - - if b, ok := b.(Vectorer); ok { - rowb := make([]float64, bc) - for r := 0; r < br; r++ { - rowm := m.mat.Data[r*m.mat.Stride : r*m.mat.Stride+m.mat.Cols] - for i, v := range b.Row(rowb, r) { - if math.Abs(rowm[i]-v) > epsilon { - return false - } - } - } - return true - } - - for r := 0; r < br; r++ { - for c := 0; c < bc; c++ { - if math.Abs(m.At(r, c)-b.At(r, c)) > epsilon { - return false - } - } - } - return true -} - -// RankOne performs a rank-one update to the matrix a and stores the result -// in the receiver. If a is zero, see Outer. -// m = a + alpha * x * y' -func (m *Dense) RankOne(a Matrix, alpha float64, x, y *Vector) { - ar, ac := a.Dims() - if x.Len() != ar { - panic(ErrShape) - } - if y.Len() != ac { - panic(ErrShape) - } - - var w Dense - if m == a { - w = *m - } - w.reuseAs(ar, ac) - - // Copy over to the new memory if necessary - if m != a { - w.Copy(a) - } - blas64.Ger(alpha, x.mat, y.mat, w.mat) - *m = w -} - -// Outer calculates the outer product of x and y, and stores the result -// in the receiver. In order to update to an existing matrix, see RankOne. -// m = x * y' -func (m *Dense) Outer(x, y *Vector) { - r := x.Len() - c := y.Len() - - // Copied from reuseAs with use replaced by useZeroed - // and a final zero of the matrix elements if we pass - // the shape checks. - // TODO(kortschak): Factor out into reuseZeroedAs if - // we find another case that needs it. - if m.mat.Rows > m.capRows || m.mat.Cols > m.capCols { - // Panic as a string, not a mat64.Error. 
- panic("mat64: caps not correctly set") - } - if m.isZero() { - m.mat = blas64.General{ - Rows: r, - Cols: c, - Stride: c, - Data: useZeroed(m.mat.Data, r*c), - } - m.capRows = r - m.capCols = c - } else if r != m.mat.Rows || c != m.mat.Cols { - panic(ErrShape) - } else { - for i := 0; i < r; i++ { - zero(m.mat.Data[i*m.mat.Stride : i*m.mat.Stride+c]) - } - } - - blas64.Ger(1, x.mat, y.mat, m.mat) -} diff --git a/vendor/github.com/gonum/matrix/mat64/eigen.go b/vendor/github.com/gonum/matrix/mat64/eigen.go deleted file mode 100644 index 676642f79..000000000 --- a/vendor/github.com/gonum/matrix/mat64/eigen.go +++ /dev/null @@ -1,819 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on the EigenvalueDecomposition class from Jama 1.0.3. - -package mat64 - -import ( - "math" -) - -func symmetric(m *Dense) bool { - n, _ := m.Dims() - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - if m.at(i, j) != m.at(j, i) { - return false - } - } - } - return true -} - -type EigenFactors struct { - V *Dense - d, e []float64 -} - -// Eigen returns the Eigenvalues and eigenvectors of a square real matrix. -// The matrix a is overwritten during the decomposition. If a is symmetric, -// then a = v*D*v' where the eigenvalue matrix D is diagonal and the -// eigenvector matrix v is orthogonal. -// -// If a is not symmetric, then the eigenvalue matrix D is block diagonal -// with the real eigenvalues in 1-by-1 blocks and any complex eigenvalues, -// lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The -// columns of v represent the eigenvectors in the sense that a*v = v*D, -// i.e. a.v equals v.D. The matrix v may be badly conditioned, or even -// singular, so the validity of the equation a = v*D*inverse(v) depends -// upon the 2-norm condition number of v. 
-func Eigen(a *Dense, epsilon float64) EigenFactors { - m, n := a.Dims() - if m != n { - panic(ErrSquare) - } - - var v *Dense - d := make([]float64, n) - e := make([]float64, n) - - if symmetric(a) { - // Tridiagonalize. - v = tred2(a, d, e) - - // Diagonalize. - tql2(d, e, v, epsilon) - } else { - // Reduce to Hessenberg form. - var hess *Dense - hess, v = orthes(a) - - // Reduce Hessenberg to real Schur form. - hqr2(d, e, hess, v, epsilon) - } - - return EigenFactors{v, d, e} -} - -// Symmetric Householder reduction to tridiagonal form. -// -// This is derived from the Algol procedures tred2 by -// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for -// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding -// Fortran subroutine in EISPACK. -func tred2(a *Dense, d, e []float64) (v *Dense) { - n := len(d) - v = a - - for j := 0; j < n; j++ { - d[j] = v.at(n-1, j) - } - - // Householder reduction to tridiagonal form. - for i := n - 1; i > 0; i-- { - // Scale to avoid under/overflow. - var ( - scale float64 - h float64 - ) - for k := 0; k < i; k++ { - scale += math.Abs(d[k]) - } - if scale == 0 { - e[i] = d[i-1] - for j := 0; j < i; j++ { - d[j] = v.at(i-1, j) - v.set(i, j, 0) - v.set(j, i, 0) - } - } else { - // Generate Householder vector. - for k := 0; k < i; k++ { - d[k] /= scale - h += d[k] * d[k] - } - f := d[i-1] - g := math.Sqrt(h) - if f > 0 { - g = -g - } - e[i] = scale * g - h -= f * g - d[i-1] = f - g - for j := 0; j < i; j++ { - e[j] = 0 - } - - // Apply similarity transformation to remaining columns. 
- for j := 0; j < i; j++ { - f = d[j] - v.set(j, i, f) - g = e[j] + v.at(j, j)*f - for k := j + 1; k <= i-1; k++ { - g += v.at(k, j) * d[k] - e[k] += v.at(k, j) * f - } - e[j] = g - } - f = 0 - for j := 0; j < i; j++ { - e[j] /= h - f += e[j] * d[j] - } - hh := f / (h + h) - for j := 0; j < i; j++ { - e[j] -= hh * d[j] - } - for j := 0; j < i; j++ { - f = d[j] - g = e[j] - for k := j; k <= i-1; k++ { - v.set(k, j, v.at(k, j)-(f*e[k]+g*d[k])) - } - d[j] = v.at(i-1, j) - v.set(i, j, 0) - } - } - d[i] = h - } - - // Accumulate transformations. - for i := 0; i < n-1; i++ { - v.set(n-1, i, v.at(i, i)) - v.set(i, i, 1) - h := d[i+1] - if h != 0 { - for k := 0; k <= i; k++ { - d[k] = v.at(k, i+1) / h - } - for j := 0; j <= i; j++ { - var g float64 - for k := 0; k <= i; k++ { - g += v.at(k, i+1) * v.at(k, j) - } - for k := 0; k <= i; k++ { - v.set(k, j, v.at(k, j)-g*d[k]) - } - } - } - for k := 0; k <= i; k++ { - v.set(k, i+1, 0) - } - } - for j := 0; j < n; j++ { - d[j] = v.at(n-1, j) - v.set(n-1, j, 0) - } - v.set(n-1, n-1, 1) - e[0] = 0 - - return v -} - -// Symmetric tridiagonal QL algorithm. -// -// This is derived from the Algol procedures tql2, by -// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for -// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding -// Fortran subroutine in EISPACK. -func tql2(d, e []float64, v *Dense, epsilon float64) { - n := len(d) - for i := 1; i < n; i++ { - e[i-1] = e[i] - } - e[n-1] = 0 - - var ( - f float64 - tst1 float64 - ) - for l := 0; l < n; l++ { - // Find small subdiagonal element - tst1 = math.Max(tst1, math.Abs(d[l])+math.Abs(e[l])) - m := l - for m < n { - if math.Abs(e[m]) <= epsilon*tst1 { - break - } - m++ - } - - // If m == l, d[l] is an eigenvalue, otherwise, iterate. - if m > l { - for iter := 0; ; iter++ { // Could check iteration count here. 
- - // Compute implicit shift - g := d[l] - p := (d[l+1] - g) / (2 * e[l]) - r := math.Hypot(p, 1) - if p < 0 { - r = -r - } - d[l] = e[l] / (p + r) - d[l+1] = e[l] * (p + r) - dl1 := d[l+1] - h := g - d[l] - for i := l + 2; i < n; i++ { - d[i] -= h - } - f += h - - // Implicit QL transformation. - p = d[m] - c := 1. - c2 := c - c3 := c - el1 := e[l+1] - var ( - s float64 - s2 float64 - ) - for i := m - 1; i >= l; i-- { - c3 = c2 - c2 = c - s2 = s - g = c * e[i] - h = c * p - r = math.Hypot(p, e[i]) - e[i+1] = s * r - s = e[i] / r - c = p / r - p = c*d[i] - s*g - d[i+1] = h + s*(c*g+s*d[i]) - - // Accumulate transformation. - for k := 0; k < n; k++ { - h = v.at(k, i+1) - v.set(k, i+1, s*v.at(k, i)+c*h) - v.set(k, i, c*v.at(k, i)-s*h) - } - } - p = -s * s2 * c3 * el1 * e[l] / dl1 - e[l] = s * p - d[l] = c * p - - // Check for convergence. - if math.Abs(e[l]) <= epsilon*tst1 { - break - } - } - } - d[l] += f - e[l] = 0 - } - - // Sort eigenvalues and corresponding vectors. - for i := 0; i < n-1; i++ { - k := i - p := d[i] - for j := i + 1; j < n; j++ { - if d[j] < p { - k = j - p = d[j] - } - } - if k != i { - d[k] = d[i] - d[i] = p - for j := 0; j < n; j++ { - p = v.at(j, i) - v.set(j, i, v.at(j, k)) - v.set(j, k, p) - } - } - } -} - -// Nonsymmetric reduction to Hessenberg form. -// -// This is derived from the Algol procedures orthes and ortran, -// by Martin and Wilkinson, Handbook for Auto. Comp., -// Vol.ii-Linear Algebra, and the corresponding -// Fortran subroutines in EISPACK. -func orthes(a *Dense) (hess, v *Dense) { - n, _ := a.Dims() - hess = a - - ort := make([]float64, n) - - low := 0 - high := n - 1 - - for m := low + 1; m <= high-1; m++ { - // Scale column. - var scale float64 - for i := m; i <= high; i++ { - scale += math.Abs(hess.at(i, m-1)) - } - if scale != 0 { - // Compute Householder transformation. 
- var h float64 - for i := high; i >= m; i-- { - ort[i] = hess.at(i, m-1) / scale - h += ort[i] * ort[i] - } - g := math.Sqrt(h) - if ort[m] > 0 { - g = -g - } - h -= ort[m] * g - ort[m] -= g - - // Apply Householder similarity transformation - // hess = (I-u*u'/h)*hess*(I-u*u')/h) - for j := m; j < n; j++ { - var f float64 - for i := high; i >= m; i-- { - f += ort[i] * hess.at(i, j) - } - f /= h - for i := m; i <= high; i++ { - hess.set(i, j, hess.at(i, j)-f*ort[i]) - } - } - - for i := 0; i <= high; i++ { - var f float64 - for j := high; j >= m; j-- { - f += ort[j] * hess.at(i, j) - } - f /= h - for j := m; j <= high; j++ { - hess.set(i, j, hess.at(i, j)-f*ort[j]) - } - } - ort[m] *= scale - hess.set(m, m-1, scale*g) - } - } - - // Accumulate transformations (Algol's ortran). - v = NewDense(n, n, nil) - for i := 0; i < n; i++ { - for j := 0; j < n; j++ { - if i == j { - v.set(i, j, 1) - } else { - v.set(i, j, 0) - } - } - } - for m := high - 1; m >= low+1; m-- { - if hess.at(m, m-1) != 0 { - for i := m + 1; i <= high; i++ { - ort[i] = hess.at(i, m-1) - } - for j := m; j <= high; j++ { - var g float64 - for i := m; i <= high; i++ { - g += ort[i] * v.at(i, j) - } - - // Double division avoids possible underflow - g = (g / ort[m]) / hess.at(m, m-1) - for i := m; i <= high; i++ { - v.set(i, j, v.at(i, j)+g*ort[i]) - } - } - } - } - - return hess, v -} - -// Nonsymmetric reduction from Hessenberg to real Schur form. -// -// This is derived from the Algol procedure hqr2, -// by Martin and Wilkinson, Handbook for Auto. Comp., -// Vol.ii-Linear Algebra, and the corresponding -// Fortran subroutine in EISPACK. 
-func hqr2(d, e []float64, hess, v *Dense, epsilon float64) { - // Initialize - nn := len(d) - n := nn - 1 - - low := 0 - high := n - - var exshift, p, q, r, s, z, t, w, x, y float64 - - // Store roots isolated by balanc and compute matrix norm - var norm float64 - for i := 0; i < nn; i++ { - if i < low || i > high { - d[i] = hess.at(i, i) - e[i] = 0 - } - for j := max(i-1, 0); j < nn; j++ { - norm += math.Abs(hess.at(i, j)) - } - } - - // Outer loop over eigenvalue index - for iter := 0; n >= low; { - // Look for single small sub-diagonal element - l := n - for l > low { - s = math.Abs(hess.at(l-1, l-1)) + math.Abs(hess.at(l, l)) - if s == 0 { - s = norm - } - if math.Abs(hess.at(l, l-1)) < epsilon*s { - break - } - l-- - } - - // Check for convergence - if l == n { - // One root found - hess.set(n, n, hess.at(n, n)+exshift) - d[n] = hess.at(n, n) - e[n] = 0 - n-- - iter = 0 - } else if l == n-1 { - // Two roots found - w = hess.at(n, n-1) * hess.at(n-1, n) - p = (hess.at(n-1, n-1) - hess.at(n, n)) / 2.0 - q = p*p + w - z = math.Sqrt(math.Abs(q)) - hess.set(n, n, hess.at(n, n)+exshift) - hess.set(n-1, n-1, hess.at(n-1, n-1)+exshift) - x = hess.at(n, n) - - // Real pair - if q >= 0 { - if p >= 0 { - z = p + z - } else { - z = p - z - } - d[n-1] = x + z - d[n] = d[n-1] - if z != 0 { - d[n] = x - w/z - } - e[n-1] = 0 - e[n] = 0 - x = hess.at(n, n-1) - s = math.Abs(x) + math.Abs(z) - p = x / s - q = z / s - r = math.Hypot(p, q) - p /= r - q /= r - - // Row modification - for j := n - 1; j < nn; j++ { - z = hess.at(n-1, j) - hess.set(n-1, j, q*z+p*hess.at(n, j)) - hess.set(n, j, q*hess.at(n, j)-p*z) - } - - // Column modification - for i := 0; i <= n; i++ { - z = hess.at(i, n-1) - hess.set(i, n-1, q*z+p*hess.at(i, n)) - hess.set(i, n, q*hess.at(i, n)-p*z) - } - - // Accumulate transformations - for i := low; i <= high; i++ { - z = v.at(i, n-1) - v.set(i, n-1, q*z+p*v.at(i, n)) - v.set(i, n, q*v.at(i, n)-p*z) - } - } else { - // Complex pair - d[n-1] = x + p - d[n] = x 
+ p - e[n-1] = z - e[n] = -z - } - n -= 2 - iter = 0 - } else { - // No convergence yet - - // Form shift - x = hess.at(n, n) - y = 0 - w = 0 - if l < n { - y = hess.at(n-1, n-1) - w = hess.at(n, n-1) * hess.at(n-1, n) - } - - // Wilkinson's original ad hoc shift - if iter == 10 { - exshift += x - for i := low; i <= n; i++ { - hess.set(i, i, hess.at(i, i)-x) - } - s = math.Abs(hess.at(n, n-1)) + math.Abs(hess.at(n-1, n-2)) - x = 0.75 * s - y = x - w = -0.4375 * s * s - } - - // MATLAB's new ad hoc shift - if iter == 30 { - s = (y - x) / 2 - s = s*s + w - if s > 0 { - s = math.Sqrt(s) - if y < x { - s = -s - } - s = x - w/((y-x)/2+s) - for i := low; i <= n; i++ { - hess.set(i, i, hess.at(i, i)-s) - } - exshift += s - x = 0.964 - y = x - w = x - } - } - - iter++ // Could check iteration count here. - - // Look for two consecutive small sub-diagonal elements - m := n - 2 - for m >= l { - z = hess.at(m, m) - r = x - z - s = y - z - p = (r*s-w)/hess.at(m+1, m) + hess.at(m, m+1) - q = hess.at(m+1, m+1) - z - r - s - r = hess.at(m+2, m+1) - s = math.Abs(p) + math.Abs(q) + math.Abs(r) - p /= s - q /= s - r /= s - if m == l { - break - } - if math.Abs(hess.at(m, m-1))*(math.Abs(q)+math.Abs(r)) < - epsilon*(math.Abs(p)*(math.Abs(hess.at(m-1, m-1))+math.Abs(z)+math.Abs(hess.at(m+1, m+1)))) { - break - } - m-- - } - - for i := m + 2; i <= n; i++ { - hess.set(i, i-2, 0) - if i > m+2 { - hess.set(i, i-3, 0) - } - } - - // Double QR step involving rows l:n and columns m:n - for k := m; k <= n-1; k++ { - last := k == n-1 - if k != m { - p = hess.at(k, k-1) - q = hess.at(k+1, k-1) - if !last { - r = hess.at(k+2, k-1) - } else { - r = 0 - } - x = math.Abs(p) + math.Abs(q) + math.Abs(r) - if x == 0 { - continue - } - p /= x - q /= x - r /= x - } - - s = math.Sqrt(p*p + q*q + r*r) - if p < 0 { - s = -s - } - if s != 0 { - if k != m { - hess.set(k, k-1, -s*x) - } else if l != m { - hess.set(k, k-1, -hess.at(k, k-1)) - } - p += s - x = p / s - y = q / s - z = r / s - q /= p - r /= p - - 
// Row modification - for j := k; j < nn; j++ { - p = hess.at(k, j) + q*hess.at(k+1, j) - if !last { - p += r * hess.at(k+2, j) - hess.set(k+2, j, hess.at(k+2, j)-p*z) - } - hess.set(k, j, hess.at(k, j)-p*x) - hess.set(k+1, j, hess.at(k+1, j)-p*y) - } - - // Column modification - for i := 0; i <= min(n, k+3); i++ { - p = x*hess.at(i, k) + y*hess.at(i, k+1) - if !last { - p += z * hess.at(i, k+2) - hess.set(i, k+2, hess.at(i, k+2)-p*r) - } - hess.set(i, k, hess.at(i, k)-p) - hess.set(i, k+1, hess.at(i, k+1)-p*q) - } - - // Accumulate transformations - for i := low; i <= high; i++ { - p = x*v.at(i, k) + y*v.at(i, k+1) - if !last { - p += z * v.at(i, k+2) - v.set(i, k+2, v.at(i, k+2)-p*r) - } - v.set(i, k, v.at(i, k)-p) - v.set(i, k+1, v.at(i, k+1)-p*q) - } - } - } - } - } - - // Backsubstitute to find vectors of upper triangular form - if norm == 0 { - return - } - - for n = nn - 1; n >= 0; n-- { - p = d[n] - q = e[n] - - if q == 0 { - // Real vector - l := n - hess.set(n, n, 1) - for i := n - 1; i >= 0; i-- { - w = hess.at(i, i) - p - r = 0 - for j := l; j <= n; j++ { - r += hess.at(i, j) * hess.at(j, n) - } - if e[i] < 0 { - z = w - s = r - } else { - l = i - if e[i] == 0 { - if w != 0 { - hess.set(i, n, -r/w) - } else { - hess.set(i, n, -r/(epsilon*norm)) - } - } else { - // Solve real equations - x = hess.at(i, i+1) - y = hess.at(i+1, i) - q = (d[i]-p)*(d[i]-p) + e[i]*e[i] - t = (x*s - z*r) / q - hess.set(i, n, t) - if math.Abs(x) > math.Abs(z) { - hess.set(i+1, n, (-r-w*t)/x) - } else { - hess.set(i+1, n, (-s-y*t)/z) - } - } - - // Overflow control - t = math.Abs(hess.at(i, n)) - if epsilon*t*t > 1 { - for j := i; j <= n; j++ { - hess.set(j, n, hess.at(j, n)/t) - } - } - } - } - } else if q < 0 { - // Complex vector - - l := n - 1 - - // Last vector component imaginary so matrix is triangular - if math.Abs(hess.at(n, n-1)) > math.Abs(hess.at(n-1, n)) { - hess.set(n-1, n-1, q/hess.at(n, n-1)) - hess.set(n-1, n, -(hess.at(n, n)-p)/hess.at(n, n-1)) - } else { - c 
:= complex(0, -hess.at(n-1, n)) / complex(hess.at(n-1, n-1)-p, q) - hess.set(n-1, n-1, real(c)) - hess.set(n-1, n, imag(c)) - } - hess.set(n, n-1, 0) - hess.set(n, n, 1) - - for i := n - 2; i >= 0; i-- { - var ra, sa, vr, vi float64 - for j := l; j <= n; j++ { - ra += hess.at(i, j) * hess.at(j, n-1) - sa += hess.at(i, j) * hess.at(j, n) - } - w = hess.at(i, i) - p - - if e[i] < 0 { - z = w - r = ra - s = sa - } else { - l = i - if e[i] == 0 { - c := complex(-ra, -sa) / complex(w, q) - hess.set(i, n-1, real(c)) - hess.set(i, n, imag(c)) - } else { - // Solve complex equations - x = hess.at(i, i+1) - y = hess.at(i+1, i) - vr = (d[i]-p)*(d[i]-p) + e[i]*e[i] - q*q - vi = (d[i] - p) * 2 * q - if vr == 0 && vi == 0 { - vr = epsilon * norm * (math.Abs(w) + math.Abs(q) + math.Abs(x) + math.Abs(y) + math.Abs(z)) - } - c := complex(x*r-z*ra+q*sa, x*s-z*sa-q*ra) / complex(vr, vi) - hess.set(i, n-1, real(c)) - hess.set(i, n, imag(c)) - if math.Abs(x) > (math.Abs(z) + math.Abs(q)) { - hess.set(i+1, n-1, (-ra-w*hess.at(i, n-1)+q*hess.at(i, n))/x) - hess.set(i+1, n, (-sa-w*hess.at(i, n)-q*hess.at(i, n-1))/x) - } else { - c := complex(-r-y*hess.at(i, n-1), -s-y*hess.at(i, n)) / complex(z, q) - hess.set(i+1, n-1, real(c)) - hess.set(i+1, n, imag(c)) - } - } - - // Overflow control - t = math.Max(math.Abs(hess.at(i, n-1)), math.Abs(hess.at(i, n))) - if (epsilon*t)*t > 1 { - for j := i; j <= n; j++ { - hess.set(j, n-1, hess.at(j, n-1)/t) - hess.set(j, n, hess.at(j, n)/t) - } - } - } - } - } - } - - // Vectors of isolated roots - for i := 0; i < nn; i++ { - if i < low || i > high { - for j := i; j < nn; j++ { - v.set(i, j, hess.at(i, j)) - } - } - } - - // Back transformation to get eigenvectors of original matrix - for j := nn - 1; j >= low; j-- { - for i := low; i <= high; i++ { - z = 0 - for k := low; k <= min(j, high); k++ { - z += v.at(i, k) * hess.at(k, j) - } - v.set(i, j, z) - } - } -} - -// D returns the block diagonal eigenvalue matrix from the real and imaginary -// 
components d and e. -func (f EigenFactors) D() *Dense { - d, e := f.d, f.e - var n int - if n = len(d); n != len(e) { - panic(ErrSquare) - } - dm := NewDense(n, n, nil) - for i := 0; i < n; i++ { - dm.set(i, i, d[i]) - if e[i] > 0 { - dm.set(i, i+1, e[i]) - } else if e[i] < 0 { - dm.set(i, i-1, e[i]) - } - } - return dm -} diff --git a/vendor/github.com/gonum/matrix/mat64/format.go b/vendor/github.com/gonum/matrix/mat64/format.go deleted file mode 100644 index a4cc6ff2b..000000000 --- a/vendor/github.com/gonum/matrix/mat64/format.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "fmt" - "strconv" -) - -// Format prints a pretty representation of m to the fs io.Writer. The format character c -// specifies the numerical representation of of elements; valid values are those for float64 -// specified in the fmt package, with their associated flags. In addition to this, a '#' for -// all valid verbs except 'v' indicates that zero values be represented by the dot character. -// The '#' associated with the 'v' verb formats the matrix with Go syntax representation. -// The printed range of the matrix can be limited by specifying a positive value for margin; -// If margin is greater than zero, only the first and last margin rows/columns of the matrix -// are output. -func Format(m Matrix, margin int, dot byte, fs fmt.State, c rune) { - rows, cols := m.Dims() - - var printed int - if margin <= 0 { - printed = rows - if cols > printed { - printed = cols - } - } else { - printed = margin - } - - prec, pOk := fs.Precision() - if !pOk { - prec = -1 - } - - var ( - maxWidth int - buf, pad []byte - ) - switch c { - case 'v', 'e', 'E', 'f', 'F', 'g', 'G': - // Note that the '#' flag should have been dealt with by the type. - // So %v is treated exactly as %g here. 
- if c == 'v' { - buf, maxWidth = maxCellWidth(m, 'g', printed, prec) - } else { - buf, maxWidth = maxCellWidth(m, c, printed, prec) - } - default: - fmt.Fprintf(fs, "%%!%c(%T=Dims(%d, %d))", c, m, rows, cols) - return - } - width, _ := fs.Width() - width = max(width, maxWidth) - pad = make([]byte, max(width, 2)) - for i := range pad { - pad[i] = ' ' - } - - if rows > 2*printed || cols > 2*printed { - fmt.Fprintf(fs, "Dims(%d, %d)\n", rows, cols) - } - - skipZero := fs.Flag('#') - for i := 0; i < rows; i++ { - var el string - switch { - case rows == 1: - fmt.Fprint(fs, "[") - el = "]" - case i == 0: - fmt.Fprint(fs, "⎡") - el = "⎤\n" - case i < rows-1: - fmt.Fprint(fs, "⎢") - el = "⎥\n" - default: - fmt.Fprint(fs, "⎣") - el = "⎦" - } - - for j := 0; j < cols; j++ { - if j >= printed && j < cols-printed { - j = cols - printed - 1 - if i == 0 || i == rows-1 { - fmt.Fprint(fs, "... ... ") - } else { - fmt.Fprint(fs, " ") - } - continue - } - - v := m.At(i, j) - if v == 0 && skipZero { - buf = buf[:1] - buf[0] = dot - } else { - if c == 'v' { - buf = strconv.AppendFloat(buf[:0], v, 'g', prec, 64) - } else { - buf = strconv.AppendFloat(buf[:0], v, byte(c), prec, 64) - } - } - if fs.Flag('-') { - fs.Write(buf) - fs.Write(pad[:width-len(buf)]) - } else { - fs.Write(pad[:width-len(buf)]) - fs.Write(buf) - } - - if j < cols-1 { - fs.Write(pad[:2]) - } - } - - fmt.Fprint(fs, el) - - if i >= printed-1 && i < rows-printed && 2*printed < rows { - i = rows - printed - 1 - fmt.Fprint(fs, " .\n .\n .\n") - continue - } - } -} - -func maxCellWidth(m Matrix, c rune, printed, prec int) ([]byte, int) { - var ( - buf = make([]byte, 0, 64) - rows, cols = m.Dims() - max int - ) - for i := 0; i < rows; i++ { - if i >= printed-1 && i < rows-printed && 2*printed < rows { - i = rows - printed - 1 - continue - } - for j := 0; j < cols; j++ { - if j >= printed && j < cols-printed { - continue - } - - buf = strconv.AppendFloat(buf, m.At(i, j), byte(c), prec, 64) - if len(buf) > max { - max = 
len(buf) - } - buf = buf[:0] - } - } - return buf, max -} diff --git a/vendor/github.com/gonum/matrix/mat64/index_bound_checks.go b/vendor/github.com/gonum/matrix/mat64/index_bound_checks.go deleted file mode 100644 index 282b69bd0..000000000 --- a/vendor/github.com/gonum/matrix/mat64/index_bound_checks.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file must be kept in sync with index_no_bound_checks.go. - -//+build bounds - -package mat64 - -import "github.com/gonum/blas" - -// At returns the element at row r, column c. -func (m *Dense) At(r, c int) float64 { - return m.at(r, c) -} - -func (m *Dense) at(r, c int) float64 { - if r >= m.mat.Rows || r < 0 { - panic(ErrRowAccess) - } - if c >= m.mat.Cols || c < 0 { - panic(ErrColAccess) - } - return m.mat.Data[r*m.mat.Stride+c] -} - -// Set sets the element at row r, column c to the value v. -func (m *Dense) Set(r, c int, v float64) { - m.set(r, c, v) -} - -func (m *Dense) set(r, c int, v float64) { - if r >= m.mat.Rows || r < 0 { - panic(ErrRowAccess) - } - if c >= m.mat.Cols || c < 0 { - panic(ErrColAccess) - } - m.mat.Data[r*m.mat.Stride+c] = v -} - -// At returns the element at row r, column c. It panics if c is not zero. -func (v *Vector) At(r, c int) float64 { - if c != 0 { - panic(ErrColAccess) - } - return v.at(r) -} - -func (v *Vector) at(r int) float64 { - if r < 0 || r >= v.n { - panic(ErrRowAccess) - } - return v.mat.Data[r*v.mat.Inc] -} - -// Set sets the element at row r to the value val. It panics if r is less than -// zero or greater than the length. -func (v *Vector) SetVec(i int, val float64) { - v.setVec(i, val) -} - -func (v *Vector) setVec(i int, val float64) { - if i < 0 || i >= v.n { - panic(ErrVectorAccess) - } - v.mat.Data[i*v.mat.Inc] = val -} - -// At returns the element at row r and column c. 
-func (t *SymDense) At(r, c int) float64 { - return t.at(r, c) -} - -func (t *SymDense) at(r, c int) float64 { - if r >= t.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= t.mat.N || c < 0 { - panic(ErrColAccess) - } - if r > c { - r, c = c, r - } - return t.mat.Data[r*t.mat.Stride+c] -} - -// SetSym sets the elements at (r,c) and (c,r) to the value v. -func (t *SymDense) SetSym(r, c int, v float64) { - t.set(r, c, v) -} - -func (t *SymDense) set(r, c int, v float64) { - if r >= t.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= t.mat.N || c < 0 { - panic(ErrColAccess) - } - if r > c { - r, c = c, r - } - t.mat.Data[r*t.mat.Stride+c] = v -} - -// At returns the element at row r, column c. -func (t *TriDense) At(r, c int) float64 { - return t.at(r, c) -} - -func (t *TriDense) at(r, c int) float64 { - if r >= t.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= t.mat.N || c < 0 { - panic(ErrColAccess) - } - if t.mat.Uplo == blas.Upper { - if r > c { - return 0 - } - return t.mat.Data[r*t.mat.Stride+c] - } - if r < c { - return 0 - } - return t.mat.Data[r*t.mat.Stride+c] -} - -// SetTri sets the element of the triangular matrix at row r, column c to the value v. -// It panics if the location is outside the appropriate half of the matrix. 
-func (t *TriDense) SetTri(r, c int, v float64) { - t.set(r, c, v) -} - -func (t *TriDense) set(r, c int, v float64) { - if r >= t.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= t.mat.N || c < 0 { - panic(ErrColAccess) - } - if t.mat.Uplo == blas.Upper && r > c { - panic("mat64: triangular set out of bounds") - } - if t.mat.Uplo == blas.Lower && r < c { - panic("mat64: triangular set out of bounds") - } - t.mat.Data[r*t.mat.Stride+c] = v -} diff --git a/vendor/github.com/gonum/matrix/mat64/index_no_bound_checks.go b/vendor/github.com/gonum/matrix/mat64/index_no_bound_checks.go deleted file mode 100644 index 716cbf3f0..000000000 --- a/vendor/github.com/gonum/matrix/mat64/index_no_bound_checks.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file must be kept in sync with index_bound_checks.go. - -//+build !bounds - -package mat64 - -import "github.com/gonum/blas" - -// At returns the element at row r, column c. -func (m *Dense) At(r, c int) float64 { - if r >= m.mat.Rows || r < 0 { - panic(ErrRowAccess) - } - if c >= m.mat.Cols || c < 0 { - panic(ErrColAccess) - } - return m.at(r, c) -} - -func (m *Dense) at(r, c int) float64 { - return m.mat.Data[r*m.mat.Stride+c] -} - -// Set sets the element at row r, column c to the value v. -func (m *Dense) Set(r, c int, v float64) { - if r >= m.mat.Rows || r < 0 { - panic(ErrRowAccess) - } - if c >= m.mat.Cols || c < 0 { - panic(ErrColAccess) - } - m.set(r, c, v) -} - -func (m *Dense) set(r, c int, v float64) { - m.mat.Data[r*m.mat.Stride+c] = v -} - -// At returns the element at row r, column c. It panics if c is not zero. 
-func (v *Vector) At(r, c int) float64 { - if r < 0 || r >= v.n { - panic(ErrRowAccess) - } - if c != 0 { - panic(ErrColAccess) - } - return v.at(r) -} - -func (v *Vector) at(r int) float64 { - return v.mat.Data[r*v.mat.Inc] -} - -// Set sets the element at row r to the value val. It panics if r is less than -// zero or greater than the length. -func (v *Vector) SetVec(i int, val float64) { - if i < 0 || i >= v.n { - panic(ErrVectorAccess) - } - v.setVec(i, val) -} - -func (v *Vector) setVec(i int, val float64) { - v.mat.Data[i*v.mat.Inc] = val -} - -// At returns the element at row r and column c. -func (s *SymDense) At(r, c int) float64 { - if r >= s.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= s.mat.N || c < 0 { - panic(ErrColAccess) - } - return s.at(r, c) -} - -func (s *SymDense) at(r, c int) float64 { - if r > c { - r, c = c, r - } - return s.mat.Data[r*s.mat.Stride+c] -} - -// SetSym sets the elements at (r,c) and (c,r) to the value v. -func (s *SymDense) SetSym(r, c int, v float64) { - if r >= s.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= s.mat.N || c < 0 { - panic(ErrColAccess) - } - s.set(r, c, v) -} - -func (s *SymDense) set(r, c int, v float64) { - if r > c { - r, c = c, r - } - s.mat.Data[r*s.mat.Stride+c] = v -} - -// At returns the element at row r, column c. -func (t *TriDense) At(r, c int) float64 { - if r >= t.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= t.mat.N || c < 0 { - panic(ErrColAccess) - } - return t.at(r, c) -} - -func (t *TriDense) at(r, c int) float64 { - if t.mat.Uplo == blas.Upper { - if r > c { - return 0 - } - return t.mat.Data[r*t.mat.Stride+c] - } - if r < c { - return 0 - } - return t.mat.Data[r*t.mat.Stride+c] -} - -// SetTri sets the element at row r, column c to the value v. -// It panics if the location is outside the appropriate half of the matrix. 
-func (t *TriDense) SetTri(r, c int, v float64) { - if r >= t.mat.N || r < 0 { - panic(ErrRowAccess) - } - if c >= t.mat.N || c < 0 { - panic(ErrColAccess) - } - if t.mat.Uplo == blas.Upper && r > c { - panic("mat64: triangular set out of bounds") - } - if t.mat.Uplo == blas.Lower && r < c { - panic("mat64: triangular set out of bounds") - } - t.set(r, c, v) -} - -func (t *TriDense) set(r, c int, v float64) { - t.mat.Data[r*t.mat.Stride+c] = v -} diff --git a/vendor/github.com/gonum/matrix/mat64/inner.go b/vendor/github.com/gonum/matrix/mat64/inner.go deleted file mode 100644 index 77616baea..000000000 --- a/vendor/github.com/gonum/matrix/mat64/inner.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "github.com/gonum/blas" - "github.com/gonum/internal/asm" -) - -// Inner computes the generalized inner product -// x^T A y -// between vectors x and y with matrix A. This is only a true inner product if -// A is symmetric positive definite, though the operation works for any matrix A. -// -// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix. -func Inner(x *Vector, A Matrix, y *Vector) float64 { - m, n := A.Dims() - if x.Len() != m { - panic(ErrShape) - } - if y.Len() != n { - panic(ErrShape) - } - if m == 0 || n == 0 { - return 0 - } - - var sum float64 - - switch b := A.(type) { - case RawSymmetricer: - bmat := b.RawSymmetric() - if bmat.Uplo != blas.Upper { - // Panic as a string not a mat64.Error. 
- panic(badSymTriangle) - } - for i := 0; i < x.Len(); i++ { - xi := x.at(i) - if xi != 0 { - if y.mat.Inc == 1 { - sum += xi * asm.DdotUnitary( - bmat.Data[i*bmat.Stride+i:i*bmat.Stride+n], - y.mat.Data[i:], - ) - } else { - sum += xi * asm.DdotInc( - bmat.Data[i*bmat.Stride+i:i*bmat.Stride+n], - y.mat.Data[i*y.mat.Inc:], uintptr(n-i), - 1, uintptr(y.mat.Inc), - 0, 0, - ) - } - } - yi := y.at(i) - if i != n-1 && yi != 0 { - if x.mat.Inc == 1 { - sum += yi * asm.DdotUnitary( - bmat.Data[i*bmat.Stride+i+1:i*bmat.Stride+n], - x.mat.Data[i+1:], - ) - } else { - sum += yi * asm.DdotInc( - bmat.Data[i*bmat.Stride+i+1:i*bmat.Stride+n], - x.mat.Data[(i+1)*x.mat.Inc:], uintptr(n-i-1), - 1, uintptr(x.mat.Inc), - 0, 0, - ) - } - } - } - case RawMatrixer: - bmat := b.RawMatrix() - for i := 0; i < x.Len(); i++ { - xi := x.at(i) - if xi != 0 { - if y.mat.Inc == 1 { - sum += xi * asm.DdotUnitary( - bmat.Data[i*bmat.Stride:i*bmat.Stride+n], - y.mat.Data, - ) - } else { - sum += xi * asm.DdotInc( - bmat.Data[i*bmat.Stride:i*bmat.Stride+n], - y.mat.Data, uintptr(n), - 1, uintptr(y.mat.Inc), - 0, 0, - ) - } - } - } - default: - for i := 0; i < x.Len(); i++ { - xi := x.at(i) - for j := 0; j < y.Len(); j++ { - sum += xi * A.At(i, j) * y.at(j) - } - } - } - return sum -} diff --git a/vendor/github.com/gonum/matrix/mat64/io.go b/vendor/github.com/gonum/matrix/mat64/io.go deleted file mode 100644 index 043d4cfbc..000000000 --- a/vendor/github.com/gonum/matrix/mat64/io.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright ©2015 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mat64 - -import ( - "encoding/binary" -) - -var ( - littleEndian = binary.LittleEndian - bigEndian = binary.BigEndian - defaultEndian = littleEndian - - sizeInt64 = binary.Size(int64(0)) - sizeFloat64 = binary.Size(float64(0)) -) diff --git a/vendor/github.com/gonum/matrix/mat64/lq.go b/vendor/github.com/gonum/matrix/mat64/lq.go deleted file mode 100644 index 6d1c61651..000000000 --- a/vendor/github.com/gonum/matrix/mat64/lq.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "math" - - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -type LQFactor struct { - LQ *Dense - lDiag []float64 -} - -// LQ computes an LQ Decomposition for an m-by-n matrix a with m <= n by Householder -// reflections. The LQ decomposition is an m-by-n orthogonal matrix q and an m-by-m -// lower triangular matrix l so that a = l.q. LQ will panic with ErrShape if m > n. -// -// The LQ decomposition always exists, even if the matrix does not have full rank, -// so LQ will never fail unless m > n. The primary use of the LQ decomposition is -// in the least squares solution of non-square systems of simultaneous linear equations. -// This will fail if LQIsFullRank() returns false. The matrix a is overwritten by the -// decomposition. -func LQ(a *Dense) LQFactor { - // Initialize. - m, n := a.Dims() - if m > n { - panic(ErrShape) - } - - lq := *a - - lDiag := make([]float64, m) - projs := NewVector(m, nil) - - // Main loop. - for k := 0; k < m; k++ { - hh := lq.RawRowView(k)[k:] - norm := blas64.Nrm2(len(hh), blas64.Vector{Inc: 1, Data: hh}) - lDiag[k] = norm - - if norm != 0 { - hhNorm := (norm * math.Sqrt(1-hh[0]/norm)) - if hhNorm == 0 { - hh[0] = 0 - } else { - // Form k-th Householder vector. 
- s := 1 / hhNorm - hh[0] -= norm - blas64.Scal(len(hh), s, blas64.Vector{Inc: 1, Data: hh}) - - // Apply transformation to remaining columns. - if k < m-1 { - a = lq.View(k+1, k, m-k-1, n-k).(*Dense) - projs = projs.ViewVec(0, m-k-1) - projs.MulVec(a, false, NewVector(len(hh), hh)) - - for j := 0; j < m-k-1; j++ { - dst := a.RawRowView(j) - blas64.Axpy(len(dst), -projs.at(j), - blas64.Vector{Inc: 1, Data: hh}, - blas64.Vector{Inc: 1, Data: dst}, - ) - } - } - } - } - } - *a = lq - - return LQFactor{a, lDiag} -} - -// IsFullRank returns whether the L matrix and hence a has full rank. -func (f LQFactor) IsFullRank() bool { - for _, v := range f.lDiag { - if v == 0 { - return false - } - } - return true -} - -// L returns the lower triangular factor for the LQ decomposition. -func (f LQFactor) L() *Dense { - lq, lDiag := f.LQ, f.lDiag - m, _ := lq.Dims() - l := NewDense(m, m, nil) - for i, v := range lDiag { - for j := 0; j < m; j++ { - if i < j { - l.set(j, i, lq.at(j, i)) - } else if i == j { - l.set(j, i, v) - } - } - } - return l -} - -// replaces x with Q.x -func (f LQFactor) applyQTo(x *Dense, trans bool) { - nh, nc := f.LQ.Dims() - m, n := x.Dims() - if m != nc { - panic(ErrShape) - } - proj := make([]float64, n) - - if trans { - for k := nh - 1; k >= 0; k-- { - hh := f.LQ.RawRowView(k)[k:] - - sub := x.View(k, 0, m-k, n).(*Dense) - - blas64.Gemv(blas.Trans, - 1, sub.mat, blas64.Vector{Inc: 1, Data: hh}, - 0, blas64.Vector{Inc: 1, Data: proj}, - ) - for i := k; i < m; i++ { - row := x.RawRowView(i) - blas64.Axpy(n, -hh[i-k], - blas64.Vector{Inc: 1, Data: proj}, - blas64.Vector{Inc: 1, Data: row}, - ) - } - } - } else { - for k := 0; k < nh; k++ { - hh := f.LQ.RawRowView(k)[k:] - - sub := x.View(k, 0, m-k, n).(*Dense) - - blas64.Gemv(blas.Trans, - 1, sub.mat, blas64.Vector{Inc: 1, Data: hh}, - 0, blas64.Vector{Inc: 1, Data: proj}, - ) - for i := k; i < m; i++ { - row := x.RawRowView(i) - blas64.Axpy(n, -hh[i-k], - blas64.Vector{Inc: 1, Data: proj}, - 
blas64.Vector{Inc: 1, Data: row}, - ) - } - } - } -} - -// Solve computes minimum norm least squares solution of a.x = b where b has as many rows as a. -// A matrix x is returned that minimizes the two norm of Q*R*X-B. Solve will panic -// if a is not full rank. -func (f LQFactor) Solve(b *Dense) (x *Dense) { - lq := f.LQ - lDiag := f.lDiag - m, n := lq.Dims() - bm, bn := b.Dims() - if bm != m { - panic(ErrShape) - } - if !f.IsFullRank() { - panic(ErrSingular) - } - - x = NewDense(n, bn, nil) - x.Copy(b) - - tau := make([]float64, m) - for i := range tau { - tau[i] = lq.at(i, i) - lq.set(i, i, lDiag[i]) - } - lqT := blas64.Triangular{ - // N omitted since it is not used by Trsm. - Stride: lq.mat.Stride, - Data: lq.mat.Data, - Uplo: blas.Lower, - Diag: blas.NonUnit, - } - x.mat.Rows = bm - blas64.Trsm(blas.Left, blas.NoTrans, 1, lqT, x.mat) - x.mat.Rows = n - for i := range tau { - lq.set(i, i, tau[i]) - } - - f.applyQTo(x, true) - - return x -} diff --git a/vendor/github.com/gonum/matrix/mat64/lu.go b/vendor/github.com/gonum/matrix/mat64/lu.go deleted file mode 100644 index 3d6c09988..000000000 --- a/vendor/github.com/gonum/matrix/mat64/lu.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on the LUDecomposition class from Jama 1.0.3. - -package mat64 - -import ( - "math" -) - -type LUFactors struct { - LU *Dense - Pivot []int - Sign int -} - -// LU performs an LU Decomposition for an m-by-n matrix a. -// -// If m >= n, the LU decomposition is an m-by-n unit lower triangular matrix L, -// an n-by-n upper triangular matrix U, and a permutation vector piv of length m -// so that A(piv,:) = L*U. -// -// If m < n, then L is m-by-m and U is m-by-n. -// -// The LU decompostion with pivoting always exists, even if the matrix is -// singular, so LU will never fail. 
The primary use of the LU decomposition -// is in the solution of square systems of simultaneous linear equations. This -// will fail if IsSingular() returns true. -func LU(a *Dense) LUFactors { - // Use a "left-looking", dot-product, Crout/Doolittle algorithm. - m, n := a.Dims() - lu := a - - piv := make([]int, m) - for i := range piv { - piv[i] = i - } - sign := 1 - - // Outer loop. - luColj := make([]float64, m) - for j := 0; j < n; j++ { - - // Make a copy of the j-th column to localize references. - for i := 0; i < m; i++ { - luColj[i] = lu.at(i, j) - } - - // Apply previous transformations. - for i := 0; i < m; i++ { - luRowi := lu.RawRowView(i) - - // Most of the time is spent in the following dot product. - kmax := min(i, j) - var s float64 - for k, v := range luRowi[:kmax] { - s += v * luColj[k] - } - - luColj[i] -= s - luRowi[j] = luColj[i] - } - - // Find pivot and exchange if necessary. - p := j - for i := j + 1; i < m; i++ { - if math.Abs(luColj[i]) > math.Abs(luColj[p]) { - p = i - } - } - if p != j { - for k := 0; k < n; k++ { - t := lu.at(p, k) - lu.set(p, k, lu.at(j, k)) - lu.set(j, k, t) - } - piv[p], piv[j] = piv[j], piv[p] - sign = -sign - } - - // Compute multipliers. - if j < m && lu.at(j, j) != 0 { - for i := j + 1; i < m; i++ { - lu.set(i, j, lu.at(i, j)/lu.at(j, j)) - } - } - } - - return LUFactors{lu, piv, sign} -} - -// IsSingular returns whether the the upper triangular factor and hence a is -// singular. -func (f LUFactors) IsSingular() bool { - lu := f.LU - _, n := lu.Dims() - for j := 0; j < n; j++ { - if lu.at(j, j) == 0 { - return true - } - } - return false -} - -// L returns the lower triangular factor of the LU decomposition. 
-func (f LUFactors) L() *Dense { - lu := f.LU - m, n := lu.Dims() - if m < n { - n = m - } - l := NewDense(m, n, nil) - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - if i > j { - l.set(i, j, lu.at(i, j)) - } else if i == j { - l.set(i, j, 1) - } - } - } - return l -} - -// U returns the upper triangular factor of the LU decomposition. -func (f LUFactors) U() *Dense { - lu := f.LU - m, n := lu.Dims() - u := NewDense(m, n, nil) - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - if i <= j { - u.set(i, j, lu.at(i, j)) - } - } - } - return u -} - -// Det returns the determinant of matrix a decomposed into lu. The matrix -// a must have been square. -func (f LUFactors) Det() float64 { - lu, sign := f.LU, f.Sign - m, n := lu.Dims() - if m != n { - panic(ErrSquare) - } - d := float64(sign) - for j := 0; j < n; j++ { - d *= lu.at(j, j) - } - return d -} - -// Solve computes a solution of a.x = b where b has as many rows as a. A matrix x -// is returned that minimizes the two norm of L*U*X - B(piv,:). Solve will panic -// if a is singular. The matrix b is overwritten during the call. 
-func (f LUFactors) Solve(b *Dense) (x *Dense) { - lu, piv := f.LU, f.Pivot - m, n := lu.Dims() - bm, bn := b.Dims() - if bm != m { - panic(ErrShape) - } - if f.IsSingular() { - panic(ErrSingular) - } - - // Copy right hand side with pivoting - nx := bn - x = pivotRows(b, piv) - - // Solve L*Y = B(piv,:) - for k := 0; k < n; k++ { - for i := k + 1; i < n; i++ { - for j := 0; j < nx; j++ { - x.set(i, j, x.at(i, j)-x.at(k, j)*lu.at(i, k)) - } - } - } - - // Solve U*X = Y; - for k := n - 1; k >= 0; k-- { - for j := 0; j < nx; j++ { - x.set(k, j, x.at(k, j)/lu.at(k, k)) - } - for i := 0; i < k; i++ { - for j := 0; j < nx; j++ { - x.set(i, j, x.at(i, j)-x.at(k, j)*lu.at(i, k)) - } - } - } - - return x -} - -func pivotRows(a *Dense, piv []int) *Dense { - visit := make([]bool, len(piv)) - _, n := a.Dims() - tmpRow := make([]float64, n) - for to, from := range piv { - for to != from && !visit[from] { - visit[from], visit[to] = true, true - a.Row(tmpRow, from) - a.SetRow(from, a.rowView(to)) - a.SetRow(to, tmpRow) - to, from = from, piv[from] - } - } - return a -} diff --git a/vendor/github.com/gonum/matrix/mat64/matrix.go b/vendor/github.com/gonum/matrix/mat64/matrix.go deleted file mode 100644 index fcf92fc96..000000000 --- a/vendor/github.com/gonum/matrix/mat64/matrix.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mat64 provides basic linear algebra operations for float64 matrices. -// -// Note that in all interfaces that assign the result to the receiver, the receiver must -// be either the correct dimensions for the result or a zero-sized matrix. In the latter -// case, matrix data is allocated and stored in the receiver. If the matrix dimensions -// do not match the result, the method must panic. 
-package mat64 - -import ( - "github.com/gonum/blas/blas64" -) - -// Matrix is the basic matrix interface type. -type Matrix interface { - // Dims returns the dimensions of a Matrix. - Dims() (r, c int) - - // At returns the value of a matrix element at (r, c). It will panic if r or c are - // out of bounds for the matrix. - At(r, c int) float64 -} - -// Mutable is a matrix interface type that allows elements to be altered. -type Mutable interface { - // Set alters the matrix element at (r, c) to v. It will panic if r or c are out of - // bounds for the matrix. - Set(r, c int, v float64) - - Matrix -} - -// A Vectorer can return rows and columns of the represented matrix. -type Vectorer interface { - // Row returns a slice of float64 for the row specified. It will panic if the index - // is out of bounds. If the call requires a copy and dst is not nil it will be used and - // returned, if it is not nil the number of elements copied will be the minimum of the - // length of the slice and the number of columns in the matrix. - Row(dst []float64, i int) []float64 - - // Col returns a slice of float64 for the column specified. It will panic if the index - // is out of bounds. If the call requires a copy and dst is not nil it will be used and - // returned, if it is not nil the number of elements copied will be the minimum of the - // length of the slice and the number of rows in the matrix. - Col(dst []float64, j int) []float64 -} - -// A VectorSetter can set rows and columns in the represented matrix. -type VectorSetter interface { - // SetRow sets the values of the specified row to the values held in a slice of float64. - // It will panic if the index is out of bounds. The number of elements copied is - // returned and will be the minimum of the length of the slice and the number of columns - // in the matrix. - SetRow(i int, src []float64) int - - // SetCol sets the values of the specified column to the values held in a slice of float64. 
- // It will panic if the index is out of bounds. The number of elements copied is - // returned and will be the minimum of the length of the slice and the number of rows - // in the matrix. - SetCol(i int, src []float64) int -} - -// A RowViewer can return a Vector reflecting a row that is backed by the matrix -// data. The Vector returned will have Len() == nCols. -type RowViewer interface { - RowView(r int) *Vector -} - -// A RawRowViewer can return a slice of float64 reflecting a row that is backed by the matrix -// data. -type RawRowViewer interface { - RawRowView(r int) []float64 -} - -// A ColViewer can return a Vector reflecting a row that is backed by the matrix -// data. The Vector returned will have Len() == nRows. -type ColViewer interface { - ColView(c int) *Vector -} - -// A RawColViewer can return a slice of float64 reflecting a column that is backed by the matrix -// data. -type RawColViewer interface { - RawColView(c int) *Vector -} - -// A Cloner can make a copy of a into the receiver, overwriting the previous value of the -// receiver. The clone operation does not make any restriction on shape. -type Cloner interface { - Clone(a Matrix) -} - -// A Reseter can reset the matrix so that it can be reused as the receiver of a dimensionally -// restricted operation. This is commonly used when the matrix is being used a a workspace -// or temporary matrix. -// -// If the matrix is a view, using the reset matrix may result in data corruption in elements -// outside the view. -type Reseter interface { - Reset() -} - -// A Copier can make a copy of elements of a into the receiver. The submatrix copied -// starts at row and column 0 and has dimensions equal to the minimum dimensions of -// the two matrices. The number of row and columns copied is returned. -type Copier interface { - Copy(a Matrix) (r, c int) -} - -// A Viewer returns a submatrix view of the Matrix parameter, starting at row i, column j -// and extending r rows and c columns. 
If i or j are out of range, or r or c are zero or -// extend beyond the bounds of the matrix View will panic with ErrIndexOutOfRange. The -// returned matrix must retain the receiver's reference to the original matrix such that -// changes in the elements of the submatrix are reflected in the original and vice versa. -type Viewer interface { - View(i, j, r, c int) Matrix -} - -// A Grower can grow the size of the represented matrix by the given number of rows and columns. -// Growing beyond the size given by the Caps method will result in the allocation of a new -// matrix and copying of the elements. If Grow is called with negative increments it will -// panic with ErrIndexOutOfRange. -type Grower interface { - Caps() (r, c int) - Grow(r, c int) Matrix -} - -// A Normer can return the specified matrix norm, o of the matrix represented by the receiver. -// -// Valid order values are: -// -// 1 - max of the sum of the absolute values of columns -// -1 - min of the sum of the absolute values of columns -// Inf - max of the sum of the absolute values of rows -// -Inf - min of the sum of the absolute values of rows -// 0 - Frobenius norm -// -// Norm will panic with ErrNormOrder if an illegal norm order is specified. -type Normer interface { - Norm(o float64) float64 -} - -// A TransposeCopier can make a copy of the transpose the matrix represented by a, placing the elements -// into the receiver. -type TransposeCopier interface { - TCopy(a Matrix) -} - -// A Transposer can create a transposed view matrix from the matrix represented by the receiver. -// Changes made to the returned Matrix may be reflected in the original. -type Transposer interface { - T() Matrix -} - -// A Deter can return the determinant of the represented matrix. -type Deter interface { - Det() float64 -} - -// An Inver can calculate the inverse of the matrix represented by a and stored in the receiver. -// ErrSingular is returned if there is no inverse of the matrix. 
-type Inver interface { - Inv(a Matrix) error -} - -// An Adder can add the matrices represented by a and b, placing the result in the receiver. Add -// will panic if the two matrices do not have the same shape. -type Adder interface { - Add(a, b Matrix) -} - -// A Suber can subtract the matrix b from a, placing the result in the receiver. Sub will panic if -// the two matrices do not have the same shape. -type Suber interface { - Sub(a, b Matrix) -} - -// An ElemMuler can perform element-wise multiplication of the matrices represented by a and b, -// placing the result in the receiver. MulEmen will panic if the two matrices do not have the same -// shape. -type ElemMuler interface { - MulElem(a, b Matrix) -} - -// An ElemDiver can perform element-wise division a / b of the matrices represented by a and b, -// placing the result in the receiver. DivElem will panic if the two matrices do not have the same -// shape. -type ElemDiver interface { - DivElem(a, b Matrix) -} - -// An Equaler can compare the matrices represented by b and the receiver. Matrices with non-equal shapes -// are not equal. -type Equaler interface { - Equals(b Matrix) bool -} - -// An ApproxEqualer can compare the matrices represented by b and the receiver, with tolerance for -// element-wise equailty specified by epsilon. Matrices with non-equal shapes are not equal. -type ApproxEqualer interface { - EqualsApprox(b Matrix, epsilon float64) bool -} - -// A Scaler can perform scalar multiplication of the matrix represented by a with c, placing -// the result in the receiver. -type Scaler interface { - Scale(c float64, a Matrix) -} - -// A Sumer can return the sum of elements of the matrix represented by the receiver. -type Sumer interface { - Sum() float64 -} - -// A Muler can determine the matrix product of a and b, placing the result in the receiver. -// If the number of columns in a does not equal the number of rows in b, Mul will panic. 
-type Muler interface { - Mul(a, b Matrix) -} - -// A MulTranser can determine the matrix product of a and b, optionally taking the transpose -// of either a, b, or both, placing the result in the receiver. It performs OpA(a) * OpB(b), -// where OpA is transpose(a) when aTrans is true, and does nothing when aTrans == blas.NoTrans. -// The same logic applies to OpB. If the number of columns in OpA(a) does not equal the -// number of rows in OpB(b), MulTrans will panic. -type MulTranser interface { - MulTrans(a Matrix, aTrans bool, b Matrix, bTrans bool) -} - -// An Exper can perform a matrix exponentiation of the square matrix a. Exp will panic with ErrShape -// if a is not square. -type Exper interface { - Exp(a Matrix) -} - -// A Power can raise a square matrix, a to a positive integral power, n. Pow will panic if n is negative -// or if a is not square. -type Power interface { - Pow(a Matrix, n int) -} - -// A Dotter can determine the sum of the element-wise products of the elements of the receiver and b. -// If the shapes of the two matrices differ, Dot will panic. -type Dotter interface { - Dot(b Matrix) float64 -} - -// A Stacker can create the stacked matrix of a with b, where b is placed in the greater indexed rows. -// The result of stacking is placed in the receiver, overwriting the previous value of the receiver. -// Stack will panic if the two input matrices do not have the same number of columns. -type Stacker interface { - Stack(a, b Matrix) -} - -// An Augmenter can create the augmented matrix of a with b, where b is placed in the greater indexed -// columns. The result of augmentation is placed in the receiver, overwriting the previous value of the -// receiver. Augment will panic if the two input matrices do not have the same number of rows. -type Augmenter interface { - Augment(a, b Matrix) -} - -// An ApplyFunc takes a row/column index and element value and returns some function of that tuple. 
-type ApplyFunc func(r, c int, v float64) float64 - -// An Applyer can apply an Applyfunc f to each of the elements of the matrix represented by a, -// placing the resulting matrix in the receiver. -type Applyer interface { - Apply(f ApplyFunc, a Matrix) -} - -// A Tracer can return the trace of the matrix represented by the receiver. Trace will panic if the -// matrix is not square. -type Tracer interface { - Trace() float64 -} - -// A Uer can return the upper triangular matrix of the matrix represented by a, placing the result -// in the receiver. If the concrete value of a is the receiver, the lower residue is zeroed in place. -type Uer interface { - U(a Matrix) -} - -// An Ler can return the lower triangular matrix of the matrix represented by a, placing the result -// in the receiver. If the concrete value of a is the receiver, the upper residue is zeroed in place. -type Ler interface { - L(a Matrix) -} - -// A BandWidther represents a banded matrix and can return the left and right half-bandwidths, k1 and -// k2. -type BandWidther interface { - BandWidth() (k1, k2 int) -} - -// A RawMatrixSetter can set the underlying blas64.General used by the receiver. There is no restriction -// on the shape of the receiver. Changes to the receiver's elements will be reflected in the blas64.General.Data. -type RawMatrixSetter interface { - SetRawMatrix(a blas64.General) -} - -// A RawMatrixer can return a blas64.General representation of the receiver. Changes to the blas64.General.Data -// slice will be reflected in the original matrix, changes to the Rows, Cols and Stride fields will not. -type RawMatrixer interface { - RawMatrix() blas64.General -} - -// A RawVectorer can return a blas64.Vector representation of the receiver. Changes to the blas64.Vector.Data -// slice will be reflected in the original matrix, changes to the Inc field will not. -type RawVectorer interface { - RawVector() blas64.Vector -} - -// Det returns the determinant of the matrix a. 
-func Det(a Matrix) float64 { - if a, ok := a.(Deter); ok { - return a.Det() - } - return LU(DenseCopyOf(a)).Det() -} - -// Inverse returns the inverse or pseudoinverse of the matrix a. -// It returns a nil matrix and ErrSingular if a is singular. -func Inverse(a Matrix) (*Dense, error) { - m, _ := a.Dims() - d := make([]float64, m*m) - for i := 0; i < m*m; i += m + 1 { - d[i] = 1 - } - eye := NewDense(m, m, d) - return Solve(a, eye) -} - -// Solve returns a matrix x that satisfies ax = b. -// It returns a nil matrix and ErrSingular if a is singular. -func Solve(a, b Matrix) (x *Dense, err error) { - switch m, n := a.Dims(); { - case m == n: - lu := LU(DenseCopyOf(a)) - if lu.IsSingular() { - return nil, ErrSingular - } - return lu.Solve(DenseCopyOf(b)), nil - case m > n: - qr := QR(DenseCopyOf(a)) - if !qr.IsFullRank() { - return nil, ErrSingular - } - return qr.Solve(DenseCopyOf(b)), nil - default: - lq := LQ(DenseCopyOf(a)) - if !lq.IsFullRank() { - return nil, ErrSingular - } - switch b := b.(type) { - case *Dense: - return lq.Solve(b), nil - default: - return lq.Solve(DenseCopyOf(b)), nil - } - } -} - -// A Panicker is a function that may panic. -type Panicker func() - -// Maybe will recover a panic with a type mat64.Error from fn, and return this error. -// Any other error is re-panicked. -func Maybe(fn Panicker) (err error) { - defer func() { - if r := recover(); r != nil { - if e, ok := r.(Error); ok { - if e.string == "" { - panic("mat64: invalid error") - } - err = e - return - } - panic(r) - } - }() - fn() - return -} - -// A FloatPanicker is a function that returns a float64 and may panic. -type FloatPanicker func() float64 - -// MaybeFloat will recover a panic with a type mat64.Error from fn, and return this error. -// Any other error is re-panicked. 
-func MaybeFloat(fn FloatPanicker) (f float64, err error) { - defer func() { - if r := recover(); r != nil { - if e, ok := r.(Error); ok { - if e.string == "" { - panic("mat64: invalid error") - } - err = e - return - } - panic(r) - } - }() - return fn(), nil -} - -// Type Error represents matrix handling errors. These errors can be recovered by Maybe wrappers. -type Error struct{ string } - -func (err Error) Error() string { return err.string } - -var ( - ErrIndexOutOfRange = Error{"mat64: index out of range"} - ErrRowAccess = Error{"mat64: row index out of range"} - ErrColAccess = Error{"mat64: column index out of range"} - ErrVectorAccess = Error{"mat64: vector index out of range"} - ErrZeroLength = Error{"mat64: zero length in matrix definition"} - ErrRowLength = Error{"mat64: row length mismatch"} - ErrColLength = Error{"mat64: col length mismatch"} - ErrSquare = Error{"mat64: expect square matrix"} - ErrNormOrder = Error{"mat64: invalid norm order for matrix"} - ErrSingular = Error{"mat64: matrix is singular"} - ErrShape = Error{"mat64: dimension mismatch"} - ErrIllegalStride = Error{"mat64: illegal stride"} - ErrPivot = Error{"mat64: malformed pivot list"} - ErrTriangle = Error{"mat64: triangular storage mismatch"} -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// use returns a float64 slice with l elements, using f if it -// has the necessary capacity, otherwise creating a new slice. -func use(f []float64, l int) []float64 { - if l <= cap(f) { - return f[:l] - } - return make([]float64, l) -} - -// useZeroed returns a float64 slice with l elements, using f if it -// has the necessary capacity, otherwise creating a new slice. The -// elements of the returned slice are guaranteed to be zero. 
-func useZeroed(f []float64, l int) []float64 { - if l <= cap(f) { - f = f[:l] - zero(f) - return f - } - return make([]float64, l) -} - -// zero does a fast zeroing of the given slice's elements. -func zero(f []float64) { - f[0] = 0 - for i := 1; i < len(f); { - i += copy(f[i:], f[:i]) - } -} diff --git a/vendor/github.com/gonum/matrix/mat64/pool.go b/vendor/github.com/gonum/matrix/mat64/pool.go deleted file mode 100644 index 62faf6aa7..000000000 --- a/vendor/github.com/gonum/matrix/mat64/pool.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright ©2014 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "sync" - - "github.com/gonum/blas/blas64" -) - -var tab64 = [64]byte{ - 0x3f, 0x00, 0x3a, 0x01, 0x3b, 0x2f, 0x35, 0x02, - 0x3c, 0x27, 0x30, 0x1b, 0x36, 0x21, 0x2a, 0x03, - 0x3d, 0x33, 0x25, 0x28, 0x31, 0x12, 0x1c, 0x14, - 0x37, 0x1e, 0x22, 0x0b, 0x2b, 0x0e, 0x16, 0x04, - 0x3e, 0x39, 0x2e, 0x34, 0x26, 0x1a, 0x20, 0x29, - 0x32, 0x24, 0x11, 0x13, 0x1d, 0x0a, 0x0d, 0x15, - 0x38, 0x2d, 0x19, 0x1f, 0x23, 0x10, 0x09, 0x0c, - 0x2c, 0x18, 0x0f, 0x08, 0x17, 0x07, 0x06, 0x05, -} - -// bits returns the ceiling of base 2 log of v. -// Approach based on http://stackoverflow.com/a/11398748. -func bits(v uint64) byte { - if v == 0 { - return 0 - } - v <<= 2 - v-- - v |= v >> 1 - v |= v >> 2 - v |= v >> 4 - v |= v >> 8 - v |= v >> 16 - v |= v >> 32 - return tab64[((v-(v>>1))*0x07EDD5E59A4E28C2)>>58] - 1 -} - -// pool contains size stratified workspace Dense pools. -// Each pool element i returns sized matrices with a data -// slice capped at 1<= n by Householder -// reflections, the QR decomposition is an m-by-n orthogonal matrix q and an n-by-n -// upper triangular matrix r so that a = q.r. QR will panic with ErrShape if m < n. 
-// -// The QR decomposition always exists, even if the matrix does not have full rank, -// so QR will never fail unless m < n. The primary use of the QR decomposition is -// in the least squares solution of non-square systems of simultaneous linear equations. -// This will fail if QRIsFullRank() returns false. The matrix a is overwritten by the -// decomposition. -func QR(a *Dense) QRFactor { - // Initialize. - m, n := a.Dims() - if m < n { - panic(ErrShape) - } - - qr := a - rDiag := make([]float64, n) - - // Main loop. - for k := 0; k < n; k++ { - // Compute 2-norm of k-th column without under/overflow. - var norm float64 - for i := k; i < m; i++ { - norm = math.Hypot(norm, qr.at(i, k)) - } - - if norm != 0 { - // Form k-th Householder vector. - if qr.at(k, k) < 0 { - norm = -norm - } - for i := k; i < m; i++ { - qr.set(i, k, qr.at(i, k)/norm) - } - qr.set(k, k, qr.at(k, k)+1) - - // Apply transformation to remaining columns. - for j := k + 1; j < n; j++ { - var s float64 - for i := k; i < m; i++ { - s += qr.at(i, k) * qr.at(i, j) - } - s /= -qr.at(k, k) - for i := k; i < m; i++ { - qr.set(i, j, qr.at(i, j)+s*qr.at(i, k)) - } - } - } - rDiag[k] = -norm - } - - return QRFactor{qr, rDiag} -} - -// IsFullRank returns whether the R matrix and hence a has full rank. -func (f QRFactor) IsFullRank() bool { - for _, v := range f.rDiag { - if v == 0 { - return false - } - } - return true -} - -// H returns the Householder vectors in a lower trapezoidal matrix -// whose columns define the reflections. -func (f QRFactor) H() *Dense { - qr := f.QR - m, n := qr.Dims() - h := NewDense(m, n, nil) - for i := 0; i < m; i++ { - for j := 0; j < n; j++ { - if i >= j { - h.set(i, j, qr.at(i, j)) - } - } - } - return h -} - -// R returns the upper triangular factor for the QR decomposition. 
-func (f QRFactor) R() *Dense { - qr, rDiag := f.QR, f.rDiag - _, n := qr.Dims() - r := NewDense(n, n, nil) - for i, v := range rDiag[:n] { - for j := 0; j < n; j++ { - if i < j { - r.set(i, j, qr.at(i, j)) - } else if i == j { - r.set(i, j, v) - } - } - } - return r -} - -// Q generates and returns the (economy-sized) orthogonal factor. -func (f QRFactor) Q() *Dense { - qr := f.QR - m, n := qr.Dims() - q := NewDense(m, n, nil) - - for k := n - 1; k >= 0; k-- { - q.set(k, k, 1) - for j := k; j < n; j++ { - if qr.at(k, k) != 0 { - var s float64 - for i := k; i < m; i++ { - s += qr.at(i, k) * q.at(i, j) - } - s /= -qr.at(k, k) - for i := k; i < m; i++ { - q.set(i, j, q.at(i, j)+s*qr.at(i, k)) - } - } - } - } - - return q -} - -// Solve computes a least squares solution of a.x = b where b has as many rows as a. -// A matrix x is returned that minimizes the two norm of Q*R*X-B. Solve will panic -// if a is not full rank. The matrix b is overwritten during the call. -func (f QRFactor) Solve(b *Dense) (x *Dense) { - qr := f.QR - rDiag := f.rDiag - m, n := qr.Dims() - bm, bn := b.Dims() - if bm != m { - panic(ErrShape) - } - if !f.IsFullRank() { - panic(ErrSingular) - } - - // Compute Y = transpose(Q)*B - for k := 0; k < n; k++ { - for j := 0; j < bn; j++ { - var s float64 - for i := k; i < m; i++ { - s += qr.at(i, k) * b.at(i, j) - } - s /= -qr.at(k, k) - - for i := k; i < m; i++ { - b.set(i, j, b.at(i, j)+s*qr.at(i, k)) - } - } - } - - // Solve R*X = Y; - for k := n - 1; k >= 0; k-- { - row := b.rowView(k) - for j := range row[:bn] { - row[j] /= rDiag[k] - } - for i := 0; i < k; i++ { - row := b.rowView(i) - for j := range row[:bn] { - row[j] -= b.at(k, j) * qr.at(i, k) - } - } - } - - return b.View(0, 0, n, bn).(*Dense) -} diff --git a/vendor/github.com/gonum/matrix/mat64/svd.go b/vendor/github.com/gonum/matrix/mat64/svd.go deleted file mode 100644 index 3b9b98952..000000000 --- a/vendor/github.com/gonum/matrix/mat64/svd.go +++ /dev/null @@ -1,479 +0,0 @@ -// 
Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on the SingularValueDecomposition class from Jama 1.0.3. - -package mat64 - -import ( - "math" -) - -type SVDFactors struct { - U *Dense - Sigma []float64 - V *Dense - m, n int -} - -// SVD performs singular value decomposition for an m-by-n matrix a. The -// singular value decomposition is an m-by-n orthogonal matrix u, an n-by-n -// diagonal matrix s, and an n-by-n orthogonal matrix v so that a = u*s*v'. If -// a is a wide matrix a copy of its transpose is allocated, otherwise a is -// overwritten during the decomposition. Matrices u and v are only created when -// wantu and wantv are true respectively. -// -// The singular values, sigma[k] = s[k][k], are ordered so that -// -// sigma[0] >= sigma[1] >= ... >= sigma[n-1]. -// -// The matrix condition number and the effective numerical rank can be computed from -// this decomposition. -func SVD(a *Dense, epsilon, small float64, wantu, wantv bool) SVDFactors { - m, n := a.Dims() - - trans := false - if m < n { - a.TCopy(a) - m, n = n, m - wantu, wantv = wantv, wantu - trans = true - } - - sigma := make([]float64, min(m+1, n)) - nu := min(m, n) - var u, v *Dense - if wantu { - u = NewDense(m, nu, nil) - } - if wantv { - v = NewDense(n, n, nil) - } - - var ( - e = make([]float64, n) - work = make([]float64, m) - ) - - // Reduce a to bidiagonal form, storing the diagonal elements - // in sigma and the super-diagonal elements in e. - nct := min(m-1, n) - nrt := max(0, min(n-2, m)) - for k := 0; k < max(nct, nrt); k++ { - if k < nct { - // Compute the transformation for the k-th column and - // place the k-th diagonal in sigma[k]. - // Compute 2-norm of k-th column without under/overflow. 
- sigma[k] = 0 - for i := k; i < m; i++ { - sigma[k] = math.Hypot(sigma[k], a.at(i, k)) - } - if sigma[k] != 0 { - if a.at(k, k) < 0 { - sigma[k] = -sigma[k] - } - for i := k; i < m; i++ { - a.set(i, k, a.at(i, k)/sigma[k]) - } - a.set(k, k, a.at(k, k)+1) - } - sigma[k] = -sigma[k] - } - - for j := k + 1; j < n; j++ { - if k < nct && sigma[k] != 0 { - // Apply the transformation. - var t float64 - for i := k; i < m; i++ { - t += a.at(i, k) * a.at(i, j) - } - t = -t / a.at(k, k) - for i := k; i < m; i++ { - a.set(i, j, a.at(i, j)+t*a.at(i, k)) - } - } - - // Place the k-th row of a into e for the - // subsequent calculation of the row transformation. - e[j] = a.at(k, j) - } - - if wantu && k < nct { - // Place the transformation in u for subsequent back - // multiplication. - for i := k; i < m; i++ { - u.set(i, k, a.at(i, k)) - } - } - - if k < nrt { - // Compute the k-th row transformation and place the - // k-th super-diagonal in e[k]. - // Compute 2-norm without under/overflow. - e[k] = 0 - for i := k + 1; i < n; i++ { - e[k] = math.Hypot(e[k], e[i]) - } - if e[k] != 0 { - if e[k+1] < 0 { - e[k] = -e[k] - } - for i := k + 1; i < n; i++ { - e[i] /= e[k] - } - e[k+1]++ - } - e[k] = -e[k] - if k+1 < m && e[k] != 0 { - // Apply the transformation. - for i := k + 1; i < m; i++ { - work[i] = 0 - } - for j := k + 1; j < n; j++ { - for i := k + 1; i < m; i++ { - work[i] += e[j] * a.at(i, j) - } - } - for j := k + 1; j < n; j++ { - t := -e[j] / e[k+1] - for i := k + 1; i < m; i++ { - a.set(i, j, a.at(i, j)+t*work[i]) - } - } - } - if wantv { - // Place the transformation in v for subsequent - // back multiplication. - for i := k + 1; i < n; i++ { - v.set(i, k, e[i]) - } - } - } - } - - // set up the final bidiagonal matrix or order p. - p := min(n, m+1) - if nct < n { - sigma[nct] = a.at(nct, nct) - } - if m < p { - sigma[p-1] = 0 - } - if nrt+1 < p { - e[nrt] = a.at(nrt, p-1) - } - e[p-1] = 0 - - // If requested, generate u. 
- if wantu { - for j := nct; j < nu; j++ { - for i := 0; i < m; i++ { - u.set(i, j, 0) - } - u.set(j, j, 1) - } - for k := nct - 1; k >= 0; k-- { - if sigma[k] != 0 { - for j := k + 1; j < nu; j++ { - var t float64 - for i := k; i < m; i++ { - t += u.at(i, k) * u.at(i, j) - } - t /= -u.at(k, k) - for i := k; i < m; i++ { - u.set(i, j, u.at(i, j)+t*u.at(i, k)) - } - } - for i := k; i < m; i++ { - u.set(i, k, -u.at(i, k)) - } - u.set(k, k, 1+u.at(k, k)) - for i := 0; i < k-1; i++ { - u.set(i, k, 0) - } - } else { - for i := 0; i < m; i++ { - u.set(i, k, 0) - } - u.set(k, k, 1) - } - } - } - - // If requested, generate v. - if wantv { - for k := n - 1; k >= 0; k-- { - if k < nrt && e[k] != 0 { - for j := k + 1; j < nu; j++ { - var t float64 - for i := k + 1; i < n; i++ { - t += v.at(i, k) * v.at(i, j) - } - t /= -v.at(k+1, k) - for i := k + 1; i < n; i++ { - v.set(i, j, v.at(i, j)+t*v.at(i, k)) - } - } - } - for i := 0; i < n; i++ { - v.set(i, k, 0) - } - v.set(k, k, 1) - } - } - - // Main iteration loop for the singular values. - pp := p - 1 - for iter := 0; p > 0; { - var k, kase int - - // Here is where a test for too many iterations would go. - - // This section of the program inspects for - // negligible elements in the sigma and e arrays. On - // completion the variables kase and k are set as follows. - // - // kase = 1 if sigma(p) and e[k-1] are negligible and k

= -1; k-- { - if k == -1 { - break - } - if math.Abs(e[k]) <= small+epsilon*(math.Abs(sigma[k])+math.Abs(sigma[k+1])) { - e[k] = 0 - break - } - } - - if k == p-2 { - kase = 4 - } else { - var ks int - for ks = p - 1; ks >= k; ks-- { - if ks == k { - break - } - var t float64 - if ks != p { - t = math.Abs(e[ks]) - } - if ks != k+1 { - t += math.Abs(e[ks-1]) - } - if math.Abs(sigma[ks]) <= small+epsilon*t { - sigma[ks] = 0 - break - } - } - if ks == k { - kase = 3 - } else if ks == p-1 { - kase = 1 - } else { - kase = 2 - k = ks - } - } - k++ - - switch kase { - // Deflate negligible sigma(p). - case 1: - f := e[p-2] - e[p-2] = 0 - for j := p - 2; j >= k; j-- { - t := math.Hypot(sigma[j], f) - cs := sigma[j] / t - sn := f / t - sigma[j] = t - if j != k { - f = -sn * e[j-1] - e[j-1] *= cs - } - if wantv { - for i := 0; i < n; i++ { - t = cs*v.at(i, j) + sn*v.at(i, p-1) - v.set(i, p-1, -sn*v.at(i, j)+cs*v.at(i, p-1)) - v.set(i, j, t) - } - } - } - - // Split at negligible sigma(k). - case 2: - f := e[k-1] - e[k-1] = 0 - for j := k; j < p; j++ { - t := math.Hypot(sigma[j], f) - cs := sigma[j] / t - sn := f / t - sigma[j] = t - f = -sn * e[j] - e[j] *= cs - if wantu { - for i := 0; i < m; i++ { - t = cs*u.at(i, j) + sn*u.at(i, k-1) - u.set(i, k-1, -sn*u.at(i, j)+cs*u.at(i, k-1)) - u.set(i, j, t) - } - } - } - - // Perform one qr step. - case 3: - // Calculate the shift. - scale := math.Max(math.Max(math.Max(math.Max( - math.Abs(sigma[p-1]), math.Abs(sigma[p-2])), math.Abs(e[p-2])), math.Abs(sigma[k])), math.Abs(e[k]), - ) - sp := sigma[p-1] / scale - spm1 := sigma[p-2] / scale - epm1 := e[p-2] / scale - sk := sigma[k] / scale - ek := e[k] / scale - b := ((spm1+sp)*(spm1-sp) + epm1*epm1) / 2 - c := (sp * epm1) * (sp * epm1) - - var shift float64 - if b != 0 || c != 0 { - shift = math.Sqrt(b*b + c) - if b < 0 { - shift = -shift - } - shift = c / (b + shift) - } - f := (sk+sp)*(sk-sp) + shift - g := sk * ek - - // Chase zeros. 
- for j := k; j < p-1; j++ { - t := math.Hypot(f, g) - cs := f / t - sn := g / t - if j != k { - e[j-1] = t - } - f = cs*sigma[j] + sn*e[j] - e[j] = cs*e[j] - sn*sigma[j] - g = sn * sigma[j+1] - sigma[j+1] *= cs - if wantv { - for i := 0; i < n; i++ { - t = cs*v.at(i, j) + sn*v.at(i, j+1) - v.set(i, j+1, -sn*v.at(i, j)+cs*v.at(i, j+1)) - v.set(i, j, t) - } - } - t = math.Hypot(f, g) - cs = f / t - sn = g / t - sigma[j] = t - f = cs*e[j] + sn*sigma[j+1] - sigma[j+1] = -sn*e[j] + cs*sigma[j+1] - g = sn * e[j+1] - e[j+1] *= cs - if wantu && j < m-1 { - for i := 0; i < m; i++ { - t = cs*u.at(i, j) + sn*u.at(i, j+1) - u.set(i, j+1, -sn*u.at(i, j)+cs*u.at(i, j+1)) - u.set(i, j, t) - } - } - } - e[p-2] = f - iter++ - - // Convergence. - case 4: - // Make the singular values positive. - if sigma[k] <= 0 { - if sigma[k] < 0 { - sigma[k] = -sigma[k] - } else { - sigma[k] = 0 - } - if wantv { - for i := 0; i <= pp; i++ { - v.set(i, k, -v.at(i, k)) - } - } - } - - // Order the singular values. - for k < pp { - if sigma[k] >= sigma[k+1] { - break - } - sigma[k], sigma[k+1] = sigma[k+1], sigma[k] - if wantv && k < n-1 { - for i := 0; i < n; i++ { - t := v.at(i, k+1) - v.set(i, k+1, v.at(i, k)) - v.set(i, k, t) - } - } - if wantu && k < m-1 { - for i := 0; i < m; i++ { - t := u.at(i, k+1) - u.set(i, k+1, u.at(i, k)) - u.set(i, k, t) - } - } - k++ - } - iter = 0 - p-- - } - } - - if trans { - return SVDFactors{ - U: v, - Sigma: sigma, - V: u, - - m: m, n: n, - } - } - return SVDFactors{ - U: u, - Sigma: sigma, - V: v, - - m: m, n: n, - } -} - -// S returns a newly allocated S matrix from the sigma values held by the -// factorisation. -func (f SVDFactors) S() *Dense { - s := NewDense(len(f.Sigma), len(f.Sigma), nil) - for i, v := range f.Sigma { - s.set(i, i, v) - } - return s -} - -// Rank returns the number of non-negligible singular values in the sigma held by -// the factorisation with the given epsilon. 
-func (f SVDFactors) Rank(epsilon float64) int { - if len(f.Sigma) == 0 { - return 0 - } - tol := float64(max(f.m, len(f.Sigma))) * f.Sigma[0] * epsilon - var r int - for _, v := range f.Sigma { - if v > tol { - r++ - } - } - return r -} - -// Cond returns the 2-norm condition number for the S matrix. -func (f SVDFactors) Cond() float64 { - return f.Sigma[0] / f.Sigma[min(f.m, f.n)-1] -} diff --git a/vendor/github.com/gonum/matrix/mat64/symmetric.go b/vendor/github.com/gonum/matrix/mat64/symmetric.go deleted file mode 100644 index 19923b046..000000000 --- a/vendor/github.com/gonum/matrix/mat64/symmetric.go +++ /dev/null @@ -1,198 +0,0 @@ -package mat64 - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -var ( - symDense *SymDense - - _ Matrix = symDense - _ Symmetric = symDense - _ RawSymmetricer = symDense -) - -const badSymTriangle = "mat64: blas64.Symmetric not upper" - -// SymDense is a symmetric matrix that uses Dense storage. -type SymDense struct { - mat blas64.Symmetric -} - -// Symmetric represents a symmetric matrix (where the element at {i, j} equals -// the element at {j, i}). Symmetric matrices are always square. -type Symmetric interface { - Matrix - // Symmetric returns the number of rows/columns in the matrix. - Symmetric() int -} - -// A RawSymmetricer can return a view of itself as a BLAS Symmetric matrix. -type RawSymmetricer interface { - RawSymmetric() blas64.Symmetric -} - -// NewSymDense constructs an n x n symmetric matrix. If len(mat) == n * n, -// mat will be used to hold the underlying data, or if mat == nil, new data will be allocated. -// The underlying data representation is the same as a Dense matrix, except -// the values of the entries in the lower triangular portion are completely ignored. 
-func NewSymDense(n int, mat []float64) *SymDense { - if n < 0 { - panic("mat64: negative dimension") - } - if mat != nil && n*n != len(mat) { - panic(ErrShape) - } - if mat == nil { - mat = make([]float64, n*n) - } - return &SymDense{blas64.Symmetric{ - N: n, - Stride: n, - Data: mat, - Uplo: blas.Upper, - }} -} - -func (s *SymDense) Dims() (r, c int) { - return s.mat.N, s.mat.N -} - -func (s *SymDense) Symmetric() int { - return s.mat.N -} - -// RawSymmetric returns the matrix as a blas64.Symmetric. The returned -// value must be stored in upper triangular format. -func (s *SymDense) RawSymmetric() blas64.Symmetric { - return s.mat -} - -func (s *SymDense) isZero() bool { - return s.mat.N == 0 -} - -func (s *SymDense) AddSym(a, b Symmetric) { - n := a.Symmetric() - if n != b.Symmetric() { - panic(ErrShape) - } - if s.isZero() { - s.mat = blas64.Symmetric{ - N: n, - Stride: n, - Data: use(s.mat.Data, n*n), - Uplo: blas.Upper, - } - } else if s.mat.N != n { - panic(ErrShape) - } - - if a, ok := a.(RawSymmetricer); ok { - if b, ok := b.(RawSymmetricer); ok { - amat, bmat := a.RawSymmetric(), b.RawSymmetric() - for i := 0; i < n; i++ { - btmp := bmat.Data[i*bmat.Stride+i : i*bmat.Stride+n] - stmp := s.mat.Data[i*s.mat.Stride+i : i*s.mat.Stride+n] - for j, v := range amat.Data[i*amat.Stride+i : i*amat.Stride+n] { - stmp[j] = v + btmp[j] - } - } - return - } - } - - for i := 0; i < n; i++ { - stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] - for j := i; j < n; j++ { - stmp[j] = a.At(i, j) + b.At(i, j) - } - } -} - -func (s *SymDense) CopySym(a Symmetric) int { - n := a.Symmetric() - n = min(n, s.mat.N) - switch a := a.(type) { - case RawSymmetricer: - amat := a.RawSymmetric() - if amat.Uplo != blas.Upper { - panic(badSymTriangle) - } - for i := 0; i < n; i++ { - copy(s.mat.Data[i*s.mat.Stride+i:i*s.mat.Stride+n], amat.Data[i*amat.Stride+i:i*amat.Stride+n]) - } - default: - for i := 0; i < n; i++ { - stmp := s.mat.Data[i*s.mat.Stride : i*s.mat.Stride+n] - for j := 
i; j < n; j++ { - stmp[j] = a.At(i, j) - } - } - } - return n -} - -// SymRankOne performs a symetric rank-one update to the matrix a and stores -// the result in the receiver -// s = a + alpha * x * x' -func (s *SymDense) SymRankOne(a Symmetric, alpha float64, x []float64) { - n := s.mat.N - var w SymDense - if s == a { - w = *s - } - if w.isZero() { - w.mat = blas64.Symmetric{ - N: n, - Stride: n, - Uplo: blas.Upper, - Data: use(w.mat.Data, n*n), - } - } else if n != w.mat.N { - panic(ErrShape) - } - if s != a { - w.CopySym(a) - } - if len(x) != n { - panic(ErrShape) - } - blas64.Syr(alpha, blas64.Vector{Inc: 1, Data: x}, w.mat) - *s = w - return -} - -// RankTwo performs a symmmetric rank-two update to the matrix a and stores -// the result in the receiver -// m = a + alpha * (x * y' + y * x') -func (s *SymDense) RankTwo(a Symmetric, alpha float64, x, y []float64) { - n := s.mat.N - var w SymDense - if s == a { - w = *s - } - if w.isZero() { - w.mat = blas64.Symmetric{ - N: n, - Stride: n, - Uplo: blas.Upper, - Data: use(w.mat.Data, n*n), - } - } else if n != w.mat.N { - panic(ErrShape) - } - if s != a { - w.CopySym(a) - } - if len(x) != n { - panic(ErrShape) - } - if len(y) != n { - panic(ErrShape) - } - blas64.Syr2(alpha, blas64.Vector{Inc: 1, Data: x}, blas64.Vector{Inc: 1, Data: y}, w.mat) - *s = w - return -} diff --git a/vendor/github.com/gonum/matrix/mat64/triangular.go b/vendor/github.com/gonum/matrix/mat64/triangular.go deleted file mode 100644 index 93e942b47..000000000 --- a/vendor/github.com/gonum/matrix/mat64/triangular.go +++ /dev/null @@ -1,177 +0,0 @@ -package mat64 - -import ( - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -var ( - triDense *TriDense - _ Matrix = triDense - _ Triangular = triDense - _ RawTriangular = triDense -) - -// TriDense represents an upper or lower triangular matrix in dense storage -// format. 
-type TriDense struct { - mat blas64.Triangular -} - -type Triangular interface { - Matrix - // Triangular returns the number of rows/columns in the matrix and if it is - // an upper triangular matrix. - Triangle() (n int, upper bool) -} - -type RawTriangular interface { - RawTriangular() blas64.Triangular -} - -// NewTriangular constructs an n x n triangular matrix. The constructed matrix -// is upper triangular if upper == true and lower triangular otherwise. -// If len(mat) == n * n, mat will be used to hold the underlying data, if -// mat == nil, new data will be allocated, and will panic if neither of these -// cases is true. -// The underlying data representation is the same as that of a Dense matrix, -// except the values of the entries in the opposite half are completely ignored. -func NewTriDense(n int, upper bool, mat []float64) *TriDense { - if n < 0 { - panic("mat64: negative dimension") - } - if mat != nil && len(mat) != n*n { - panic(ErrShape) - } - if mat == nil { - mat = make([]float64, n*n) - } - uplo := blas.Lower - if upper { - uplo = blas.Upper - } - return &TriDense{blas64.Triangular{ - N: n, - Stride: n, - Data: mat, - Uplo: uplo, - Diag: blas.NonUnit, - }} -} - -func (t *TriDense) Dims() (r, c int) { - return t.mat.N, t.mat.N -} - -func (t *TriDense) Triangle() (n int, upper bool) { - return t.mat.N, t.mat.Uplo == blas.Upper -} - -func (t *TriDense) RawTriangular() blas64.Triangular { - return t.mat -} - -func (t *TriDense) isZero() bool { - // It must be the case that t.Dims() returns - // zeros in this case. See comment in Reset(). - return t.mat.Stride == 0 -} - -// Reset zeros the dimensions of the matrix so that it can be reused as the -// receiver of a dimensionally restricted operation. -// -// See the Reseter interface for more information. -func (t *TriDense) Reset() { - // No change of Stride, N to 0 may - // be made unless both are set to 0. 
- t.mat.N, t.mat.Stride = 0, 0 - // Defensively zero Uplo to ensure - // it is set correctly later. - t.mat.Uplo = 0 - t.mat.Data = t.mat.Data[:0] -} - -// getBlasTriangular transforms t into a blas64.Triangular. If t is a RawTriangular, -// the direct matrix representation is returned, otherwise t is copied into one. -func getBlasTriangular(t Triangular) blas64.Triangular { - n, upper := t.Triangle() - rt, ok := t.(RawTriangular) - if ok { - return rt.RawTriangular() - } - ta := blas64.Triangular{ - N: n, - Stride: n, - Diag: blas.NonUnit, - Data: make([]float64, n*n), - } - if upper { - ta.Uplo = blas.Upper - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - ta.Data[i*n+j] = t.At(i, j) - } - } - return ta - } - ta.Uplo = blas.Lower - for i := 0; i < n; i++ { - for j := 0; j < i; j++ { - ta.Data[i*n+j] = t.At(i, j) - } - } - return ta -} - -// copySymIntoTriangle copies a symmetric matrix into a TriDense -func copySymIntoTriangle(t *TriDense, s Symmetric) { - n, upper := t.Triangle() - ns := s.Symmetric() - if n != ns { - panic("mat64: triangle size mismatch") - } - ts := t.mat.Stride - if rs, ok := s.(RawSymmetricer); ok { - sd := rs.RawSymmetric() - ss := sd.Stride - if upper { - if sd.Uplo == blas.Upper { - for i := 0; i < n; i++ { - copy(t.mat.Data[i*ts+i:i*ts+n], sd.Data[i*ss+i:i*ss+n]) - } - return - } - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - t.mat.Data[i*ts+j] = sd.Data[j*ss+i] - } - return - } - } - if sd.Uplo == blas.Upper { - for i := 0; i < n; i++ { - for j := 0; j <= i; j++ { - t.mat.Data[i*ts+j] = sd.Data[j*ss+i] - } - } - return - } - for i := 0; i < n; i++ { - copy(t.mat.Data[i*ts:i*ts+i+1], sd.Data[i*ss:i*ss+i+1]) - } - return - } - if upper { - for i := 0; i < n; i++ { - for j := i; j < n; j++ { - t.mat.Data[i*ts+j] = s.At(i, j) - } - } - return - } - for i := 0; i < n; i++ { - for j := 0; j <= i; j++ { - t.mat.Data[i*ts+j] = s.At(i, j) - } - } -} diff --git a/vendor/github.com/gonum/matrix/mat64/vector.go 
b/vendor/github.com/gonum/matrix/mat64/vector.go deleted file mode 100644 index c2e966d7b..000000000 --- a/vendor/github.com/gonum/matrix/mat64/vector.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright ©2013 The gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mat64 - -import ( - "math" - - "github.com/gonum/blas" - "github.com/gonum/blas/blas64" -) - -var ( - vector *Vector - - _ Matrix = vector - - // _ Cloner = vector - // _ Viewer = vector - // _ Subvectorer = vector - - // _ Adder = vector - // _ Suber = vector - // _ Muler = vector - // _ Dotter = vector - // _ ElemMuler = vector - - // _ Scaler = vector - // _ Applyer = vector - - // _ Normer = vector - // _ Sumer = vector - - // _ Stacker = vector - // _ Augmenter = vector - - // _ Equaler = vector - // _ ApproxEqualer = vector - - // _ RawMatrixLoader = vector - // _ RawMatrixer = vector -) - -// Vector represents a column vector. -type Vector struct { - mat blas64.Vector - n int - // A BLAS vector can have a negative increment, but allowing this - // in the mat64 type complicates a lot of code, and doesn't gain anything. - // Vector must have positive increment in this package. -} - -// NewVector creates a new Vector of length n. If len(data) == n, data is used -// as the backing data slice. If data == nil, a new slice is allocated. If -// neither of these is true, NewVector will panic. -func NewVector(n int, data []float64) *Vector { - if len(data) != n && data != nil { - panic(ErrShape) - } - if data == nil { - data = make([]float64, n) - } - return &Vector{ - mat: blas64.Vector{ - Inc: 1, - Data: data, - }, - n: n, - } -} - -// ViewVec returns a sub-vector view of the receiver starting at element i and -// extending n columns. If i is out of range, or if n is zero or extend beyond the -// bounds of the Vector ViewVec will panic with ErrIndexOutOfRange. 
The returned -// Vector retains reference to the underlying vector. -func (v *Vector) ViewVec(i, n int) *Vector { - if i+n > v.n { - panic(ErrIndexOutOfRange) - } - return &Vector{ - n: n, - mat: blas64.Vector{ - Inc: v.mat.Inc, - Data: v.mat.Data[i*v.mat.Inc:], - }, - } -} - -func (v *Vector) Dims() (r, c int) { return v.n, 1 } - -// Len returns the length of the vector. -func (v *Vector) Len() int { - return v.n -} - -func (v *Vector) Reset() { - v.mat.Data = v.mat.Data[:0] - v.mat.Inc = 0 - v.n = 0 -} - -func (v *Vector) RawVector() blas64.Vector { - return v.mat -} - -// CopyVec makes a copy of elements of a into the receiver. It is similar to the -// built-in copy; it copies as much as the overlap between the two matrices and -// returns the number of rows and columns it copied. -func (v *Vector) CopyVec(a *Vector) (n int) { - n = min(v.Len(), a.Len()) - blas64.Copy(n, a.mat, v.mat) - return n -} - -// AddVec adds a and b element-wise, placing the result in the receiver. -func (v *Vector) AddVec(a, b *Vector) { - ar := a.Len() - br := b.Len() - - if ar != br { - panic(ErrShape) - } - - v.reuseAs(ar) - - amat, bmat := a.RawVector(), b.RawVector() - for i := 0; i < v.n; i++ { - v.mat.Data[i*v.mat.Inc] = amat.Data[i*amat.Inc] + bmat.Data[i*bmat.Inc] - } -} - -// SubVec subtracts the vector b from a, placing the result in the receiver. -func (v *Vector) SubVec(a, b *Vector) { - ar := a.Len() - br := b.Len() - - if ar != br { - panic(ErrShape) - } - - v.reuseAs(ar) - - amat, bmat := a.RawVector(), b.RawVector() - for i := 0; i < v.n; i++ { - v.mat.Data[i*v.mat.Inc] = amat.Data[i*amat.Inc] - bmat.Data[i*bmat.Inc] - } -} - -// MulElemVec performs element-wise multiplication of a and b, placing the result -// in the receiver. 
-func (v *Vector) MulElemVec(a, b *Vector) { - ar := a.Len() - br := b.Len() - - if ar != br { - panic(ErrShape) - } - - v.reuseAs(ar) - - amat, bmat := a.RawVector(), b.RawVector() - for i := 0; i < v.n; i++ { - v.mat.Data[i*v.mat.Inc] = amat.Data[i*amat.Inc] * bmat.Data[i*bmat.Inc] - } -} - -// DivElemVec performs element-wise division of a by b, placing the result -// in the receiver. -func (v *Vector) DivElemVec(a, b *Vector) { - ar := a.Len() - br := b.Len() - - if ar != br { - panic(ErrShape) - } - - v.reuseAs(ar) - - amat, bmat := a.RawVector(), b.RawVector() - for i := 0; i < v.n; i++ { - v.mat.Data[i*v.mat.Inc] = amat.Data[i*amat.Inc] / bmat.Data[i*bmat.Inc] - } -} - -// MulVec computes a * b if trans == false and a^T * b if trans == true. The -// result is stored into the receiver. MulVec panics if the number of columns in -// a does not equal the number of rows in b. -func (v *Vector) MulVec(a Matrix, trans bool, b *Vector) { - ar, ac := a.Dims() - br := b.Len() - if trans { - if ar != br { - panic(ErrShape) - } - } else { - if ac != br { - panic(ErrShape) - } - } - - var w Vector - if v != a && v != b { - w = *v - } - if w.n == 0 { - if trans { - w.mat.Data = use(w.mat.Data, ac) - } else { - w.mat.Data = use(w.mat.Data, ar) - } - - w.mat.Inc = 1 - w.n = ar - if trans { - w.n = ac - } - } else { - if trans { - if ac != w.n { - panic(ErrShape) - } - } else { - if ar != w.n { - panic(ErrShape) - } - } - } - - switch a := a.(type) { - case RawSymmetricer: - amat := a.RawSymmetric() - blas64.Symv(1, amat, b.mat, 0, w.mat) - case RawTriangular: - w.CopyVec(b) - amat := a.RawTriangular() - ta := blas.NoTrans - if trans { - ta = blas.Trans - } - blas64.Trmv(ta, amat, w.mat) - case RawMatrixer: - amat := a.RawMatrix() - t := blas.NoTrans - if trans { - t = blas.Trans - } - blas64.Gemv(t, 1, amat, b.mat, 0, w.mat) - case Vectorer: - if trans { - col := make([]float64, ar) - for c := 0; c < ac; c++ { - w.mat.Data[c*w.mat.Inc] = blas64.Dot(ar, - blas64.Vector{Inc: 
1, Data: a.Col(col, c)}, - b.mat, - ) - } - } else { - row := make([]float64, ac) - for r := 0; r < ar; r++ { - w.mat.Data[r*w.mat.Inc] = blas64.Dot(ac, - blas64.Vector{Inc: 1, Data: a.Row(row, r)}, - b.mat, - ) - } - } - default: - if trans { - col := make([]float64, ar) - for c := 0; c < ac; c++ { - for i := range col { - col[i] = a.At(i, c) - } - var f float64 - for i, e := range col { - f += e * b.mat.Data[i*b.mat.Inc] - } - w.mat.Data[c*w.mat.Inc] = f - } - } else { - row := make([]float64, ac) - for r := 0; r < ar; r++ { - for i := range row { - row[i] = a.At(r, i) - } - var f float64 - for i, e := range row { - f += e * b.mat.Data[i*b.mat.Inc] - } - w.mat.Data[r*w.mat.Inc] = f - } - } - } - *v = w -} - -// Equals compares the vectors represented by b and the receiver and returns true -// if the vectors are element-wise equal. -func (v *Vector) EqualsVec(b *Vector) bool { - n := v.Len() - nb := b.Len() - if n != nb { - return false - } - for i := 0; i < n; i++ { - if v.mat.Data[i*v.mat.Inc] != b.mat.Data[i*b.mat.Inc] { - return false - } - } - return true -} - -// EqualsApproxVec compares the vectors represented by b and the receiver, with -// tolerance for element-wise equality specified by epsilon. -func (v *Vector) EqualsApproxVec(b *Vector, epsilon float64) bool { - n := v.Len() - nb := b.Len() - if n != nb { - return false - } - for i := 0; i < n; i++ { - if math.Abs(v.mat.Data[i*v.mat.Inc]-b.mat.Data[i*b.mat.Inc]) > epsilon { - return false - } - } - return true -} - -// reuseAs resizes an empty vector to a r×1 vector, -// or checks that a non-empty matrix is r×1. -func (v *Vector) reuseAs(r int) { - if v.isZero() { - v.mat = blas64.Vector{ - Inc: 1, - Data: use(v.mat.Data, r), - } - v.n = r - return - } - if r != v.n { - panic(ErrShape) - } -} - -func (v *Vector) isZero() bool { - // It must be the case that v.Dims() returns - // zeros in this case. See comment in Reset(). 
- return v.mat.Inc == 0 -} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index f8684d99f..000000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 51cf5cd1a..000000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. 
Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. 
- -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md deleted file mode 100644 index 68fcf2cab..000000000 --- a/vendor/github.com/google/gofuzz/README.md +++ /dev/null @@ -1,71 +0,0 @@ -gofuzz -====== - -gofuzz is a library for populating go objects with random values. - -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) - -This is useful for testing: - -* Do your project's objects really serialize/unserialize correctly in all cases? -* Is there an incorrectly formatted object that will cause your project to panic? - -Import with ```import "github.com/google/gofuzz"``` - -You can use it on single variables: -``` -f := fuzz.New() -var myInt int -f.Fuzz(&myInt) // myInt gets a random value. -``` - -You can use it on maps: -``` -f := fuzz.New().NilChance(0).NumElements(1, 1) -var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. -``` - -Customize the chance of getting a nil pointer: -``` -f := fuzz.New().NilChance(.5) -var fancyStruct struct { - A, B, C, D *string -} -f.Fuzz(&fancyStruct) // About half the pointers should be set. 
-``` - -You can even customize the randomization completely if needed: -``` -type MyEnum string -const ( - A MyEnum = "A" - B MyEnum = "B" -) -type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string -} - -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, -) - -var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. -``` - -See more examples in ```example_test.go```. - -Happy testing! diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml deleted file mode 100644 index 6796581fb..000000000 --- a/vendor/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b05..000000000 --- a/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. 
- -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d46576..000000000 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b03..000000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto deleted file mode 100644 index f7fba56c3..000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; -} diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index 9d91c6339..000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -install: go get -t diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index cdcea0f65..000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# Mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. - -![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg) - -## Status - -It is ready for production use. It works fine although it may use more of testing. 
Here some projects in the wild using Mergo: - -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) - -[![Build Status][1]][2] -[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo) - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo - -## Installation - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). 
- -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v1 - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d177..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore deleted file mode 100644 index 010c242bd..000000000 --- a/vendor/github.com/jonboulle/clockwork/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml deleted file mode 100644 index 6a363c70f..000000000 --- a/vendor/github.com/jonboulle/clockwork/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - 1.3 diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md deleted file mode 100644 index d43a6c799..000000000 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ /dev/null @@ -1,61 +0,0 @@ -clockwork -========= - -[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork) -[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) - -a simple fake clock for golang - -# Usage - -Replace uses of the `time` package with the `clockwork.Clock` interface instead. 
- -For example, instead of using `time.Sleep` directly: - -``` -func my_func() { - time.Sleep(3 * time.Second) - do_something() -} -``` - -inject a clock and use its `Sleep` method instead: - -``` -func my_func(clock clockwork.Clock) { - clock.Sleep(3 * time.Second) - do_something() -} -``` - -Now you can easily test `my_func` with a `FakeClock`: - -``` -func TestMyFunc(t *testing.T) { - c := clockwork.NewFakeClock() - - // Start our sleepy function - my_func(c) - - // Ensure we wait until my_func is sleeping - c.BlockUntil(1) - - assert_state() - - // Advance the FakeClock forward in time - c.Advance(3) - - assert_state() -} -``` - -and in production builds, simply inject the real clock instead: -``` -my_func(clockwork.NewRealClock()) -``` - -See [example_test.go](example_test.go) for a full example. - -# Credits - -clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) diff --git a/vendor/github.com/juju/ratelimit/README.md b/vendor/github.com/juju/ratelimit/README.md deleted file mode 100644 index a0fdfe2b1..000000000 --- a/vendor/github.com/juju/ratelimit/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# ratelimit --- - import "github.com/juju/ratelimit" - -The ratelimit package provides an efficient token bucket implementation. See -http://en.wikipedia.org/wiki/Token_bucket. - -## Usage - -#### func Reader - -```go -func Reader(r io.Reader, bucket *Bucket) io.Reader -``` -Reader returns a reader that is rate limited by the given token bucket. Each -token in the bucket represents one byte. - -#### func Writer - -```go -func Writer(w io.Writer, bucket *Bucket) io.Writer -``` -Writer returns a writer that is rate limited by the given token bucket. Each -token in the bucket represents one byte. - -#### type Bucket - -```go -type Bucket struct { -} -``` - -Bucket represents a token bucket that fills at a predetermined rate. 
Methods on -Bucket may be called concurrently. - -#### func NewBucket - -```go -func NewBucket(fillInterval time.Duration, capacity int64) *Bucket -``` -NewBucket returns a new token bucket that fills at the rate of one token every -fillInterval, up to the given maximum capacity. Both arguments must be positive. -The bucket is initially full. - -#### func NewBucketWithQuantum - -```go -func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket -``` -NewBucketWithQuantum is similar to NewBucket, but allows the specification of -the quantum size - quantum tokens are added every fillInterval. - -#### func NewBucketWithRate - -```go -func NewBucketWithRate(rate float64, capacity int64) *Bucket -``` -NewBucketWithRate returns a token bucket that fills the bucket at the rate of -rate tokens per second up to the given maximum capacity. Because of limited -clock resolution, at high rates, the actual rate may be up to 1% different from -the specified rate. - -#### func (*Bucket) Rate - -```go -func (tb *Bucket) Rate() float64 -``` -Rate returns the fill rate of the bucket, in tokens per second. - -#### func (*Bucket) Take - -```go -func (tb *Bucket) Take(count int64) time.Duration -``` -Take takes count tokens from the bucket without blocking. It returns the time -that the caller should wait until the tokens are actually available. - -Note that if the request is irrevocable - there is no way to return tokens to -the bucket once this method commits us to taking them. - -#### func (*Bucket) TakeAvailable - -```go -func (tb *Bucket) TakeAvailable(count int64) int64 -``` -TakeAvailable takes up to count immediately available tokens from the bucket. It -returns the number of tokens removed, or zero if there are no available tokens. -It does not block. 
- -#### func (*Bucket) TakeMaxDuration - -```go -func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) -``` -TakeMaxDuration is like Take, except that it will only take tokens from the -bucket if the wait time for the tokens is no greater than maxWait. - -If it would take longer than maxWait for the tokens to become available, it does -nothing and reports false, otherwise it returns the time that the caller should -wait until the tokens are actually available, and reports true. - -#### func (*Bucket) Wait - -```go -func (tb *Bucket) Wait(count int64) -``` -Wait takes count tokens from the bucket, waiting until they are available. - -#### func (*Bucket) WaitMaxDuration - -```go -func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool -``` -WaitMaxDuration is like Wait except that it will only take tokens from the -bucket if it needs to wait for no greater than maxWait. It reports whether any -tokens have been removed from the bucket If no tokens have been removed, it -returns immediately. diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graph.go b/vendor/github.com/openshift/origin/pkg/api/graph/graph.go deleted file mode 100644 index 58a08d32a..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graph.go +++ /dev/null @@ -1,693 +0,0 @@ -package graph - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/gonum/graph" - "github.com/gonum/graph/concrete" - "github.com/gonum/graph/encoding/dot" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/util/sets" -) - -type Node struct { - concrete.Node - UniqueName -} - -// DOTAttributes implements an attribute getter for the DOT encoding -func (n Node) DOTAttributes() []dot.Attribute { - return []dot.Attribute{{Key: "label", Value: fmt.Sprintf("%q", n.UniqueName)}} -} - -// ExistenceChecker is an interface for those nodes that can be created without a backing object. 
-// This can happen when a node wants an edge to a non-existent node. We know the node should exist, -// The graph needs something in that location to track the information we have about the node, but the -// backing object doesn't exist. -type ExistenceChecker interface { - // Found returns false if the node represents an object that we don't have the backing object for - Found() bool -} - -type UniqueName string - -type UniqueNameFunc func(obj interface{}) UniqueName - -func (n UniqueName) UniqueName() string { - return string(n) -} - -func (n UniqueName) String() string { - return string(n) -} - -type uniqueNamer interface { - UniqueName() UniqueName -} - -type NodeFinder interface { - Find(name UniqueName) graph.Node -} - -// UniqueNodeInitializer is a graph that allows nodes with a unique name to be added without duplication. -// If the node is newly added, true will be returned. -type UniqueNodeInitializer interface { - FindOrCreate(name UniqueName, fn NodeInitializerFunc) (graph.Node, bool) -} - -type NodeInitializerFunc func(Node) graph.Node - -func EnsureUnique(g UniqueNodeInitializer, name UniqueName, fn NodeInitializerFunc) graph.Node { - node, _ := g.FindOrCreate(name, fn) - return node -} - -type MutableDirectedEdge interface { - AddEdge(from, to graph.Node, edgeKind string) -} - -type MutableUniqueGraph interface { - graph.Mutable - MutableDirectedEdge - UniqueNodeInitializer - NodeFinder -} - -type Edge struct { - concrete.Edge - kinds sets.String -} - -func NewEdge(from, to graph.Node, kinds ...string) Edge { - return Edge{concrete.Edge{F: from, T: to}, sets.NewString(kinds...)} -} - -func (e Edge) Kinds() sets.String { - return e.kinds -} - -func (e Edge) IsKind(kind string) bool { - return e.kinds.Has(kind) -} - -// DOTAttributes implements an attribute getter for the DOT encoding -func (e Edge) DOTAttributes() []dot.Attribute { - return []dot.Attribute{{Key: "label", Value: fmt.Sprintf("%q", strings.Join(e.Kinds().List(), ","))}} -} - -type 
GraphDescriber interface { - Name(node graph.Node) string - Kind(node graph.Node) string - Object(node graph.Node) interface{} - EdgeKinds(edge graph.Edge) sets.String -} - -type Interface interface { - graph.Directed - - GraphDescriber - MutableUniqueGraph - - Edges() []graph.Edge -} - -type Namer interface { - ResourceName(obj interface{}) string -} - -type namer struct{} - -var DefaultNamer Namer = namer{} - -func (namer) ResourceName(obj interface{}) string { - switch t := obj.(type) { - case uniqueNamer: - return t.UniqueName().String() - default: - return reflect.TypeOf(obj).String() - } -} - -type Graph struct { - // the standard graph - graph.Directed - // helper methods for switching on the kind and types of the node - GraphDescriber - - // exposes the public interface for adding nodes - uniqueNamedGraph - // the internal graph object, which allows edges and nodes to be directly added - internal *concrete.DirectedGraph -} - -// Graph must implement MutableUniqueGraph -var _ MutableUniqueGraph = Graph{} - -// New initializes a graph from input to output. -func New() Graph { - g := concrete.NewDirectedGraph() - return Graph{ - Directed: g, - GraphDescriber: typedGraph{}, - - uniqueNamedGraph: newUniqueNamedGraph(g), - - internal: g, - } -} - -// Edges returns all the edges of the graph. Note that the returned set -// will have no specific ordering. 
-func (g Graph) Edges() []graph.Edge { - return g.internal.Edges() -} - -func (g Graph) String() string { - ret := "" - - nodes := g.Nodes() - sort.Sort(ByID(nodes)) - for _, node := range nodes { - ret += fmt.Sprintf("%d: %v\n", node.ID(), g.GraphDescriber.Name(node)) - - // can't use SuccessorEdges, because I want stable ordering - successors := g.From(node) - sort.Sort(ByID(successors)) - for _, successor := range successors { - edge := g.Edge(node, successor) - kinds := g.EdgeKinds(edge) - for _, kind := range kinds.List() { - ret += fmt.Sprintf("\t%v to %d: %v\n", kind, successor.ID(), g.GraphDescriber.Name(successor)) - } - } - } - - return ret -} - -// ByID is a sorted group of nodes by ID -type ByID []graph.Node - -func (m ByID) Len() int { return len(m) } -func (m ByID) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m ByID) Less(i, j int) bool { - return m[i].ID() < m[j].ID() -} - -// SyntheticNodes returns back the set of nodes that were created in response to edge requests, but did not exist -func (g Graph) SyntheticNodes() []graph.Node { - ret := []graph.Node{} - - nodes := g.Nodes() - sort.Sort(ByID(nodes)) - for _, node := range nodes { - if potentiallySyntheticNode, ok := node.(ExistenceChecker); ok { - if !potentiallySyntheticNode.Found() { - ret = append(ret, node) - } - } - } - - return ret -} - -// NodesByKind returns all the nodes of the graph with the provided kinds -func (g Graph) NodesByKind(nodeKinds ...string) []graph.Node { - ret := []graph.Node{} - - kinds := sets.NewString(nodeKinds...) - for _, node := range g.internal.Nodes() { - if kinds.Has(g.Kind(node)) { - ret = append(ret, node) - } - } - - return ret -} - -// RootNodes returns all the roots of this graph. 
-func (g Graph) RootNodes() []graph.Node { - roots := []graph.Node{} - for _, n := range g.Nodes() { - if len(g.To(n)) != 0 { - continue - } - roots = append(roots, n) - } - return roots -} - -// PredecessorEdges invokes fn with all of the predecessor edges of node that have the specified -// edge kind. -func (g Graph) PredecessorEdges(node graph.Node, fn EdgeFunc, edgeKinds ...string) { - for _, n := range g.To(node) { - edge := g.Edge(n, node) - kinds := g.EdgeKinds(edge) - - if kinds.HasAny(edgeKinds...) { - fn(g, n, node, kinds) - } - } -} - -// SuccessorEdges invokes fn with all of the successor edges of node that have the specified -// edge kind. -func (g Graph) SuccessorEdges(node graph.Node, fn EdgeFunc, edgeKinds ...string) { - for _, n := range g.From(node) { - edge := g.Edge(node, n) - kinds := g.EdgeKinds(edge) - - if kinds.HasAny(edgeKinds...) { - fn(g, n, node, kinds) - } - } -} - -// OutboundEdges returns all the outbound edges from node that are in the list of edgeKinds -// if edgeKinds is empty, then all edges are returned -func (g Graph) OutboundEdges(node graph.Node, edgeKinds ...string) []graph.Edge { - ret := []graph.Edge{} - - for _, n := range g.From(node) { - edge := g.Edge(node, n) - if edge == nil { - continue - } - - if len(edgeKinds) == 0 || g.EdgeKinds(edge).HasAny(edgeKinds...) { - ret = append(ret, edge) - } - } - - return ret -} - -// InboundEdges returns all the inbound edges to node that are in the list of edgeKinds -// if edgeKinds is empty, then all edges are returned -func (g Graph) InboundEdges(node graph.Node, edgeKinds ...string) []graph.Edge { - ret := []graph.Edge{} - - for _, n := range g.To(node) { - edge := g.Edge(n, node) - if edge == nil { - continue - } - - if len(edgeKinds) == 0 || g.EdgeKinds(edge).HasAny(edgeKinds...) 
{ - ret = append(ret, edge) - } - } - - return ret -} - -// PredecessorNodesByEdgeKind returns all the predecessor nodes of the given node -// that can be reached via edges of the provided kinds -func (g Graph) PredecessorNodesByEdgeKind(node graph.Node, edgeKinds ...string) []graph.Node { - ret := []graph.Node{} - - for _, inboundEdges := range g.InboundEdges(node, edgeKinds...) { - ret = append(ret, inboundEdges.From()) - } - - return ret -} - -// SuccessorNodesByEdgeKind returns all the successor nodes of the given node -// that can be reached via edges of the provided kinds -func (g Graph) SuccessorNodesByEdgeKind(node graph.Node, edgeKinds ...string) []graph.Node { - ret := []graph.Node{} - - for _, outboundEdge := range g.OutboundEdges(node, edgeKinds...) { - ret = append(ret, outboundEdge.To()) - } - - return ret -} - -func (g Graph) SuccessorNodesByNodeAndEdgeKind(node graph.Node, nodeKind, edgeKind string) []graph.Node { - ret := []graph.Node{} - - for _, successor := range g.SuccessorNodesByEdgeKind(node, edgeKind) { - if g.Kind(successor) != nodeKind { - continue - } - - ret = append(ret, successor) - } - - return ret -} - -func (g Graph) AddNode(n graph.Node) { - g.internal.AddNode(n) -} - -// AddEdge implements MutableUniqueGraph -func (g Graph) AddEdge(from, to graph.Node, edgeKind string) { - // a Contains edge has semantic meaning for osgraph.Graph objects. It never makes sense - // to allow a single object to be "contained" by multiple nodes. - if edgeKind == ContainsEdgeKind { - // check incoming edges on the 'to' node to be certain that we aren't already contained - containsEdges := g.InboundEdges(to, ContainsEdgeKind) - if len(containsEdges) != 0 { - // TODO consider changing the AddEdge API to make this cleaner. 
This is a pretty severe programming error - panic(fmt.Sprintf("%v is already contained by %v", to, containsEdges)) - } - } - - kinds := sets.NewString(edgeKind) - if existingEdge := g.Edge(from, to); existingEdge != nil { - kinds.Insert(g.EdgeKinds(existingEdge).List()...) - } - - g.internal.SetEdge(NewEdge(from, to, kinds.List()...), 1.0) -} - -// addEdges adds the specified edges, filtered by the provided edge connection -// function. -func (g Graph) addEdges(edges []graph.Edge, fn EdgeFunc) { - for _, e := range edges { - switch t := e.(type) { - case concrete.WeightedEdge: - if fn(g, t.From(), t.To(), t.Edge.(Edge).Kinds()) { - g.internal.SetEdge(t.Edge.(Edge), t.Cost) - } - case Edge: - if fn(g, t.From(), t.To(), t.Kinds()) { - g.internal.SetEdge(t, 1.0) - } - default: - panic("bad edge") - } - } -} - -// NodeFunc is passed a new graph, a node in the graph, and should return true if the -// node should be included. -type NodeFunc func(g Interface, n graph.Node) bool - -// NodesOfKind returns a new NodeFunc accepting the provided kinds of nodes -// If no kinds are specified, the returned NodeFunc will accept all nodes -func NodesOfKind(kinds ...string) NodeFunc { - if len(kinds) == 0 { - return func(g Interface, n graph.Node) bool { - return true - } - } - - allowedKinds := sets.NewString(kinds...) - return func(g Interface, n graph.Node) bool { - return allowedKinds.Has(g.Kind(n)) - } -} - -// EdgeFunc is passed a new graph, an edge in the current graph, and should mutate -// the new graph as needed. If true is returned, the existing edge will be added to the graph. 
-type EdgeFunc func(g Interface, from, to graph.Node, edgeKinds sets.String) bool - -// EdgesOfKind returns a new EdgeFunc accepting the provided kinds of edges -// If no kinds are specified, the returned EdgeFunc will accept all edges -func EdgesOfKind(kinds ...string) EdgeFunc { - if len(kinds) == 0 { - return func(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - return true - } - } - - allowedKinds := sets.NewString(kinds...) - return func(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - return allowedKinds.HasAny(edgeKinds.List()...) - } -} - -// RemoveInboundEdges returns a new EdgeFunc dismissing any inbound edges to -// the provided set of nodes -func RemoveInboundEdges(nodes []graph.Node) EdgeFunc { - return func(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - for _, node := range nodes { - if node == to { - return false - } - } - return true - } -} - -func RemoveOutboundEdges(nodes []graph.Node) EdgeFunc { - return func(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - for _, node := range nodes { - if node == from { - return false - } - } - return true - } -} - -// EdgeSubgraph returns the directed subgraph with only the edges that match the -// provided function. -func (g Graph) EdgeSubgraph(edgeFn EdgeFunc) Graph { - out := New() - for _, node := range g.Nodes() { - out.internal.AddNode(node) - } - out.addEdges(g.internal.Edges(), edgeFn) - return out -} - -// Subgraph returns the directed subgraph with only the nodes and edges that match the -// provided functions. -func (g Graph) Subgraph(nodeFn NodeFunc, edgeFn EdgeFunc) Graph { - out := New() - for _, node := range g.Nodes() { - if nodeFn(out, node) { - out.internal.AddNode(node) - } - } - out.addEdges(g.internal.Edges(), edgeFn) - return out -} - -// SubgraphWithNodes returns the directed subgraph with only the listed nodes and edges that -// match the provided function. 
-func (g Graph) SubgraphWithNodes(nodes []graph.Node, fn EdgeFunc) Graph { - out := New() - for _, node := range nodes { - out.internal.AddNode(node) - } - out.addEdges(g.internal.Edges(), fn) - return out -} - -// ConnectedEdgeSubgraph creates a new graph that iterates through all edges in the graph -// and includes all edges the provided function returns true for. Nodes not referenced by -// an edge will be dropped unless the function adds them explicitly. -func (g Graph) ConnectedEdgeSubgraph(fn EdgeFunc) Graph { - out := New() - out.addEdges(g.internal.Edges(), fn) - return out -} - -// AllNodes includes all nodes in the graph -func AllNodes(g Interface, node graph.Node) bool { - return true -} - -// ExistingDirectEdge returns true if both from and to already exist in the graph and the edge kind is -// not ReferencedByEdgeKind (the generic reverse edge kind). This will purge the graph of any -// edges created by AddReversedEdge. -func ExistingDirectEdge(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - return !edgeKinds.Has(ReferencedByEdgeKind) && g.Has(from) && g.Has(to) -} - -// ReverseExistingDirectEdge reverses the order of the edge and drops the existing edge only if -// both from and to already exist in the graph and the edge kind is not ReferencedByEdgeKind -// (the generic reverse edge kind). -func ReverseExistingDirectEdge(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - return ExistingDirectEdge(g, from, to, edgeKinds) && ReverseGraphEdge(g, from, to, edgeKinds) -} - -// ReverseGraphEdge reverses the order of the edge and drops the existing edge. -func ReverseGraphEdge(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - for edgeKind := range edgeKinds { - g.AddEdge(to, from, edgeKind) - } - return false -} - -// AddReversedEdge adds a reversed edge for every passed edge and preserves the existing -// edge. 
Used to convert a one directional edge into a bidirectional edge, but will -// create duplicate edges if a bidirectional edge between two nodes already exists. -func AddReversedEdge(g Interface, from, to graph.Node, edgeKinds sets.String) bool { - g.AddEdge(to, from, ReferencedByEdgeKind) - return true -} - -// AddGraphEdgesTo returns an EdgeFunc that will add the selected edges to the passed -// graph. -func AddGraphEdgesTo(g Interface) EdgeFunc { - return func(_ Interface, from, to graph.Node, edgeKinds sets.String) bool { - for edgeKind := range edgeKinds { - g.AddEdge(from, to, edgeKind) - } - - return false - } -} - -type uniqueNamedGraph struct { - graph.Mutable - names map[UniqueName]graph.Node -} - -func newUniqueNamedGraph(g graph.Mutable) uniqueNamedGraph { - return uniqueNamedGraph{ - Mutable: g, - names: make(map[UniqueName]graph.Node), - } -} - -func (g uniqueNamedGraph) FindOrCreate(name UniqueName, fn NodeInitializerFunc) (graph.Node, bool) { - if node, ok := g.names[name]; ok { - return node, true - } - id := g.NewNodeID() - node := fn(Node{concrete.Node(id), name}) - g.names[name] = node - g.AddNode(node) - return node, false -} - -func (g uniqueNamedGraph) Find(name UniqueName) graph.Node { - if node, ok := g.names[name]; ok { - return node - } - return nil -} - -type typedGraph struct{} - -func (g typedGraph) Name(node graph.Node) string { - switch t := node.(type) { - case fmt.Stringer: - return t.String() - case uniqueNamer: - return t.UniqueName().String() - default: - return fmt.Sprintf("", node.ID()) - } -} - -type objectifier interface { - Object() interface{} -} - -func (g typedGraph) Object(node graph.Node) interface{} { - switch t := node.(type) { - case objectifier: - return t.Object() - default: - return nil - } -} - -type kind interface { - Kind() string -} - -func (g typedGraph) Kind(node graph.Node) string { - if k, ok := node.(kind); ok { - return k.Kind() - } - return UnknownNodeKind -} - -func (g typedGraph) EdgeKinds(edge 
graph.Edge) sets.String { - var e Edge - switch t := edge.(type) { - case concrete.WeightedEdge: - e = t.Edge.(Edge) - case Edge: - e = t - default: - return sets.NewString(UnknownEdgeKind) - } - return e.Kinds() -} - -type NodeSet map[int]struct{} - -func (n NodeSet) Has(id int) bool { - _, ok := n[id] - return ok -} - -func (n NodeSet) Add(id int) { - n[id] = struct{}{} -} - -func NodesByKind(g Interface, nodes []graph.Node, kinds ...string) [][]graph.Node { - buckets := make(map[string]int) - for i, kind := range kinds { - buckets[kind] = i - } - if nodes == nil { - nodes = g.Nodes() - } - - last := len(kinds) - result := make([][]graph.Node, last+1) - for _, node := range nodes { - if bucket, ok := buckets[g.Kind(node)]; ok { - result[bucket] = append(result[bucket], node) - } else { - result[last] = append(result[last], node) - } - } - return result -} - -// IsFromDifferentNamespace returns if a node is in a different namespace -// than the one provided. -func IsFromDifferentNamespace(namespace string, node graph.Node) bool { - potentiallySyntheticNode, ok := node.(ExistenceChecker) - if !ok || potentiallySyntheticNode.Found() { - return false - } - objectified, ok := node.(objectifier) - if !ok { - return false - } - object, err := meta.Accessor(objectified) - if err != nil { - return false - } - return object.GetNamespace() != namespace -} - -func pathCovered(path []graph.Node, paths map[int][]graph.Node) bool { - l := len(path) - for _, existing := range paths { - if l >= len(existing) { - continue - } - if pathEqual(path, existing) { - return true - } - } - return false -} - -func pathEqual(a, b []graph.Node) bool { - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/dc_pipeline.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/dc_pipeline.go deleted file mode 100644 index 4cf39de97..000000000 --- 
a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/dc_pipeline.go +++ /dev/null @@ -1,84 +0,0 @@ -package graphview - -import ( - "sort" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - deployedges "github.com/openshift/origin/pkg/deploy/graph" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" -) - -type DeploymentConfigPipeline struct { - Deployment *deploygraph.DeploymentConfigNode - - ActiveDeployment *kubegraph.ReplicationControllerNode - InactiveDeployments []*kubegraph.ReplicationControllerNode - - Images []ImagePipeline -} - -// AllDeploymentConfigPipelines returns all the DCPipelines that aren't in the excludes set and the set of covered NodeIDs -func AllDeploymentConfigPipelines(g osgraph.Graph, excludeNodeIDs IntSet) ([]DeploymentConfigPipeline, IntSet) { - covered := IntSet{} - dcPipelines := []DeploymentConfigPipeline{} - - for _, uncastNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) { - if excludeNodeIDs.Has(uncastNode.ID()) { - continue - } - - pipeline, covers := NewDeploymentConfigPipeline(g, uncastNode.(*deploygraph.DeploymentConfigNode)) - covered.Insert(covers.List()...) 
- dcPipelines = append(dcPipelines, pipeline) - } - - sort.Sort(SortedDeploymentConfigPipeline(dcPipelines)) - return dcPipelines, covered -} - -// NewDeploymentConfigPipeline returns the DeploymentConfigPipeline and a set of all the NodeIDs covered by the DeploymentConfigPipeline -func NewDeploymentConfigPipeline(g osgraph.Graph, dcNode *deploygraph.DeploymentConfigNode) (DeploymentConfigPipeline, IntSet) { - covered := IntSet{} - covered.Insert(dcNode.ID()) - - dcPipeline := DeploymentConfigPipeline{} - dcPipeline.Deployment = dcNode - - // for everything that can trigger a deployment, create an image pipeline and add it to the list - for _, istNode := range g.PredecessorNodesByEdgeKind(dcNode, deployedges.TriggersDeploymentEdgeKind) { - imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation)) - - covered.Insert(covers.List()...) - dcPipeline.Images = append(dcPipeline.Images, imagePipeline) - } - - // for image that we use, create an image pipeline and add it to the list - for _, tagNode := range g.PredecessorNodesByEdgeKind(dcNode, deployedges.UsedInDeploymentEdgeKind) { - imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation)) - - covered.Insert(covers.List()...) - dcPipeline.Images = append(dcPipeline.Images, imagePipeline) - } - - dcPipeline.ActiveDeployment, dcPipeline.InactiveDeployments = deployedges.RelevantDeployments(g, dcNode) - for _, rc := range dcPipeline.InactiveDeployments { - _, covers := NewReplicationController(g, rc) - covered.Insert(covers.List()...) - } - - if dcPipeline.ActiveDeployment != nil { - _, covers := NewReplicationController(g, dcPipeline.ActiveDeployment) - covered.Insert(covers.List()...) 
- } - - return dcPipeline, covered -} - -type SortedDeploymentConfigPipeline []DeploymentConfigPipeline - -func (m SortedDeploymentConfigPipeline) Len() int { return len(m) } -func (m SortedDeploymentConfigPipeline) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m SortedDeploymentConfigPipeline) Less(i, j int) bool { - return CompareObjectMeta(&m[i].Deployment.DeploymentConfig.ObjectMeta, &m[j].Deployment.DeploymentConfig.ObjectMeta) -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/image_pipeline.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/image_pipeline.go deleted file mode 100644 index 4eecd9b94..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/image_pipeline.go +++ /dev/null @@ -1,240 +0,0 @@ -package graphview - -import ( - "sort" - - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildedges "github.com/openshift/origin/pkg/build/graph" - buildgraph "github.com/openshift/origin/pkg/build/graph/nodes" - imageedges "github.com/openshift/origin/pkg/image/graph" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" -) - -// ImagePipeline represents a build, its output, and any inputs. The input -// to a build may be another ImagePipeline. -type ImagePipeline struct { - Image ImageTagLocation - DestinationResolved bool - ScheduledImport bool - - Build *buildgraph.BuildConfigNode - - LastSuccessfulBuild *buildgraph.BuildNode - LastUnsuccessfulBuild *buildgraph.BuildNode - ActiveBuilds []*buildgraph.BuildNode - - // If set, the base image used by the build - BaseImage ImageTagLocation - // if set, the build config names that produces the base image - BaseBuilds []string - // If set, the source repository that inputs to the build - Source SourceLocation -} - -// ImageTagLocation identifies the source or destination of an image. 
Represents -// both a tag in a Docker image repository, as well as a tag in an OpenShift image stream. -type ImageTagLocation interface { - ID() int - ImageSpec() string - ImageTag() string -} - -// SourceLocation identifies a repository that is an input to a build. -type SourceLocation interface { - ID() int -} - -func AllImagePipelinesFromBuildConfig(g osgraph.Graph, excludeNodeIDs IntSet) ([]ImagePipeline, IntSet) { - covered := IntSet{} - pipelines := []ImagePipeline{} - - for _, uncastNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) { - if excludeNodeIDs.Has(uncastNode.ID()) { - continue - } - - pipeline, covers := NewImagePipelineFromBuildConfigNode(g, uncastNode.(*buildgraph.BuildConfigNode)) - covered.Insert(covers.List()...) - pipelines = append(pipelines, pipeline) - } - - sort.Sort(SortedImagePipelines(pipelines)) - - outputImageToBCMap := make(map[string][]string) - for _, pipeline := range pipelines { - // note, bc does not have to have an output image - if pipeline.Image != nil { - bcs, ok := outputImageToBCMap[pipeline.Image.ImageSpec()] - if !ok { - bcs = []string{} - } - bcs = append(bcs, pipeline.Build.BuildConfig.Name) - outputImageToBCMap[pipeline.Image.ImageSpec()] = bcs - } - } - - if len(outputImageToBCMap) > 0 { - for i, pipeline := range pipelines { - // note, bc does not have to have an input strategy image - if pipeline.BaseImage != nil { - baseBCs, ok := outputImageToBCMap[pipeline.BaseImage.ImageSpec()] - if ok && len(baseBCs) > 0 { - pipelines[i].BaseBuilds = baseBCs - } - } - } - } - - return pipelines, covered -} - -// NewImagePipeline attempts to locate a build flow from the provided node. If no such -// build flow can be located, false is returned. 
-func NewImagePipelineFromBuildConfigNode(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (ImagePipeline, IntSet) { - covered := IntSet{} - covered.Insert(bcNode.ID()) - - flow := ImagePipeline{} - - base, src, coveredInputs, scheduled, _ := findBuildInputs(g, bcNode) - covered.Insert(coveredInputs.List()...) - flow.BaseImage = base - flow.Source = src - flow.Build = bcNode - flow.ScheduledImport = scheduled - flow.LastSuccessfulBuild, flow.LastUnsuccessfulBuild, flow.ActiveBuilds = buildedges.RelevantBuilds(g, flow.Build) - flow.Image = findBuildOutput(g, bcNode) - - // we should have at most one - for _, buildOutputNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) { - // this will handle the imagestream tag case - for _, input := range g.SuccessorNodesByEdgeKind(buildOutputNode, imageedges.ReferencedImageStreamGraphEdgeKind) { - imageStreamNode := input.(*imagegraph.ImageStreamNode) - - flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0) - } - // this will handle the imagestream image case - for _, input := range g.SuccessorNodesByEdgeKind(buildOutputNode, imageedges.ReferencedImageStreamImageGraphEdgeKind) { - imageStreamNode := input.(*imagegraph.ImageStreamNode) - - flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0) - } - - // TODO handle the DockerImage case - } - - return flow, covered -} - -// NewImagePipelineFromImageTagLocation returns the ImagePipeline and all the nodes contributing to it -func NewImagePipelineFromImageTagLocation(g osgraph.Graph, node graph.Node, imageTagLocation ImageTagLocation) (ImagePipeline, IntSet) { - covered := IntSet{} - covered.Insert(node.ID()) - - flow := ImagePipeline{} - flow.Image = imageTagLocation - - for _, input := range g.PredecessorNodesByEdgeKind(node, buildedges.BuildOutputEdgeKind) { - covered.Insert(input.ID()) - build := input.(*buildgraph.BuildConfigNode) - if flow.Build != nil { - // report this as an error 
(unexpected duplicate input build) - } - if build.BuildConfig == nil { - // report this as as a missing build / broken link - break - } - - base, src, coveredInputs, scheduled, _ := findBuildInputs(g, build) - covered.Insert(coveredInputs.List()...) - flow.BaseImage = base - flow.Source = src - flow.Build = build - flow.ScheduledImport = scheduled - flow.LastSuccessfulBuild, flow.LastUnsuccessfulBuild, flow.ActiveBuilds = buildedges.RelevantBuilds(g, flow.Build) - } - - for _, input := range g.SuccessorNodesByEdgeKind(node, imageedges.ReferencedImageStreamGraphEdgeKind) { - covered.Insert(input.ID()) - imageStreamNode := input.(*imagegraph.ImageStreamNode) - - flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0) - } - for _, input := range g.SuccessorNodesByEdgeKind(node, imageedges.ReferencedImageStreamImageGraphEdgeKind) { - covered.Insert(input.ID()) - imageStreamNode := input.(*imagegraph.ImageStreamNode) - - flow.DestinationResolved = (len(imageStreamNode.Status.DockerImageRepository) != 0) - } - - return flow, covered -} - -func findBuildInputs(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (base ImageTagLocation, source SourceLocation, covered IntSet, scheduled bool, err error) { - covered = IntSet{} - - // find inputs to the build - for _, input := range g.PredecessorNodesByEdgeKind(bcNode, buildedges.BuildInputEdgeKind) { - if source != nil { - // report this as an error (unexpected duplicate source) - } - covered.Insert(input.ID()) - source = input.(SourceLocation) - } - for _, input := range g.PredecessorNodesByEdgeKind(bcNode, buildedges.BuildInputImageEdgeKind) { - if base != nil { - // report this as an error (unexpected duplicate input build) - } - covered.Insert(input.ID()) - base = input.(ImageTagLocation) - scheduled = imageStreamTagScheduled(g, input, base) - } - - return -} - -func findBuildOutput(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (result ImageTagLocation) { - for _, output := range 
g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) { - result = output.(ImageTagLocation) - return - } - return -} - -func imageStreamTagScheduled(g osgraph.Graph, input graph.Node, base ImageTagLocation) (scheduled bool) { - for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(input, imageedges.ReferencedImageStreamGraphEdgeKind) { - imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode) - if imageStreamNode.ImageStream != nil { - if tag, ok := imageStreamNode.ImageStream.Spec.Tags[base.ImageTag()]; ok { - scheduled = tag.ImportPolicy.Scheduled - return - } - } - } - return -} - -type SortedImagePipelines []ImagePipeline - -func (m SortedImagePipelines) Len() int { return len(m) } -func (m SortedImagePipelines) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m SortedImagePipelines) Less(i, j int) bool { - return CompareImagePipeline(&m[i], &m[j]) -} - -func CompareImagePipeline(a, b *ImagePipeline) bool { - switch { - case a.Build != nil && b.Build != nil && a.Build.BuildConfig != nil && b.Build.BuildConfig != nil: - return CompareObjectMeta(&a.Build.BuildConfig.ObjectMeta, &b.Build.BuildConfig.ObjectMeta) - case a.Build != nil && a.Build.BuildConfig != nil: - return true - case b.Build != nil && b.Build.BuildConfig != nil: - return false - } - if a.Image == nil || b.Image == nil { - return true - } - return a.Image.ImageSpec() < b.Image.ImageSpec() -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/intset.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/intset.go deleted file mode 100644 index b5f76d5c8..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/intset.go +++ /dev/null @@ -1,46 +0,0 @@ -package graphview - -import ( - "sort" - - "k8s.io/kubernetes/pkg/util/sets" -) - -type IntSet map[int]sets.Empty - -// NewIntSet creates a IntSet from a list of values. -func NewIntSet(items ...int) IntSet { - ss := IntSet{} - ss.Insert(items...) 
- return ss -} - -// Insert adds items to the set. -func (s IntSet) Insert(items ...int) { - for _, item := range items { - s[item] = sets.Empty{} - } -} - -// Delete removes all items from the set. -func (s IntSet) Delete(items ...int) { - for _, item := range items { - delete(s, item) - } -} - -// Has returns true iff item is contained in the set. -func (s IntSet) Has(item int) bool { - _, contained := s[item] - return contained -} - -// List returns the contents as a sorted string slice. -func (s IntSet) List() []int { - res := make([]int, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.IntSlice(res).Sort() - return res -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/petset.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/petset.go deleted file mode 100644 index b06d76f5c..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/petset.go +++ /dev/null @@ -1,51 +0,0 @@ -package graphview - -import ( - osgraph "github.com/openshift/origin/pkg/api/graph" - kubeedges "github.com/openshift/origin/pkg/api/kubegraph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" -) - -type PetSet struct { - PetSet *kubegraph.PetSetNode - - OwnedPods []*kubegraph.PodNode - CreatedPods []*kubegraph.PodNode - - // TODO: handle conflicting once controller refs are present, not worth it yet -} - -// AllPetSets returns all the PetSets that aren't in the excludes set and the set of covered NodeIDs -func AllPetSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]PetSet, IntSet) { - covered := IntSet{} - views := []PetSet{} - - for _, uncastNode := range g.NodesByKind(kubegraph.PetSetNodeKind) { - if excludeNodeIDs.Has(uncastNode.ID()) { - continue - } - - view, covers := NewPetSet(g, uncastNode.(*kubegraph.PetSetNode)) - covered.Insert(covers.List()...) 
- views = append(views, view) - } - - return views, covered -} - -// NewPetSet returns the PetSet and a set of all the NodeIDs covered by the PetSet -func NewPetSet(g osgraph.Graph, node *kubegraph.PetSetNode) (PetSet, IntSet) { - covered := IntSet{} - covered.Insert(node.ID()) - - view := PetSet{} - view.PetSet = node - - for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.ManagedByControllerEdgeKind) { - podNode := uncastPodNode.(*kubegraph.PodNode) - covered.Insert(podNode.ID()) - view.OwnedPods = append(view.OwnedPods, podNode) - } - - return view, covered -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/pod.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/pod.go deleted file mode 100644 index e0d09bbfa..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/pod.go +++ /dev/null @@ -1,39 +0,0 @@ -package graphview - -import ( - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" -) - -type Pod struct { - Pod *kubegraph.PodNode -} - -// AllPods returns all Pods and the set of covered NodeIDs -func AllPods(g osgraph.Graph, excludeNodeIDs IntSet) ([]Pod, IntSet) { - covered := IntSet{} - pods := []Pod{} - - for _, uncastNode := range g.NodesByKind(kubegraph.PodNodeKind) { - if excludeNodeIDs.Has(uncastNode.ID()) { - continue - } - - pod, covers := NewPod(g, uncastNode.(*kubegraph.PodNode)) - covered.Insert(covers.List()...) 
- pods = append(pods, pod) - } - - return pods, covered -} - -// NewPod returns the Pod and a set of all the NodeIDs covered by the Pod -func NewPod(g osgraph.Graph, podNode *kubegraph.PodNode) (Pod, IntSet) { - covered := IntSet{} - covered.Insert(podNode.ID()) - - podView := Pod{} - podView.Pod = podNode - - return podView, covered -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/rc.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/rc.go deleted file mode 100644 index 59d57dec2..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/rc.go +++ /dev/null @@ -1,100 +0,0 @@ -package graphview - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubeedges "github.com/openshift/origin/pkg/api/kubegraph" - "github.com/openshift/origin/pkg/api/kubegraph/analysis" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" -) - -type ReplicationController struct { - RC *kubegraph.ReplicationControllerNode - - OwnedPods []*kubegraph.PodNode - CreatedPods []*kubegraph.PodNode - - ConflictingRCs []*kubegraph.ReplicationControllerNode - ConflictingRCIDToPods map[int][]*kubegraph.PodNode -} - -// AllReplicationControllers returns all the ReplicationControllers that aren't in the excludes set and the set of covered NodeIDs -func AllReplicationControllers(g osgraph.Graph, excludeNodeIDs IntSet) ([]ReplicationController, IntSet) { - covered := IntSet{} - rcViews := []ReplicationController{} - - for _, uncastNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) { - if excludeNodeIDs.Has(uncastNode.ID()) { - continue - } - - rcView, covers := NewReplicationController(g, uncastNode.(*kubegraph.ReplicationControllerNode)) - covered.Insert(covers.List()...) 
- rcViews = append(rcViews, rcView) - } - - return rcViews, covered -} - -// MaxRecentContainerRestarts returns the maximum container restarts for all pods in -// replication controller. -func (rc *ReplicationController) MaxRecentContainerRestarts() int32 { - var maxRestarts int32 - for _, pod := range rc.OwnedPods { - for _, status := range pod.Status.ContainerStatuses { - if status.RestartCount > maxRestarts && analysis.ContainerRestartedRecently(status, unversioned.Now()) { - maxRestarts = status.RestartCount - } - } - } - return maxRestarts -} - -// NewReplicationController returns the ReplicationController and a set of all the NodeIDs covered by the ReplicationController -func NewReplicationController(g osgraph.Graph, rcNode *kubegraph.ReplicationControllerNode) (ReplicationController, IntSet) { - covered := IntSet{} - covered.Insert(rcNode.ID()) - - rcView := ReplicationController{} - rcView.RC = rcNode - rcView.ConflictingRCIDToPods = map[int][]*kubegraph.PodNode{} - - for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) { - podNode := uncastPodNode.(*kubegraph.PodNode) - covered.Insert(podNode.ID()) - rcView.OwnedPods = append(rcView.OwnedPods, podNode) - - // check to see if this pod is managed by more than one RC - uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind) - if len(uncastOwningRCs) > 1 { - for _, uncastOwningRC := range uncastOwningRCs { - if uncastOwningRC.ID() == rcNode.ID() { - continue - } - - conflictingRC := uncastOwningRC.(*kubegraph.ReplicationControllerNode) - rcView.ConflictingRCs = append(rcView.ConflictingRCs, conflictingRC) - - conflictingPods, ok := rcView.ConflictingRCIDToPods[conflictingRC.ID()] - if !ok { - conflictingPods = []*kubegraph.PodNode{} - } - conflictingPods = append(conflictingPods, podNode) - rcView.ConflictingRCIDToPods[conflictingRC.ID()] = conflictingPods - } - } - } - - return rcView, covered -} - -// 
MaxRecentContainerRestartsForRC returns the maximum container restarts in pods -// in the replication controller node for the last 10 minutes. -func MaxRecentContainerRestartsForRC(g osgraph.Graph, rcNode *kubegraph.ReplicationControllerNode) int32 { - if rcNode == nil { - return 0 - } - rc, _ := NewReplicationController(g, rcNode) - return rc.MaxRecentContainerRestarts() -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/service_group.go b/vendor/github.com/openshift/origin/pkg/api/graph/graphview/service_group.go deleted file mode 100644 index 5706dbc4f..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/graphview/service_group.go +++ /dev/null @@ -1,134 +0,0 @@ -package graphview - -import ( - "fmt" - "sort" - - kapi "k8s.io/kubernetes/pkg/api" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubeedges "github.com/openshift/origin/pkg/api/kubegraph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" - routeedges "github.com/openshift/origin/pkg/route/graph" - routegraph "github.com/openshift/origin/pkg/route/graph/nodes" -) - -// ServiceGroup is a service, the DeploymentConfigPipelines it covers, and lists of the other nodes that fulfill it -type ServiceGroup struct { - Service *kubegraph.ServiceNode - - DeploymentConfigPipelines []DeploymentConfigPipeline - ReplicationControllers []ReplicationController - PetSets []PetSet - - // TODO: this has to stop - FulfillingPetSets []*kubegraph.PetSetNode - FulfillingDCs []*deploygraph.DeploymentConfigNode - FulfillingRCs []*kubegraph.ReplicationControllerNode - FulfillingPods []*kubegraph.PodNode - - ExposingRoutes []*routegraph.RouteNode -} - -// AllServiceGroups returns all the ServiceGroups that aren't in the excludes set and the set of covered NodeIDs -func AllServiceGroups(g osgraph.Graph, excludeNodeIDs IntSet) 
([]ServiceGroup, IntSet) { - covered := IntSet{} - services := []ServiceGroup{} - - for _, uncastNode := range g.NodesByKind(kubegraph.ServiceNodeKind) { - if excludeNodeIDs.Has(uncastNode.ID()) { - continue - } - - service, covers := NewServiceGroup(g, uncastNode.(*kubegraph.ServiceNode)) - covered.Insert(covers.List()...) - services = append(services, service) - } - - sort.Sort(ServiceGroupByObjectMeta(services)) - return services, covered -} - -// NewServiceGroup returns the ServiceGroup and a set of all the NodeIDs covered by the service -func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (ServiceGroup, IntSet) { - covered := IntSet{} - covered.Insert(serviceNode.ID()) - - service := ServiceGroup{} - service.Service = serviceNode - - for _, uncastServiceFulfiller := range g.PredecessorNodesByEdgeKind(serviceNode, kubeedges.ExposedThroughServiceEdgeKind) { - container := osgraph.GetTopLevelContainerNode(g, uncastServiceFulfiller) - - switch castContainer := container.(type) { - case *deploygraph.DeploymentConfigNode: - service.FulfillingDCs = append(service.FulfillingDCs, castContainer) - case *kubegraph.ReplicationControllerNode: - service.FulfillingRCs = append(service.FulfillingRCs, castContainer) - case *kubegraph.PodNode: - service.FulfillingPods = append(service.FulfillingPods, castContainer) - case *kubegraph.PetSetNode: - service.FulfillingPetSets = append(service.FulfillingPetSets, castContainer) - default: - utilruntime.HandleError(fmt.Errorf("unrecognized container: %v", castContainer)) - } - } - - for _, uncastServiceFulfiller := range g.PredecessorNodesByEdgeKind(serviceNode, routeedges.ExposedThroughRouteEdgeKind) { - container := osgraph.GetTopLevelContainerNode(g, uncastServiceFulfiller) - - switch castContainer := container.(type) { - case *routegraph.RouteNode: - service.ExposingRoutes = append(service.ExposingRoutes, castContainer) - default: - utilruntime.HandleError(fmt.Errorf("unrecognized container: %v", 
castContainer)) - } - } - - // add the DCPipelines for all the DCs that fulfill the service - for _, fulfillingDC := range service.FulfillingDCs { - dcPipeline, dcCovers := NewDeploymentConfigPipeline(g, fulfillingDC) - - covered.Insert(dcCovers.List()...) - service.DeploymentConfigPipelines = append(service.DeploymentConfigPipelines, dcPipeline) - } - - for _, fulfillingRC := range service.FulfillingRCs { - rcView, rcCovers := NewReplicationController(g, fulfillingRC) - - covered.Insert(rcCovers.List()...) - service.ReplicationControllers = append(service.ReplicationControllers, rcView) - } - - for _, fulfillingPetSet := range service.FulfillingPetSets { - view, covers := NewPetSet(g, fulfillingPetSet) - - covered.Insert(covers.List()...) - service.PetSets = append(service.PetSets, view) - } - - for _, fulfillingPod := range service.FulfillingPods { - _, podCovers := NewPod(g, fulfillingPod) - covered.Insert(podCovers.List()...) - } - - return service, covered -} - -type ServiceGroupByObjectMeta []ServiceGroup - -func (m ServiceGroupByObjectMeta) Len() int { return len(m) } -func (m ServiceGroupByObjectMeta) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m ServiceGroupByObjectMeta) Less(i, j int) bool { - a, b := m[i], m[j] - return CompareObjectMeta(&a.Service.Service.ObjectMeta, &b.Service.Service.ObjectMeta) -} - -func CompareObjectMeta(a, b *kapi.ObjectMeta) bool { - if a.Namespace == b.Namespace { - return a.Name < b.Name - } - return a.Namespace < b.Namespace -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/interfaces.go b/vendor/github.com/openshift/origin/pkg/api/graph/interfaces.go deleted file mode 100644 index d59c00269..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/interfaces.go +++ /dev/null @@ -1,134 +0,0 @@ -package graph - -import ( - "github.com/gonum/graph" -) - -// Marker is a struct that describes something interesting on a Node -type Marker struct { - // Node is the optional node that this 
message is attached to - Node graph.Node - // RelatedNodes is an optional list of other nodes that are involved in this marker. - RelatedNodes []graph.Node - - // Severity indicates how important this problem is. - Severity Severity - // Key is a short string to identify this message - Key string - - // Message is a human-readable string that describes what is interesting - Message string - // Suggestion is a human-readable string that holds advice for resolving this - // marker. - Suggestion Suggestion -} - -// Severity indicates how important this problem is. -type Severity string - -const ( - // InfoSeverity is interesting - // TODO: Consider what to do with this once we revisit the graph api - currently not used. - InfoSeverity Severity = "info" - // WarningSeverity is probably wrong, but we aren't certain - WarningSeverity Severity = "warning" - // ErrorSeverity is definitely wrong, this won't work - ErrorSeverity Severity = "error" -) - -type Markers []Marker - -// MarkerScanner is a function for analyzing a graph and finding interesting things in it -type MarkerScanner func(g Graph, f Namer) []Marker - -func (m Markers) BySeverity(severity Severity) []Marker { - ret := []Marker{} - for i := range m { - if m[i].Severity == severity { - ret = append(ret, m[i]) - } - } - - return ret -} - -// FilterByNamespace returns all the markers that are not associated with missing nodes -// from other namespaces (other than the provided namespace). -func (m Markers) FilterByNamespace(namespace string) Markers { - filtered := Markers{} - - for i := range m { - markerNodes := []graph.Node{} - markerNodes = append(markerNodes, m[i].Node) - markerNodes = append(markerNodes, m[i].RelatedNodes...) 
- hasCrossNamespaceLink := false - for _, node := range markerNodes { - if IsFromDifferentNamespace(namespace, node) { - hasCrossNamespaceLink = true - break - } - } - if !hasCrossNamespaceLink { - filtered = append(filtered, m[i]) - } - } - - return filtered -} - -type BySeverity []Marker - -func (m BySeverity) Len() int { return len(m) } -func (m BySeverity) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m BySeverity) Less(i, j int) bool { - lhs := m[i] - rhs := m[j] - - switch lhs.Severity { - case ErrorSeverity: - switch rhs.Severity { - case ErrorSeverity: - return false - } - case WarningSeverity: - switch rhs.Severity { - case ErrorSeverity, WarningSeverity: - return false - } - case InfoSeverity: - switch rhs.Severity { - case ErrorSeverity, WarningSeverity, InfoSeverity: - return false - } - } - - return true -} - -type ByNodeID []Marker - -func (m ByNodeID) Len() int { return len(m) } -func (m ByNodeID) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m ByNodeID) Less(i, j int) bool { - if m[i].Node == nil { - return true - } - if m[j].Node == nil { - return false - } - return m[i].Node.ID() < m[j].Node.ID() -} - -type ByKey []Marker - -func (m ByKey) Len() int { return len(m) } -func (m ByKey) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m ByKey) Less(i, j int) bool { - return m[i].Key < m[j].Key -} - -type Suggestion string - -func (s Suggestion) String() string { - return string(s) -} diff --git a/vendor/github.com/openshift/origin/pkg/api/graph/types.go b/vendor/github.com/openshift/origin/pkg/api/graph/types.go deleted file mode 100644 index 1bb6622b1..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/graph/types.go +++ /dev/null @@ -1,69 +0,0 @@ -package graph - -import ( - "fmt" - - "github.com/gonum/graph" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -const ( - UnknownNodeKind = "UnknownNode" -) - -const ( - UnknownEdgeKind = "UnknownEdge" - // ReferencedByEdgeKind is the kind to use if 
you're building reverse links that don't have a specific edge in the other direction - // other uses are discouraged. You should create a kind for your edge - ReferencedByEdgeKind = "ReferencedBy" - // ContainsEdgeKind is the kind to use if one node's contents logically contain another node's contents. A given node can only have - // a single inbound Contais edge. The code does not prevent contains cycles, but that's insane, don't do that. - ContainsEdgeKind = "Contains" -) - -func GetUniqueRuntimeObjectNodeName(nodeKind string, obj runtime.Object) UniqueName { - meta, err := kapi.ObjectMetaFor(obj) - if err != nil { - panic(err) - } - - return UniqueName(fmt.Sprintf("%s|%s/%s", nodeKind, meta.Namespace, meta.Name)) -} - -// GetTopLevelContainerNode traverses the reverse ContainsEdgeKind edges until it finds a node -// that does not have an inbound ContainsEdgeKind edge. This could be the node itself -func GetTopLevelContainerNode(g Graph, containedNode graph.Node) graph.Node { - // my kingdom for a LinkedHashSet - visited := map[int]bool{} - prevContainingNode := containedNode - - for { - visited[prevContainingNode.ID()] = true - currContainingNode := GetContainingNode(g, prevContainingNode) - - if currContainingNode == nil { - return prevContainingNode - } - if _, alreadyVisited := visited[currContainingNode.ID()]; alreadyVisited { - panic(fmt.Sprintf("contains cycle in %v", visited)) - } - - prevContainingNode = currContainingNode - } -} - -// GetContainingNode returns the direct predecessor that is linked to the node by a ContainsEdgeKind. It returns -// nil if no container is found. 
-func GetContainingNode(g Graph, containedNode graph.Node) graph.Node { - for _, node := range g.To(containedNode) { - edge := g.Edge(node, containedNode) - - if g.EdgeKinds(edge).Has(ContainsEdgeKind) { - return node - } - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/hpa.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/hpa.go deleted file mode 100644 index 30ff94e05..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/hpa.go +++ /dev/null @@ -1,180 +0,0 @@ -package analysis - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/util/sets" - - graphapi "github.com/gonum/graph" - "github.com/gonum/graph/path" - - osgraph "github.com/openshift/origin/pkg/api/graph" - "github.com/openshift/origin/pkg/api/kubegraph" - kubenodes "github.com/openshift/origin/pkg/api/kubegraph/nodes" - deploygraph "github.com/openshift/origin/pkg/deploy/graph" - deploynodes "github.com/openshift/origin/pkg/deploy/graph/nodes" -) - -const ( - // HPAMissingScaleRefError denotes an error where a Horizontal Pod Autoscaler does not have a reference to an object to scale - HPAMissingScaleRefError = "HPAMissingScaleRef" - // HPAMissingCPUTargetError denotes an error where a Horizontal Pod Autoscaler does not have a CPU target to scale by. - // Currently, the only supported scale metric is CPU utilization, so without this metric an HPA is useless. - HPAMissingCPUTargetError = "HPAMissingCPUTarget" - // HPAOverlappingScaleRefWarning denotes a warning where a Horizontal Pod Autoscaler scales an object that is scaled by some other object as well - HPAOverlappingScaleRefWarning = "HPAOverlappingScaleRef" -) - -// FindHPASpecsMissingCPUTargets scans the graph in search of HorizontalPodAutoscalers that are missing a CPU utilization target. 
-// As of right now, the only metric that HPAs can use to scale pods is the CPU utilization, so if a HPA is missing this target it -// is effectively useless. -func FindHPASpecsMissingCPUTargets(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) { - node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode) - - if node.HorizontalPodAutoscaler.Spec.TargetCPUUtilizationPercentage == nil { - markers = append(markers, osgraph.Marker{ - Node: node, - Severity: osgraph.ErrorSeverity, - Key: HPAMissingCPUTargetError, - Message: fmt.Sprintf("%s is missing a CPU utilization target", namer.ResourceName(node)), - Suggestion: osgraph.Suggestion(fmt.Sprintf(`oc patch %s -p '{"spec":{"targetCPUUtilizationPercentage": 80}}'`, namer.ResourceName(node))), - }) - } - } - - return markers -} - -// FindHPASpecsMissingScaleRefs finds all Horizontal Pod Autoscalers whose scale reference points to an object that doesn't exist -// or that the client does not have the permission to see. 
-func FindHPASpecsMissingScaleRefs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) { - node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode) - - scaledObjects := graph.SuccessorNodesByEdgeKind( - uncastNode, - kubegraph.ScalingEdgeKind, - ) - - if len(scaledObjects) < 1 { - markers = append(markers, createMissingScaleRefMarker(node, nil, namer)) - continue - } - - for _, scaleRef := range scaledObjects { - if existenceChecker, ok := scaleRef.(osgraph.ExistenceChecker); ok && !existenceChecker.Found() { - // if this node is synthetic, we can't be sure that the HPA is scaling something that actually exists - markers = append(markers, createMissingScaleRefMarker(node, scaleRef, namer)) - } - } - } - - return markers -} - -func createMissingScaleRefMarker(hpaNode *kubenodes.HorizontalPodAutoscalerNode, scaleRef graphapi.Node, namer osgraph.Namer) osgraph.Marker { - return osgraph.Marker{ - Node: hpaNode, - Severity: osgraph.ErrorSeverity, - RelatedNodes: []graphapi.Node{scaleRef}, - Key: HPAMissingScaleRefError, - Message: fmt.Sprintf("%s is attempting to scale %s/%s, which doesn't exist", - namer.ResourceName(hpaNode), - hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Kind, - hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Name, - ), - } -} - -// FindOverlappingHPAs scans the graph in search of HorizontalPodAutoscalers that are attempting to scale the same set of pods. -// This can occur in two ways: -// - 1. label selectors for two ReplicationControllers/DeploymentConfigs/etc overlap -// - 2. multiple HorizontalPodAutoscalers are attempting to scale the same ReplicationController/DeploymentConfig/etc -// Case 1 is handled by deconflicting the area of influence of ReplicationControllers/DeploymentConfigs/etc, and therefore we -// can assume that it will be handled before this step. 
Therefore, we are only concerned with finding HPAs that are trying to -// scale the same resources. -// -// The algorithm that is used to implement this check is described as follows: -// - create a sub-graph containing only HPA nodes and other nodes that can be scaled, as well as any scaling edges or other -// edges used to connect between objects that can be scaled -// - for every resulting edge in the new sub-graph, create an edge in the reverse direction -// - find the shortest paths between all HPA nodes in the graph -// - shortest paths connecting two horizontal pod autoscalers are used to create markers for the graph -func FindOverlappingHPAs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - nodeFilter := osgraph.NodesOfKind( - kubenodes.HorizontalPodAutoscalerNodeKind, - kubenodes.ReplicationControllerNodeKind, - deploynodes.DeploymentConfigNodeKind, - ) - edgeFilter := osgraph.EdgesOfKind( - kubegraph.ScalingEdgeKind, - deploygraph.DeploymentEdgeKind, - ) - - hpaSubGraph := graph.Subgraph(nodeFilter, edgeFilter) - for _, edge := range hpaSubGraph.Edges() { - osgraph.AddReversedEdge(hpaSubGraph, edge.From(), edge.To(), sets.NewString()) - } - - hpaNodes := hpaSubGraph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) - - for _, firstHPA := range hpaNodes { - // we can use Dijkstra's algorithm as we know we do not have any negative edge weights - shortestPaths := path.DijkstraFrom(firstHPA, hpaSubGraph) - - for _, secondHPA := range hpaNodes { - if firstHPA == secondHPA { - continue - } - - shortestPath, _ := shortestPaths.To(secondHPA) - - if shortestPath == nil { - // if two HPAs have no path between them, no error exists - continue - } - - markers = append(markers, osgraph.Marker{ - Node: firstHPA, - Severity: osgraph.WarningSeverity, - RelatedNodes: shortestPath[1:], - Key: HPAOverlappingScaleRefWarning, - Message: fmt.Sprintf("%s and %s overlap because they both attempt to scale %s", - 
namer.ResourceName(firstHPA), namer.ResourceName(secondHPA), nameList(shortestPath[1:len(shortestPath)-1], namer)), - }) - } - } - - return markers -} - -// nameList outputs a nicely-formatted list of names: -// - given nodes ['a', 'b', 'c'], this will return "one of a, b, or c" -// - given nodes ['a', 'b'], this will return "a or b" -// - given nodes ['a'], this will return "a" -func nameList(nodes []graphapi.Node, namer osgraph.Namer) string { - names := []string{} - - for _, node := range nodes { - names = append(names, namer.ResourceName(node)) - } - - switch len(names) { - case 0: - return "" - case 1: - return names[0] - case 2: - return names[0] + " or " + names[1] - default: - return "one of " + strings.Join(names[:len(names)-1], ", ") + ", or " + names[len(names)-1] - } -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/pod.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/pod.go deleted file mode 100644 index 679112044..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/pod.go +++ /dev/null @@ -1,135 +0,0 @@ -package analysis - -import ( - "fmt" - "time" - - "github.com/MakeNowJust/heredoc" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" -) - -const ( - CrashLoopingPodError = "CrashLoopingPod" - RestartingPodWarning = "RestartingPod" - - RestartThreshold = 5 - // TODO: if you change this, you must change the messages below. - RestartRecentDuration = 10 * time.Minute -) - -// exposed for testing -var nowFn = unversioned.Now - -// FindRestartingPods inspects all Pods to see if they've restarted more than the threshold. logsCommandName is the name of -// the command that should be invoked to see pod logs. 
securityPolicyCommandPattern is a format string accepting two replacement -// variables for fmt.Sprintf - 1, the namespace of the current pod, 2 the service account of the pod. -func FindRestartingPods(g osgraph.Graph, f osgraph.Namer, logsCommandName, securityPolicyCommandPattern string) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastPodNode := range g.NodesByKind(kubegraph.PodNodeKind) { - podNode := uncastPodNode.(*kubegraph.PodNode) - pod, ok := podNode.Object().(*kapi.Pod) - if !ok { - continue - } - - for _, containerStatus := range pod.Status.ContainerStatuses { - containerString := "" - if len(pod.Spec.Containers) > 1 { - containerString = fmt.Sprintf("container %q in ", containerStatus.Name) - } - switch { - case containerCrashLoopBackOff(containerStatus): - var suggestion string - switch { - case containerIsNonRoot(pod, containerStatus.Name): - suggestion = heredoc.Docf(` - The container is starting and exiting repeatedly. This usually means the container is unable - to start, misconfigured, or limited by security restrictions. Check the container logs with - - %s %s -c %s - - Current security policy prevents your containers from being run as the root user. Some images - may fail expecting to be able to change ownership or permissions on directories. Your admin - can grant you access to run containers that need to run as the root user with this command: - - %s - `, logsCommandName, pod.Name, containerStatus.Name, fmt.Sprintf(securityPolicyCommandPattern, pod.Namespace, pod.Spec.ServiceAccountName)) - default: - suggestion = heredoc.Docf(` - The container is starting and exiting repeatedly. This usually means the container is unable - to start, misconfigured, or limited by security restrictions. 
Check the container logs with - - %s %s -c %s - `, logsCommandName, pod.Name, containerStatus.Name) - } - markers = append(markers, osgraph.Marker{ - Node: podNode, - - Severity: osgraph.ErrorSeverity, - Key: CrashLoopingPodError, - Message: fmt.Sprintf("%s%s is crash-looping", containerString, - f.ResourceName(podNode)), - Suggestion: osgraph.Suggestion(suggestion), - }) - case ContainerRestartedRecently(containerStatus, nowFn()): - markers = append(markers, osgraph.Marker{ - Node: podNode, - - Severity: osgraph.WarningSeverity, - Key: RestartingPodWarning, - Message: fmt.Sprintf("%s%s has restarted within the last 10 minutes", containerString, - f.ResourceName(podNode)), - }) - case containerRestartedFrequently(containerStatus): - markers = append(markers, osgraph.Marker{ - Node: podNode, - - Severity: osgraph.WarningSeverity, - Key: RestartingPodWarning, - Message: fmt.Sprintf("%s%s has restarted %d times", containerString, - f.ResourceName(podNode), containerStatus.RestartCount), - }) - } - } - } - - return markers -} - -func containerIsNonRoot(pod *kapi.Pod, container string) bool { - for _, c := range pod.Spec.Containers { - if c.Name != container || c.SecurityContext == nil { - continue - } - switch { - case c.SecurityContext.RunAsUser != nil && *c.SecurityContext.RunAsUser != 0: - //c.SecurityContext.RunAsNonRoot != nil && *c.SecurityContext.RunAsNonRoot, - return true - } - } - return false -} - -func containerCrashLoopBackOff(status kapi.ContainerStatus) bool { - return status.State.Waiting != nil && status.State.Waiting.Reason == "CrashLoopBackOff" -} - -func ContainerRestartedRecently(status kapi.ContainerStatus, now unversioned.Time) bool { - if status.RestartCount == 0 { - return false - } - if status.LastTerminationState.Terminated != nil && now.Sub(status.LastTerminationState.Terminated.FinishedAt.Time) < RestartRecentDuration { - return true - } - return false -} - -func containerRestartedFrequently(status kapi.ContainerStatus) bool { - return 
status.RestartCount > RestartThreshold -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/podspec.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/podspec.go deleted file mode 100644 index e4fb1979b..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/podspec.go +++ /dev/null @@ -1,124 +0,0 @@ -package analysis - -import ( - "fmt" - - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubeedges "github.com/openshift/origin/pkg/api/kubegraph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" -) - -const ( - UnmountableSecretWarning = "UnmountableSecret" - MissingSecretWarning = "MissingSecret" -) - -// FindUnmountableSecrets inspects all PodSpecs for any Secret reference that isn't listed as mountable by the referenced ServiceAccount -func FindUnmountableSecrets(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) { - podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode) - unmountableSecrets := CheckForUnmountableSecrets(g, podSpecNode) - - topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) - topLevelString := f.ResourceName(topLevelNode) - - saString := "MISSING_SA" - saNodes := g.SuccessorNodesByEdgeKind(podSpecNode, kubeedges.ReferencedServiceAccountEdgeKind) - if len(saNodes) > 0 { - saString = f.ResourceName(saNodes[0]) - } - - for _, unmountableSecret := range unmountableSecrets { - markers = append(markers, osgraph.Marker{ - Node: podSpecNode, - RelatedNodes: []graph.Node{unmountableSecret}, - - Severity: osgraph.WarningSeverity, - Key: UnmountableSecretWarning, - Message: fmt.Sprintf("%s is attempting to mount a secret %s disallowed by %s", - topLevelString, f.ResourceName(unmountableSecret), saString), - }) - } - } - - return markers -} - -// FindMissingSecrets inspects all PodSpecs for any 
Secret reference that is a synthetic node (not a pre-existing node in the graph) -func FindMissingSecrets(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastPodSpecNode := range g.NodesByKind(kubegraph.PodSpecNodeKind) { - podSpecNode := uncastPodSpecNode.(*kubegraph.PodSpecNode) - missingSecrets := CheckMissingMountedSecrets(g, podSpecNode) - - topLevelNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) - topLevelString := f.ResourceName(topLevelNode) - - for _, missingSecret := range missingSecrets { - markers = append(markers, osgraph.Marker{ - Node: podSpecNode, - RelatedNodes: []graph.Node{missingSecret}, - - Severity: osgraph.WarningSeverity, - Key: UnmountableSecretWarning, - Message: fmt.Sprintf("%s is attempting to mount a missing secret %s", - topLevelString, f.ResourceName(missingSecret)), - }) - } - } - - return markers -} - -// CheckForUnmountableSecrets checks to be sure that all the referenced secrets are mountable (by service account) -func CheckForUnmountableSecrets(g osgraph.Graph, podSpecNode *kubegraph.PodSpecNode) []*kubegraph.SecretNode { - saNodes := g.SuccessorNodesByNodeAndEdgeKind(podSpecNode, kubegraph.ServiceAccountNodeKind, kubeedges.ReferencedServiceAccountEdgeKind) - saMountableSecrets := []*kubegraph.SecretNode{} - - if len(saNodes) > 0 { - saNode := saNodes[0].(*kubegraph.ServiceAccountNode) - for _, secretNode := range g.SuccessorNodesByNodeAndEdgeKind(saNode, kubegraph.SecretNodeKind, kubeedges.MountableSecretEdgeKind) { - saMountableSecrets = append(saMountableSecrets, secretNode.(*kubegraph.SecretNode)) - } - } - - unmountableSecrets := []*kubegraph.SecretNode{} - - for _, uncastMountedSecretNode := range g.SuccessorNodesByNodeAndEdgeKind(podSpecNode, kubegraph.SecretNodeKind, kubeedges.MountedSecretEdgeKind) { - mountedSecretNode := uncastMountedSecretNode.(*kubegraph.SecretNode) - - mountable := false - for _, mountableSecretNode := range saMountableSecrets { - if 
mountableSecretNode == mountedSecretNode { - mountable = true - break - } - } - - if !mountable { - unmountableSecrets = append(unmountableSecrets, mountedSecretNode) - continue - } - } - - return unmountableSecrets -} - -// CheckMissingMountedSecrets checks to be sure that all the referenced secrets are present (not synthetic) -func CheckMissingMountedSecrets(g osgraph.Graph, podSpecNode *kubegraph.PodSpecNode) []*kubegraph.SecretNode { - missingSecrets := []*kubegraph.SecretNode{} - - for _, uncastMountedSecretNode := range g.SuccessorNodesByNodeAndEdgeKind(podSpecNode, kubegraph.SecretNodeKind, kubeedges.MountedSecretEdgeKind) { - mountedSecretNode := uncastMountedSecretNode.(*kubegraph.SecretNode) - if !mountedSecretNode.Found() { - missingSecrets = append(missingSecrets, mountedSecretNode) - } - } - - return missingSecrets -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/rc.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/rc.go deleted file mode 100644 index a0b164435..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/analysis/rc.go +++ /dev/null @@ -1,56 +0,0 @@ -package analysis - -import ( - "fmt" - "strings" - - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubeedges "github.com/openshift/origin/pkg/api/kubegraph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" -) - -const ( - DuelingReplicationControllerWarning = "DuelingReplicationControllers" -) - -func FindDuelingReplicationControllers(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastRCNode := range g.NodesByKind(kubegraph.ReplicationControllerNodeKind) { - rcNode := uncastRCNode.(*kubegraph.ReplicationControllerNode) - - for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rcNode, kubeedges.ManagedByControllerEdgeKind) { - podNode := uncastPodNode.(*kubegraph.PodNode) - - // check to see if this pod is 
managed by more than one RC - uncastOwningRCs := g.SuccessorNodesByEdgeKind(podNode, kubeedges.ManagedByControllerEdgeKind) - if len(uncastOwningRCs) > 1 { - involvedRCNames := []string{} - relatedNodes := []graph.Node{uncastPodNode} - - for _, uncastOwningRC := range uncastOwningRCs { - if uncastOwningRC.ID() == rcNode.ID() { - continue - } - owningRC := uncastOwningRC.(*kubegraph.ReplicationControllerNode) - involvedRCNames = append(involvedRCNames, f.ResourceName(owningRC)) - - relatedNodes = append(relatedNodes, uncastOwningRC) - } - - markers = append(markers, osgraph.Marker{ - Node: rcNode, - RelatedNodes: relatedNodes, - - Severity: osgraph.WarningSeverity, - Key: DuelingReplicationControllerWarning, - Message: fmt.Sprintf("%s is competing for %s with %s", f.ResourceName(rcNode), f.ResourceName(podNode), strings.Join(involvedRCNames, ", ")), - }) - } - } - } - - return markers -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/edges.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/edges.go deleted file mode 100644 index 053a63fbf..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/edges.go +++ /dev/null @@ -1,248 +0,0 @@ -package kubegraph - -import ( - "strings" - - "github.com/gonum/graph" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" -) - -const ( - // ExposedThroughServiceEdgeKind goes from a PodTemplateSpec or a Pod to Service. The head should make the service's selector. 
- ExposedThroughServiceEdgeKind = "ExposedThroughService" - // ManagedByControllerEdgeKind goes from Pod to controller when the Pod satisfies a controller's label selector - ManagedByControllerEdgeKind = "ManagedByController" - // MountedSecretEdgeKind goes from PodSpec to Secret indicating that is or will be a request to mount a volume with the Secret. - MountedSecretEdgeKind = "MountedSecret" - // MountableSecretEdgeKind goes from ServiceAccount to Secret indicating that the SA allows the Secret to be mounted - MountableSecretEdgeKind = "MountableSecret" - // ReferencedServiceAccountEdgeKind goes from PodSpec to ServiceAccount indicating that Pod is or will be running as the SA. - ReferencedServiceAccountEdgeKind = "ReferencedServiceAccount" - // ScalingEdgeKind goes from HorizontalPodAutoscaler to scaled objects indicating that the HPA scales the object - ScalingEdgeKind = "Scaling" -) - -// AddExposedPodTemplateSpecEdges ensures that a directed edge exists between a service and all the PodTemplateSpecs -// in the graph that match the service selector -func AddExposedPodTemplateSpecEdges(g osgraph.MutableUniqueGraph, node *kubegraph.ServiceNode) { - if node.Service.Spec.Selector == nil { - return - } - query := labels.SelectorFromSet(node.Service.Spec.Selector) - for _, n := range g.(graph.Graph).Nodes() { - switch target := n.(type) { - case *kubegraph.PodTemplateSpecNode: - if target.Namespace != node.Namespace { - continue - } - - if query.Matches(labels.Set(target.PodTemplateSpec.Labels)) { - g.AddEdge(target, node, ExposedThroughServiceEdgeKind) - } - } - } -} - -// AddAllExposedPodTemplateSpecEdges calls AddExposedPodTemplateSpecEdges for every ServiceNode in the graph -func AddAllExposedPodTemplateSpecEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if serviceNode, ok := node.(*kubegraph.ServiceNode); ok { - AddExposedPodTemplateSpecEdges(g, serviceNode) - } - } -} - -// AddExposedPodEdges ensures that a directed 
edge exists between a service and all the pods -// in the graph that match the service selector -func AddExposedPodEdges(g osgraph.MutableUniqueGraph, node *kubegraph.ServiceNode) { - if node.Service.Spec.Selector == nil { - return - } - query := labels.SelectorFromSet(node.Service.Spec.Selector) - for _, n := range g.(graph.Graph).Nodes() { - switch target := n.(type) { - case *kubegraph.PodNode: - if target.Namespace != node.Namespace { - continue - } - if query.Matches(labels.Set(target.Labels)) { - g.AddEdge(target, node, ExposedThroughServiceEdgeKind) - } - } - } -} - -// AddAllExposedPodEdges calls AddExposedPodEdges for every ServiceNode in the graph -func AddAllExposedPodEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if serviceNode, ok := node.(*kubegraph.ServiceNode); ok { - AddExposedPodEdges(g, serviceNode) - } - } -} - -// AddManagedByControllerPodEdges ensures that a directed edge exists between a controller and all the pods -// in the graph that match the label selector -func AddManagedByControllerPodEdges(g osgraph.MutableUniqueGraph, to graph.Node, namespace string, selector map[string]string) { - if selector == nil { - return - } - query := labels.SelectorFromSet(selector) - for _, n := range g.(graph.Graph).Nodes() { - switch target := n.(type) { - case *kubegraph.PodNode: - if target.Namespace != namespace { - continue - } - if query.Matches(labels.Set(target.Labels)) { - g.AddEdge(target, to, ManagedByControllerEdgeKind) - } - } - } -} - -// AddAllManagedByControllerPodEdges calls AddManagedByControllerPodEdges for every node in the graph -// TODO: should do this through an interface (selects pods) -func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - switch cast := node.(type) { - case *kubegraph.ReplicationControllerNode: - AddManagedByControllerPodEdges(g, cast, cast.ReplicationController.Namespace, 
cast.ReplicationController.Spec.Selector) - case *kubegraph.PetSetNode: - // TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments) - AddManagedByControllerPodEdges(g, cast, cast.PetSet.Namespace, cast.PetSet.Spec.Selector.MatchLabels) - } - } -} - -func AddMountedSecretEdges(g osgraph.Graph, podSpec *kubegraph.PodSpecNode) { - //pod specs are always contained. We'll get the toplevel container so that we can pull a namespace from it - containerNode := osgraph.GetTopLevelContainerNode(g, podSpec) - containerObj := g.GraphDescriber.Object(containerNode) - - meta, err := kapi.ObjectMetaFor(containerObj.(runtime.Object)) - if err != nil { - // this should never happen. it means that a podSpec is owned by a top level container that is not a runtime.Object - panic(err) - } - - for _, volume := range podSpec.Volumes { - source := volume.VolumeSource - if source.Secret == nil { - continue - } - - // pod secrets must be in the same namespace - syntheticSecret := &kapi.Secret{} - syntheticSecret.Namespace = meta.Namespace - syntheticSecret.Name = source.Secret.SecretName - - secretNode := kubegraph.FindOrCreateSyntheticSecretNode(g, syntheticSecret) - g.AddEdge(podSpec, secretNode, MountedSecretEdgeKind) - } -} - -func AddAllMountedSecretEdges(g osgraph.Graph) { - for _, node := range g.Nodes() { - if podSpecNode, ok := node.(*kubegraph.PodSpecNode); ok { - AddMountedSecretEdges(g, podSpecNode) - } - } -} - -func AddMountableSecretEdges(g osgraph.Graph, saNode *kubegraph.ServiceAccountNode) { - for _, mountableSecret := range saNode.ServiceAccount.Secrets { - syntheticSecret := &kapi.Secret{} - syntheticSecret.Namespace = saNode.ServiceAccount.Namespace - syntheticSecret.Name = mountableSecret.Name - - secretNode := kubegraph.FindOrCreateSyntheticSecretNode(g, syntheticSecret) - g.AddEdge(saNode, secretNode, MountableSecretEdgeKind) - } -} - -func AddAllMountableSecretEdges(g osgraph.Graph) { - for _, node := range g.Nodes() { - if saNode, ok := 
node.(*kubegraph.ServiceAccountNode); ok { - AddMountableSecretEdges(g, saNode) - } - } -} - -func AddRequestedServiceAccountEdges(g osgraph.Graph, podSpecNode *kubegraph.PodSpecNode) { - //pod specs are always contained. We'll get the toplevel container so that we can pull a namespace from it - containerNode := osgraph.GetTopLevelContainerNode(g, podSpecNode) - containerObj := g.GraphDescriber.Object(containerNode) - - meta, err := kapi.ObjectMetaFor(containerObj.(runtime.Object)) - if err != nil { - panic(err) - } - - // if no SA name is present, admission will set 'default' - name := "default" - if len(podSpecNode.ServiceAccountName) > 0 { - name = podSpecNode.ServiceAccountName - } - - syntheticSA := &kapi.ServiceAccount{} - syntheticSA.Namespace = meta.Namespace - syntheticSA.Name = name - - saNode := kubegraph.FindOrCreateSyntheticServiceAccountNode(g, syntheticSA) - g.AddEdge(podSpecNode, saNode, ReferencedServiceAccountEdgeKind) -} - -func AddAllRequestedServiceAccountEdges(g osgraph.Graph) { - for _, node := range g.Nodes() { - if podSpecNode, ok := node.(*kubegraph.PodSpecNode); ok { - AddRequestedServiceAccountEdges(g, podSpecNode) - } - } -} - -func AddHPAScaleRefEdges(g osgraph.Graph) { - for _, node := range g.NodesByKind(kubegraph.HorizontalPodAutoscalerNodeKind) { - hpaNode := node.(*kubegraph.HorizontalPodAutoscalerNode) - - syntheticMeta := kapi.ObjectMeta{ - Name: hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Name, - Namespace: hpaNode.HorizontalPodAutoscaler.Namespace, - } - - var groupVersionResource unversioned.GroupVersionResource - resource := strings.ToLower(hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Kind) - if groupVersion, err := unversioned.ParseGroupVersion(hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.APIVersion); err == nil { - groupVersionResource = groupVersion.WithResource(resource) - } else { - groupVersionResource = unversioned.GroupVersionResource{Resource: resource} - } - - groupVersionResource, err := 
registered.RESTMapper().ResourceFor(groupVersionResource) - if err != nil { - continue - } - - var syntheticNode graph.Node - switch groupVersionResource.GroupResource() { - case kapi.Resource("replicationcontrollers"): - syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &kapi.ReplicationController{ObjectMeta: syntheticMeta}) - case deployapi.Resource("deploymentconfigs"): - syntheticNode = deploygraph.FindOrCreateSyntheticDeploymentConfigNode(g, &deployapi.DeploymentConfig{ObjectMeta: syntheticMeta}) - default: - continue - } - - g.AddEdge(hpaNode, syntheticNode, ScalingEdgeKind) - } -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/nodes.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/nodes.go deleted file mode 100644 index 5b7368f38..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/nodes.go +++ /dev/null @@ -1,187 +0,0 @@ -package nodes - -import ( - "github.com/gonum/graph" - - kapi "k8s.io/kubernetes/pkg/api" - kapps "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - - osgraph "github.com/openshift/origin/pkg/api/graph" -) - -func EnsurePodNode(g osgraph.MutableUniqueGraph, pod *kapi.Pod) *PodNode { - podNodeName := PodNodeName(pod) - podNode := osgraph.EnsureUnique(g, - podNodeName, - func(node osgraph.Node) graph.Node { - return &PodNode{node, pod} - }, - ).(*PodNode) - - podSpecNode := EnsurePodSpecNode(g, &pod.Spec, pod.Namespace, podNodeName) - g.AddEdge(podNode, podSpecNode, osgraph.ContainsEdgeKind) - - return podNode -} - -func EnsurePodSpecNode(g osgraph.MutableUniqueGraph, podSpec *kapi.PodSpec, namespace string, ownerName osgraph.UniqueName) *PodSpecNode { - return osgraph.EnsureUnique(g, - PodSpecNodeName(podSpec, ownerName), - func(node osgraph.Node) graph.Node { - return &PodSpecNode{node, podSpec, namespace, ownerName} - }, - ).(*PodSpecNode) -} - -// EnsureServiceNode adds the provided service to the graph if 
it does not already exist. -func EnsureServiceNode(g osgraph.MutableUniqueGraph, svc *kapi.Service) *ServiceNode { - return osgraph.EnsureUnique(g, - ServiceNodeName(svc), - func(node osgraph.Node) graph.Node { - return &ServiceNode{node, svc, true} - }, - ).(*ServiceNode) -} - -// FindOrCreateSyntheticServiceNode returns the existing service node or creates a synthetic node in its place -func FindOrCreateSyntheticServiceNode(g osgraph.MutableUniqueGraph, svc *kapi.Service) *ServiceNode { - return osgraph.EnsureUnique(g, - ServiceNodeName(svc), - func(node osgraph.Node) graph.Node { - return &ServiceNode{node, svc, false} - }, - ).(*ServiceNode) -} - -func EnsureServiceAccountNode(g osgraph.MutableUniqueGraph, o *kapi.ServiceAccount) *ServiceAccountNode { - return osgraph.EnsureUnique(g, - ServiceAccountNodeName(o), - func(node osgraph.Node) graph.Node { - return &ServiceAccountNode{node, o, true} - }, - ).(*ServiceAccountNode) -} - -func FindOrCreateSyntheticServiceAccountNode(g osgraph.MutableUniqueGraph, o *kapi.ServiceAccount) *ServiceAccountNode { - return osgraph.EnsureUnique(g, - ServiceAccountNodeName(o), - func(node osgraph.Node) graph.Node { - return &ServiceAccountNode{node, o, false} - }, - ).(*ServiceAccountNode) -} - -func EnsureSecretNode(g osgraph.MutableUniqueGraph, o *kapi.Secret) *SecretNode { - return osgraph.EnsureUnique(g, - SecretNodeName(o), - func(node osgraph.Node) graph.Node { - return &SecretNode{node, o, true} - }, - ).(*SecretNode) -} - -func FindOrCreateSyntheticSecretNode(g osgraph.MutableUniqueGraph, o *kapi.Secret) *SecretNode { - return osgraph.EnsureUnique(g, - SecretNodeName(o), - func(node osgraph.Node) graph.Node { - return &SecretNode{node, o, false} - }, - ).(*SecretNode) -} - -// EnsureReplicationControllerNode adds a graph node for the ReplicationController if it does not already exist. 
-func EnsureReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.ReplicationController) *ReplicationControllerNode { - rcNodeName := ReplicationControllerNodeName(rc) - rcNode := osgraph.EnsureUnique(g, - rcNodeName, - func(node osgraph.Node) graph.Node { - return &ReplicationControllerNode{node, rc, true} - }, - ).(*ReplicationControllerNode) - - rcSpecNode := EnsureReplicationControllerSpecNode(g, &rc.Spec, rc.Namespace, rcNodeName) - g.AddEdge(rcNode, rcSpecNode, osgraph.ContainsEdgeKind) - - return rcNode -} - -func FindOrCreateSyntheticReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.ReplicationController) *ReplicationControllerNode { - return osgraph.EnsureUnique(g, - ReplicationControllerNodeName(rc), - func(node osgraph.Node) graph.Node { - return &ReplicationControllerNode{node, rc, false} - }, - ).(*ReplicationControllerNode) -} - -func EnsureReplicationControllerSpecNode(g osgraph.MutableUniqueGraph, rcSpec *kapi.ReplicationControllerSpec, namespace string, ownerName osgraph.UniqueName) *ReplicationControllerSpecNode { - rcSpecName := ReplicationControllerSpecNodeName(rcSpec, ownerName) - rcSpecNode := osgraph.EnsureUnique(g, - rcSpecName, - func(node osgraph.Node) graph.Node { - return &ReplicationControllerSpecNode{node, rcSpec, namespace, ownerName} - }, - ).(*ReplicationControllerSpecNode) - - if rcSpec.Template != nil { - ptSpecNode := EnsurePodTemplateSpecNode(g, rcSpec.Template, namespace, rcSpecName) - g.AddEdge(rcSpecNode, ptSpecNode, osgraph.ContainsEdgeKind) - } - - return rcSpecNode -} - -func EnsurePodTemplateSpecNode(g osgraph.MutableUniqueGraph, ptSpec *kapi.PodTemplateSpec, namespace string, ownerName osgraph.UniqueName) *PodTemplateSpecNode { - ptSpecName := PodTemplateSpecNodeName(ptSpec, ownerName) - ptSpecNode := osgraph.EnsureUnique(g, - ptSpecName, - func(node osgraph.Node) graph.Node { - return &PodTemplateSpecNode{node, ptSpec, namespace, ownerName} - }, - ).(*PodTemplateSpecNode) - - podSpecNode := 
EnsurePodSpecNode(g, &ptSpec.Spec, namespace, ptSpecName) - g.AddEdge(ptSpecNode, podSpecNode, osgraph.ContainsEdgeKind) - - return ptSpecNode -} - -func EnsureHorizontalPodAutoscalerNode(g osgraph.MutableUniqueGraph, hpa *autoscaling.HorizontalPodAutoscaler) *HorizontalPodAutoscalerNode { - return osgraph.EnsureUnique(g, - HorizontalPodAutoscalerNodeName(hpa), - func(node osgraph.Node) graph.Node { - return &HorizontalPodAutoscalerNode{Node: node, HorizontalPodAutoscaler: hpa} - }, - ).(*HorizontalPodAutoscalerNode) -} - -func EnsurePetSetNode(g osgraph.MutableUniqueGraph, petset *kapps.PetSet) *PetSetNode { - nodeName := PetSetNodeName(petset) - node := osgraph.EnsureUnique(g, - nodeName, - func(node osgraph.Node) graph.Node { - return &PetSetNode{node, petset} - }, - ).(*PetSetNode) - - specNode := EnsurePetSetSpecNode(g, &petset.Spec, petset.Namespace, nodeName) - g.AddEdge(node, specNode, osgraph.ContainsEdgeKind) - - return node -} - -func EnsurePetSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.PetSetSpec, namespace string, ownerName osgraph.UniqueName) *PetSetSpecNode { - specName := PetSetSpecNodeName(spec, ownerName) - specNode := osgraph.EnsureUnique(g, - specName, - func(node osgraph.Node) graph.Node { - return &PetSetSpecNode{node, spec, namespace, ownerName} - }, - ).(*PetSetSpecNode) - - ptSpecNode := EnsurePodTemplateSpecNode(g, &spec.Template, namespace, specName) - g.AddEdge(specNode, ptSpecNode, osgraph.ContainsEdgeKind) - - return specNode -} diff --git a/vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/types.go b/vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/types.go deleted file mode 100644 index 78f4c452c..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/kubegraph/nodes/types.go +++ /dev/null @@ -1,325 +0,0 @@ -package nodes - -import ( - "fmt" - "reflect" - - kapi "k8s.io/kubernetes/pkg/api" - kapps "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - - osgraph 
"github.com/openshift/origin/pkg/api/graph" -) - -var ( - ServiceNodeKind = reflect.TypeOf(kapi.Service{}).Name() - PodNodeKind = reflect.TypeOf(kapi.Pod{}).Name() - PodSpecNodeKind = reflect.TypeOf(kapi.PodSpec{}).Name() - PodTemplateSpecNodeKind = reflect.TypeOf(kapi.PodTemplateSpec{}).Name() - ReplicationControllerNodeKind = reflect.TypeOf(kapi.ReplicationController{}).Name() - ReplicationControllerSpecNodeKind = reflect.TypeOf(kapi.ReplicationControllerSpec{}).Name() - ServiceAccountNodeKind = reflect.TypeOf(kapi.ServiceAccount{}).Name() - SecretNodeKind = reflect.TypeOf(kapi.Secret{}).Name() - HorizontalPodAutoscalerNodeKind = reflect.TypeOf(autoscaling.HorizontalPodAutoscaler{}).Name() - PetSetNodeKind = reflect.TypeOf(kapps.PetSet{}).Name() - PetSetSpecNodeKind = reflect.TypeOf(kapps.PetSetSpec{}).Name() -) - -func ServiceNodeName(o *kapi.Service) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ServiceNodeKind, o) -} - -type ServiceNode struct { - osgraph.Node - *kapi.Service - - IsFound bool -} - -func (n ServiceNode) Object() interface{} { - return n.Service -} - -func (n ServiceNode) String() string { - return string(ServiceNodeName(n.Service)) -} - -func (*ServiceNode) Kind() string { - return ServiceNodeKind -} - -func (n ServiceNode) Found() bool { - return n.IsFound -} - -func PodNodeName(o *kapi.Pod) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(PodNodeKind, o) -} - -type PodNode struct { - osgraph.Node - *kapi.Pod -} - -func (n PodNode) Object() interface{} { - return n.Pod -} - -func (n PodNode) String() string { - return string(PodNodeName(n.Pod)) -} - -func (n PodNode) UniqueName() osgraph.UniqueName { - return PodNodeName(n.Pod) -} - -func (*PodNode) Kind() string { - return PodNodeKind -} - -func PodSpecNodeName(o *kapi.PodSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { - return osgraph.UniqueName(fmt.Sprintf("%s|%v", PodSpecNodeKind, ownerName)) -} - -type PodSpecNode struct { - 
osgraph.Node - *kapi.PodSpec - Namespace string - - OwnerName osgraph.UniqueName -} - -func (n PodSpecNode) Object() interface{} { - return n.PodSpec -} - -func (n PodSpecNode) String() string { - return string(n.UniqueName()) -} - -func (n PodSpecNode) UniqueName() osgraph.UniqueName { - return PodSpecNodeName(n.PodSpec, n.OwnerName) -} - -func (*PodSpecNode) Kind() string { - return PodSpecNodeKind -} - -func ReplicationControllerNodeName(o *kapi.ReplicationController) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ReplicationControllerNodeKind, o) -} - -type ReplicationControllerNode struct { - osgraph.Node - ReplicationController *kapi.ReplicationController - - IsFound bool -} - -func (n ReplicationControllerNode) Found() bool { - return n.IsFound -} - -func (n ReplicationControllerNode) Object() interface{} { - return n.ReplicationController -} - -func (n ReplicationControllerNode) String() string { - return string(ReplicationControllerNodeName(n.ReplicationController)) -} - -func (n ReplicationControllerNode) UniqueName() osgraph.UniqueName { - return ReplicationControllerNodeName(n.ReplicationController) -} - -func (*ReplicationControllerNode) Kind() string { - return ReplicationControllerNodeKind -} - -func ReplicationControllerSpecNodeName(o *kapi.ReplicationControllerSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { - return osgraph.UniqueName(fmt.Sprintf("%s|%v", ReplicationControllerSpecNodeKind, ownerName)) -} - -type ReplicationControllerSpecNode struct { - osgraph.Node - ReplicationControllerSpec *kapi.ReplicationControllerSpec - Namespace string - - OwnerName osgraph.UniqueName -} - -func (n ReplicationControllerSpecNode) Object() interface{} { - return n.ReplicationControllerSpec -} - -func (n ReplicationControllerSpecNode) String() string { - return string(n.UniqueName()) -} - -func (n ReplicationControllerSpecNode) UniqueName() osgraph.UniqueName { - return 
ReplicationControllerSpecNodeName(n.ReplicationControllerSpec, n.OwnerName) -} - -func (*ReplicationControllerSpecNode) Kind() string { - return ReplicationControllerSpecNodeKind -} - -func PodTemplateSpecNodeName(o *kapi.PodTemplateSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { - return osgraph.UniqueName(fmt.Sprintf("%s|%v", PodTemplateSpecNodeKind, ownerName)) -} - -type PodTemplateSpecNode struct { - osgraph.Node - *kapi.PodTemplateSpec - Namespace string - - OwnerName osgraph.UniqueName -} - -func (n PodTemplateSpecNode) Object() interface{} { - return n.PodTemplateSpec -} - -func (n PodTemplateSpecNode) String() string { - return string(n.UniqueName()) -} - -func (n PodTemplateSpecNode) UniqueName() osgraph.UniqueName { - return PodTemplateSpecNodeName(n.PodTemplateSpec, n.OwnerName) -} - -func (*PodTemplateSpecNode) Kind() string { - return PodTemplateSpecNodeKind -} - -func ServiceAccountNodeName(o *kapi.ServiceAccount) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ServiceAccountNodeKind, o) -} - -type ServiceAccountNode struct { - osgraph.Node - *kapi.ServiceAccount - - IsFound bool -} - -func (n ServiceAccountNode) Found() bool { - return n.IsFound -} - -func (n ServiceAccountNode) Object() interface{} { - return n.ServiceAccount -} - -func (n ServiceAccountNode) String() string { - return string(ServiceAccountNodeName(n.ServiceAccount)) -} - -func (*ServiceAccountNode) Kind() string { - return ServiceAccountNodeKind -} - -func SecretNodeName(o *kapi.Secret) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(SecretNodeKind, o) -} - -type SecretNode struct { - osgraph.Node - *kapi.Secret - - IsFound bool -} - -func (n SecretNode) Found() bool { - return n.IsFound -} - -func (n SecretNode) Object() interface{} { - return n.Secret -} - -func (n SecretNode) String() string { - return string(SecretNodeName(n.Secret)) -} - -func (*SecretNode) Kind() string { - return SecretNodeKind -} - -func 
HorizontalPodAutoscalerNodeName(o *autoscaling.HorizontalPodAutoscaler) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(HorizontalPodAutoscalerNodeKind, o) -} - -type HorizontalPodAutoscalerNode struct { - osgraph.Node - HorizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler -} - -func (n HorizontalPodAutoscalerNode) Object() interface{} { - return n.HorizontalPodAutoscaler -} - -func (n HorizontalPodAutoscalerNode) String() string { - return string(n.UniqueName()) -} - -func (*HorizontalPodAutoscalerNode) Kind() string { - return HorizontalPodAutoscalerNodeKind -} - -func (n HorizontalPodAutoscalerNode) UniqueName() osgraph.UniqueName { - return HorizontalPodAutoscalerNodeName(n.HorizontalPodAutoscaler) -} - -func PetSetNodeName(o *kapps.PetSet) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(PetSetNodeKind, o) -} - -type PetSetNode struct { - osgraph.Node - PetSet *kapps.PetSet -} - -func (n PetSetNode) Object() interface{} { - return n.PetSet -} - -func (n PetSetNode) String() string { - return string(n.UniqueName()) -} - -func (n PetSetNode) UniqueName() osgraph.UniqueName { - return PetSetNodeName(n.PetSet) -} - -func (*PetSetNode) Kind() string { - return PetSetNodeKind -} - -func PetSetSpecNodeName(o *kapps.PetSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { - return osgraph.UniqueName(fmt.Sprintf("%s|%v", PetSetSpecNodeKind, ownerName)) -} - -type PetSetSpecNode struct { - osgraph.Node - PetSetSpec *kapps.PetSetSpec - Namespace string - - OwnerName osgraph.UniqueName -} - -func (n PetSetSpecNode) Object() interface{} { - return n.PetSetSpec -} - -func (n PetSetSpecNode) String() string { - return string(n.UniqueName()) -} - -func (n PetSetSpecNode) UniqueName() osgraph.UniqueName { - return PetSetSpecNodeName(n.PetSetSpec, n.OwnerName) -} - -func (*PetSetSpecNode) Kind() string { - return PetSetSpecNodeKind -} diff --git a/vendor/github.com/openshift/origin/pkg/api/restmapper/discovery.go 
b/vendor/github.com/openshift/origin/pkg/api/restmapper/discovery.go deleted file mode 100644 index 3e99ea245..000000000 --- a/vendor/github.com/openshift/origin/pkg/api/restmapper/discovery.go +++ /dev/null @@ -1,176 +0,0 @@ -package restmapper - -import ( - "sync" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/client/typed/discovery" -) - -type discoveryRESTMapper struct { - discoveryClient discovery.DiscoveryInterface - - delegate meta.RESTMapper - - initLock sync.Mutex -} - -// NewDiscoveryRESTMapper that initializes using the discovery APIs, relying on group ordering and preferred versions -// to build its appropriate priorities. Only versions are registered with API machinery are added now. -// TODO make this work with generic resources at some point. For now, this handles enabled and disabled resources cleanly. -func NewDiscoveryRESTMapper(discoveryClient discovery.DiscoveryInterface) meta.RESTMapper { - return &discoveryRESTMapper{discoveryClient: discoveryClient} -} - -func (d *discoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { - d.initLock.Lock() - defer d.initLock.Unlock() - - if d.delegate != nil { - return d.delegate, nil - } - - serverGroups, err := d.discoveryClient.ServerGroups() - if err != nil { - return nil, err - } - - // always prefer our default group for now. The version should be discovered from discovery, but this will hold us - // for quite some time. 
- resourcePriority := []unversioned.GroupVersionResource{ - {Group: kapi.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, - } - kindPriority := []unversioned.GroupVersionKind{ - {Group: kapi.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, - } - groupPriority := []string{} - - unionMapper := meta.MultiRESTMapper{} - - for _, group := range serverGroups.Groups { - if len(group.Versions) == 0 { - continue - } - groupPriority = append(groupPriority, group.Name) - - if len(group.PreferredVersion.Version) != 0 { - preferredVersion := unversioned.GroupVersion{Group: group.Name, Version: group.PreferredVersion.Version} - if registered.IsEnabledVersion(preferredVersion) { - resourcePriority = append(resourcePriority, preferredVersion.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, preferredVersion.WithKind(meta.AnyKind)) - } - } - - for _, discoveryVersion := range group.Versions { - version := unversioned.GroupVersion{Group: group.Name, Version: discoveryVersion.Version} - if !registered.IsEnabledVersion(version) { - continue - } - groupMeta, err := registered.Group(group.Name) - if err != nil { - return nil, err - } - resources, err := d.discoveryClient.ServerResourcesForGroupVersion(version.String()) - if err != nil { - return nil, err - } - - versionMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{version}, groupMeta.InterfacesFor) - for _, resource := range resources.APIResources { - // TODO properly handle resource versus kind - gvk := version.WithKind(resource.Kind) - - scope := meta.RESTScopeNamespace - if !resource.Namespaced { - scope = meta.RESTScopeRoot - } - versionMapper.Add(gvk, scope) - - // TODO formalize this by checking to see if they support listing - versionMapper.Add(version.WithKind(resource.Kind+"List"), scope) - } - - // we need to add List. 
Its a special case of something we need that isn't in the discovery doc - if group.Name == kapi.GroupName { - versionMapper.Add(version.WithKind("List"), meta.RESTScopeNamespace) - } - - unionMapper = append(unionMapper, versionMapper) - } - } - - for _, group := range groupPriority { - resourcePriority = append(resourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) - kindPriority = append(kindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) - } - - d.delegate = meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - return d.delegate, nil -} - -func (d *discoveryRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - delegate, err := d.getDelegate() - if err != nil { - return unversioned.GroupVersionKind{}, err - } - return delegate.KindFor(resource) -} - -func (d *discoveryRESTMapper) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - delegate, err := d.getDelegate() - if err != nil { - return nil, err - } - return delegate.KindsFor(resource) -} - -func (d *discoveryRESTMapper) ResourceFor(input unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - delegate, err := d.getDelegate() - if err != nil { - return unversioned.GroupVersionResource{}, err - } - return delegate.ResourceFor(input) -} - -func (d *discoveryRESTMapper) ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - delegate, err := d.getDelegate() - if err != nil { - return nil, err - } - return delegate.ResourcesFor(input) -} - -func (d *discoveryRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - delegate, err := d.getDelegate() - if err != nil { - return nil, err - } - return delegate.RESTMapping(gk, versions...) 
-} - -func (d *discoveryRESTMapper) RESTMappings(gk unversioned.GroupKind) ([]*meta.RESTMapping, error) { - delegate, err := d.getDelegate() - if err != nil { - return nil, err - } - return delegate.RESTMappings(gk) -} - -func (d *discoveryRESTMapper) AliasesForResource(resource string) ([]string, bool) { - delegate, err := d.getDelegate() - if err != nil { - return nil, false - } - return delegate.AliasesForResource(resource) -} - -func (d *discoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - delegate, err := d.getDelegate() - if err != nil { - return resource, err - } - return delegate.ResourceSingularizer(resource) -} diff --git a/vendor/github.com/openshift/origin/pkg/auth/api/types.go b/vendor/github.com/openshift/origin/pkg/auth/api/types.go index 9b62c2ad2..5c8e6ab4d 100644 --- a/vendor/github.com/openshift/origin/pkg/auth/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/auth/api/types.go @@ -41,7 +41,7 @@ type UserIdentityMapper interface { type Client interface { GetId() string - ValidateSecret(secret string) bool + GetSecret() string GetRedirectUri() string GetUserData() interface{} } diff --git a/vendor/github.com/openshift/origin/pkg/auth/authenticator/request/x509request/x509.go b/vendor/github.com/openshift/origin/pkg/auth/authenticator/request/x509request/x509.go index 8f775e772..033356160 100644 --- a/vendor/github.com/openshift/origin/pkg/auth/authenticator/request/x509request/x509.go +++ b/vendor/github.com/openshift/origin/pkg/auth/authenticator/request/x509request/x509.go @@ -40,28 +40,34 @@ func New(opts x509.VerifyOptions, user UserConversion) *Authenticator { // AuthenticateRequest authenticates the request using presented client certificates func (a *Authenticator) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - if req.TLS == nil { + if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { return nil, false, nil } + // Use intermediates, if provided 
+ optsCopy := a.opts + if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { + optsCopy.Intermediates = x509.NewCertPool() + for _, intermediate := range req.TLS.PeerCertificates[1:] { + optsCopy.Intermediates.AddCert(intermediate) + } + } + + chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy) + if err != nil { + return nil, false, err + } + var errlist []error - for _, cert := range req.TLS.PeerCertificates { - chains, err := cert.Verify(a.opts) + for _, chain := range chains { + user, ok, err := a.user.User(chain) if err != nil { errlist = append(errlist, err) continue } - for _, chain := range chains { - user, ok, err := a.user.User(chain) - if err != nil { - errlist = append(errlist, err) - continue - } - - if ok { - return user, ok, err - } + if ok { + return user, ok, err } } return nil, false, kerrors.NewAggregate(errlist) @@ -81,25 +87,28 @@ func NewVerifier(opts x509.VerifyOptions, auth authenticator.Request, allowedCom return &Verifier{opts, auth, allowedCommonNames} } -// AuthenticateRequest verifies the presented client certificates, then delegates to the wrapped auth +// AuthenticateRequest verifies the presented client certificate, then delegates to the wrapped auth func (a *Verifier) AuthenticateRequest(req *http.Request) (user.Info, bool, error) { - if req.TLS == nil { + if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { return nil, false, nil } - var errlist []error - for _, cert := range req.TLS.PeerCertificates { - if _, err := cert.Verify(a.opts); err != nil { - errlist = append(errlist, err) - continue - } - if err := a.verifySubject(cert.Subject); err != nil { - errlist = append(errlist, err) - continue + // Use intermediates, if provided + optsCopy := a.opts + if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { + optsCopy.Intermediates = x509.NewCertPool() + for _, intermediate := range req.TLS.PeerCertificates[1:] { + optsCopy.Intermediates.AddCert(intermediate) } - return 
a.auth.AuthenticateRequest(req) } - return nil, false, kerrors.NewAggregate(errlist) + + if _, err := req.TLS.PeerCertificates[0].Verify(optsCopy); err != nil { + return nil, false, err + } + if err := a.verifySubject(req.TLS.PeerCertificates[0].Subject); err != nil { + return nil, false, err + } + return a.auth.AuthenticateRequest(req) } func (a *Verifier) verifySubject(subject pkix.Name) error { diff --git a/vendor/github.com/openshift/origin/pkg/authorization/api/register.go b/vendor/github.com/openshift/origin/pkg/authorization/api/register.go index b52b75c64..710c073b6 100644 --- a/vendor/github.com/openshift/origin/pkg/authorization/api/register.go +++ b/vendor/github.com/openshift/origin/pkg/authorization/api/register.go @@ -39,6 +39,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RoleList{}, &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, &ResourceAccessReview{}, &SubjectAccessReview{}, &LocalResourceAccessReview{}, diff --git a/vendor/github.com/openshift/origin/pkg/authorization/api/synthetic.go b/vendor/github.com/openshift/origin/pkg/authorization/api/synthetic.go index 5e235e835..111296200 100644 --- a/vendor/github.com/openshift/origin/pkg/authorization/api/synthetic.go +++ b/vendor/github.com/openshift/origin/pkg/authorization/api/synthetic.go @@ -9,6 +9,7 @@ const ( NodeMetricsResource = "nodes/metrics" NodeStatsResource = "nodes/stats" + NodeSpecResource = "nodes/spec" NodeLogResource = "nodes/log" RestrictedEndpointsResource = "endpoints/restricted" diff --git a/vendor/github.com/openshift/origin/pkg/authorization/api/types.go b/vendor/github.com/openshift/origin/pkg/authorization/api/types.go index 47cf816e9..fc325d040 100644 --- a/vendor/github.com/openshift/origin/pkg/authorization/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/authorization/api/types.go @@ -50,6 +50,7 @@ var DiscoveryRule = PolicyRule{ "/apis", "/apis/*", "/oapi", "/oapi/*", "/osapi", "/osapi/", // these cannot be 
removed until we can drop support for pre 3.1 clients + "/.well-known", "/.well-known/*", ), } @@ -159,6 +160,27 @@ type SelfSubjectRulesReviewSpec struct { Scopes []string } +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +type SubjectRulesReview struct { + unversioned.TypeMeta + + // Spec adds information about how to conduct the check + Spec SubjectRulesReviewSpec + + // Status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +type SubjectRulesReviewSpec struct { + // User is optional. At least one of User and Groups must be specified. + User string + // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + Groups []string + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". 
+ Scopes []string +} + // SubjectRulesReviewStatus is contains the result of a rules check type SubjectRulesReviewStatus struct { // Rules is the list of rules (no particular sort) that are allowed for the subject diff --git a/vendor/github.com/openshift/origin/pkg/authorization/api/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/authorization/api/zz_generated.deepcopy.go index 094e0d7dd..34678557d 100644 --- a/vendor/github.com/openshift/origin/pkg/authorization/api/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/authorization/api/zz_generated.deepcopy.go @@ -48,6 +48,8 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SelfSubjectRulesReviewSpec, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SubjectAccessReviewResponse, InType: reflect.TypeOf(&SubjectAccessReviewResponse{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SubjectRulesReview, InType: reflect.TypeOf(&SubjectRulesReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SubjectRulesReviewSpec, InType: reflect.TypeOf(&SubjectRulesReviewSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SubjectRulesReviewStatus, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, ) } @@ -669,6 +671,44 @@ func DeepCopy_api_SubjectAccessReviewResponse(in interface{}, out interface{}, c } } +func DeepCopy_api_SubjectRulesReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectRulesReview) + out := out.(*SubjectRulesReview) + out.TypeMeta = in.TypeMeta + if err := DeepCopy_api_SubjectRulesReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_SubjectRulesReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } 
+} + +func DeepCopy_api_SubjectRulesReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectRulesReviewSpec) + out := out.(*SubjectRulesReviewSpec) + out.User = in.User + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } else { + out.Groups = nil + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } else { + out.Scopes = nil + } + return nil + } +} + func DeepCopy_api_SubjectRulesReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SubjectRulesReviewStatus) diff --git a/vendor/github.com/openshift/origin/pkg/authorization/reaper/cluster_role.go b/vendor/github.com/openshift/origin/pkg/authorization/reaper/cluster_role.go deleted file mode 100644 index 6e4739cc6..000000000 --- a/vendor/github.com/openshift/origin/pkg/authorization/reaper/cluster_role.go +++ /dev/null @@ -1,60 +0,0 @@ -package reaper - -import ( - "time" - - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/openshift/origin/pkg/client" -) - -func NewClusterRoleReaper(roleClient client.ClusterRolesInterface, clusterBindingClient client.ClusterRoleBindingsInterface, bindingClient client.RoleBindingsNamespacer) kubectl.Reaper { - return &ClusterRoleReaper{ - roleClient: roleClient, - clusterBindingClient: clusterBindingClient, - bindingClient: bindingClient, - } -} - -type ClusterRoleReaper struct { - roleClient client.ClusterRolesInterface - clusterBindingClient client.ClusterRoleBindingsInterface - bindingClient client.RoleBindingsNamespacer -} - -// Stop on a reaper is actually used for deletion. In this case, we'll delete referencing clusterroleclusterBindings -// then delete the clusterrole. 
-func (r *ClusterRoleReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error { - clusterBindings, err := r.clusterBindingClient.ClusterRoleBindings().List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, clusterBinding := range clusterBindings.Items { - if clusterBinding.RoleRef.Name == name { - if err := r.clusterBindingClient.ClusterRoleBindings().Delete(clusterBinding.Name); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot delete clusterrolebinding/%s: %v", clusterBinding.Name, err) - } - } - } - - namespacedBindings, err := r.bindingClient.RoleBindings(kapi.NamespaceNone).List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, namespacedBinding := range namespacedBindings.Items { - if namespacedBinding.RoleRef.Namespace == kapi.NamespaceNone && namespacedBinding.RoleRef.Name == name { - if err := r.bindingClient.RoleBindings(namespacedBinding.Namespace).Delete(namespacedBinding.Name); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot delete rolebinding/%s in %s: %v", namespacedBinding.Name, namespacedBinding.Namespace, err) - } - } - } - - if err := r.roleClient.ClusterRoles().Delete(name); err != nil && !kerrors.IsNotFound(err) { - return err - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/authorization/reaper/role.go b/vendor/github.com/openshift/origin/pkg/authorization/reaper/role.go deleted file mode 100644 index 1baffd37c..000000000 --- a/vendor/github.com/openshift/origin/pkg/authorization/reaper/role.go +++ /dev/null @@ -1,47 +0,0 @@ -package reaper - -import ( - "time" - - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/openshift/origin/pkg/client" -) - -func NewRoleReaper(roleClient client.RolesNamespacer, bindingClient client.RoleBindingsNamespacer) kubectl.Reaper { - return &RoleReaper{ - roleClient: 
roleClient, - bindingClient: bindingClient, - } -} - -type RoleReaper struct { - roleClient client.RolesNamespacer - bindingClient client.RoleBindingsNamespacer -} - -// Stop on a reaper is actually used for deletion. In this case, we'll delete referencing rolebindings -// then delete the role. -func (r *RoleReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error { - bindings, err := r.bindingClient.RoleBindings(namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - for _, binding := range bindings.Items { - if binding.RoleRef.Namespace == namespace && binding.RoleRef.Name == name { - if err := r.bindingClient.RoleBindings(namespace).Delete(binding.Name); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot delete rolebinding/%s: %v", binding.Name, err) - } - } - } - - if err := r.roleClient.Roles(namespace).Delete(name); err != nil && !kerrors.IsNotFound(err) { - return err - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/build/api/types.go b/vendor/github.com/openshift/origin/pkg/build/api/types.go index 7ca948a44..5910cc196 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/build/api/types.go @@ -19,6 +19,12 @@ const ( BuildCloneAnnotation = "openshift.io/build.clone-of" // BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build BuildPodNameAnnotation = "openshift.io/build.pod-name" + // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information + BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json" + // BuildJenkinsLogURLAnnotation is an annotation holding a link to the Jenkins build console log + BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url" + // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build + BuildJenkinsBuildURIAnnotation = 
"openshift.io/jenkins-build-uri" // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run. // NOTE: The value for this label may not contain the entire Build name because it will be // truncated to maximum label length. @@ -106,8 +112,22 @@ type CommonSpec struct { // be active on a node before the system actively tries to terminate the // build; value must be positive integer. CompletionDeadlineSeconds *int64 + + // NodeSelector is a selector which must be true for the build pod to fit on a node + // If nil, it can be overridden by default build nodeselector values for the cluster. + // If set to an empty map or a map with any values, default build nodeselector values + // are ignored. + NodeSelector map[string]string } +const ( + BuildTriggerCauseManualMsg = "Manually triggered" + BuildTriggerCauseConfigMsg = "Build configuration change" + BuildTriggerCauseImageMsg = "Image change" + BuildTriggerCauseGithubMsg = "GitHub WebHook" + BuildTriggerCauseGenericMsg = "Generic WebHook" +) + // BuildTriggerCause holds information about a triggered build. It is used for // displaying build trigger data for each build and build configuration in oc // describe. It is also used to describe which triggers led to the most recent @@ -240,32 +260,32 @@ const ( // StatusReasonCannotCreateBuildPodSpec is an error condition when the build // strategy cannot create a build pod spec. - StatusReasonCannotCreateBuildPodSpec = "CannotCreateBuildPodSpec" + StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec" // StatusReasonCannotCreateBuildPod is an error condition when a build pod // cannot be created. - StatusReasonCannotCreateBuildPod = "CannotCreateBuildPod" + StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod" // StatusReasonInvalidOutputReference is an error condition when the build // output is an invalid reference. 
- StatusReasonInvalidOutputReference = "InvalidOutputReference" + StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference" // StatusReasonCancelBuildFailed is an error condition when cancelling a build // fails. - StatusReasonCancelBuildFailed = "CancelBuildFailed" + StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed" // StatusReasonBuildPodDeleted is an error condition when the build pod is // deleted before build completion. - StatusReasonBuildPodDeleted = "BuildPodDeleted" + StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted" // StatusReasonExceededRetryTimeout is an error condition when the build has // not completed and retrying the build times out. - StatusReasonExceededRetryTimeout = "ExceededRetryTimeout" + StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout" // StatusReasonMissingPushSecret indicates that the build is missing required // secret for pushing the output image. // The build will stay in the pending state until the secret is created, or the build times out. - StatusReasonMissingPushSecret = "MissingPushSecret" + StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" ) // BuildSource is the input used for the build. @@ -387,6 +407,18 @@ type GitSourceRevision struct { Message string } +// ProxyConfig defines what proxies to use for an operation +type ProxyConfig struct { + // HTTPProxy is a proxy used to reach the git repository over http + HTTPProxy *string + + // HTTPSProxy is a proxy used to reach the git repository over https + HTTPSProxy *string + + // NoProxy is the list of domains for which the proxy should not be used + NoProxy *string +} + // GitBuildSource defines the parameters of a Git SCM type GitBuildSource struct { // URI points to the source that will be built. The structure of the source @@ -396,11 +428,8 @@ type GitBuildSource struct { // Ref is the branch/tag/ref to build. 
Ref string - // HTTPProxy is a proxy used to reach the git repository over http - HTTPProxy *string - - // HTTPSProxy is a proxy used to reach the git repository over https - HTTPSProxy *string + // ProxyConfig defines the proxies to use for the git clone operation + ProxyConfig } // SourceControlUser defines the identity of a user of source control @@ -646,6 +675,19 @@ type BuildOutput struct { // up the authentication for executing the Docker push to authentication // enabled Docker Registry (or Docker Hub). PushSecret *kapi.LocalObjectReference + + // ImageLabels define a list of labels that are applied to the resulting image. If there + // are multiple labels with the same name then the last one in the list is used. + ImageLabels []ImageLabel +} + +// ImageLabel represents a label applied to the resulting image. +type ImageLabel struct { + // Name defines the name of the label. It must have non-zero length. + Name string + + // Value defines the literal value of the label. + Value string } // BuildConfig is a template which can be used to create new builds. 
diff --git a/vendor/github.com/openshift/origin/pkg/build/api/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/build/api/zz_generated.deepcopy.go index 745953ba3..c472d5766 100644 --- a/vendor/github.com/openshift/origin/pkg/build/api/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/build/api/zz_generated.deepcopy.go @@ -51,9 +51,11 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GitSourceRevision, InType: reflect.TypeOf(&GitSourceRevision{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ImageChangeCause, InType: reflect.TypeOf(&ImageChangeCause{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ImageChangeTrigger, InType: reflect.TypeOf(&ImageChangeTrigger{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ImageLabel, InType: reflect.TypeOf(&ImageLabel{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ImageSource, InType: reflect.TypeOf(&ImageSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ImageSourcePath, InType: reflect.TypeOf(&ImageSourcePath{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_JenkinsPipelineBuildStrategy, InType: reflect.TypeOf(&JenkinsPipelineBuildStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProxyConfig, InType: reflect.TypeOf(&ProxyConfig{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretBuildSource, InType: reflect.TypeOf(&SecretBuildSource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretSpec, InType: reflect.TypeOf(&SecretSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SourceBuildStrategy, InType: reflect.TypeOf(&SourceBuildStrategy{})}, @@ -275,6 +277,15 @@ func DeepCopy_api_BuildOutput(in interface{}, out interface{}, c *conversion.Clo } else { out.PushSecret = nil } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + for i := range *in { + (*out)[i] = (*in)[i] + } + } 
else { + out.ImageLabels = nil + } return nil } } @@ -635,6 +646,15 @@ func DeepCopy_api_CommonSpec(in interface{}, out interface{}, c *conversion.Clon } else { out.CompletionDeadlineSeconds = nil } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.NodeSelector = nil + } return nil } } @@ -766,19 +786,8 @@ func DeepCopy_api_GitBuildSource(in interface{}, out interface{}, c *conversion. out := out.(*GitBuildSource) out.URI = in.URI out.Ref = in.Ref - if in.HTTPProxy != nil { - in, out := &in.HTTPProxy, &out.HTTPProxy - *out = new(string) - **out = **in - } else { - out.HTTPProxy = nil - } - if in.HTTPSProxy != nil { - in, out := &in.HTTPSProxy, &out.HTTPSProxy - *out = new(string) - **out = **in - } else { - out.HTTPSProxy = nil + if err := DeepCopy_api_ProxyConfig(&in.ProxyConfig, &out.ProxyConfig, c); err != nil { + return err } return nil } @@ -881,6 +890,16 @@ func DeepCopy_api_ImageChangeTrigger(in interface{}, out interface{}, c *convers } } +func DeepCopy_api_ImageLabel(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ImageLabel) + out := out.(*ImageLabel) + out.Name = in.Name + out.Value = in.Value + return nil + } +} + func DeepCopy_api_ImageSource(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*ImageSource) @@ -926,6 +945,35 @@ func DeepCopy_api_JenkinsPipelineBuildStrategy(in interface{}, out interface{}, } } +func DeepCopy_api_ProxyConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProxyConfig) + out := out.(*ProxyConfig) + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } else { + out.HTTPProxy = nil + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } else { + out.HTTPSProxy = nil + } + if in.NoProxy != nil { 
+ in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } else { + out.NoProxy = nil + } + return nil + } +} + func DeepCopy_api_SecretBuildSource(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SecretBuildSource) diff --git a/vendor/github.com/openshift/origin/pkg/build/client/clients.go b/vendor/github.com/openshift/origin/pkg/build/client/clients.go deleted file mode 100644 index 00d70144d..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/client/clients.go +++ /dev/null @@ -1,109 +0,0 @@ -package client - -import ( - buildapi "github.com/openshift/origin/pkg/build/api" - osclient "github.com/openshift/origin/pkg/client" - kapi "k8s.io/kubernetes/pkg/api" -) - -// BuildConfigGetter provides methods for getting BuildConfigs -type BuildConfigGetter interface { - Get(namespace, name string) (*buildapi.BuildConfig, error) -} - -// BuildConfigUpdater provides methods for updating BuildConfigs -type BuildConfigUpdater interface { - Update(buildConfig *buildapi.BuildConfig) error -} - -// OSClientBuildConfigClient delegates get and update operations to the OpenShift client interface -type OSClientBuildConfigClient struct { - Client osclient.Interface -} - -// NewOSClientBuildConfigClient creates a new build config client that uses an openshift client to create and get BuildConfigs -func NewOSClientBuildConfigClient(client osclient.Interface) *OSClientBuildConfigClient { - return &OSClientBuildConfigClient{Client: client} -} - -// Get returns a BuildConfig using the OpenShift client. -func (c OSClientBuildConfigClient) Get(namespace, name string) (*buildapi.BuildConfig, error) { - return c.Client.BuildConfigs(namespace).Get(name) -} - -// Update updates a BuildConfig using the OpenShift client. 
-func (c OSClientBuildConfigClient) Update(buildConfig *buildapi.BuildConfig) error { - _, err := c.Client.BuildConfigs(buildConfig.Namespace).Update(buildConfig) - return err -} - -// BuildUpdater provides methods for updating existing Builds. -type BuildUpdater interface { - Update(namespace string, build *buildapi.Build) error -} - -// BuildLister provides methods for listing the Builds. -type BuildLister interface { - List(namespace string, opts kapi.ListOptions) (*buildapi.BuildList, error) -} - -// OSClientBuildClient deletes build create and update operations to the OpenShift client interface -type OSClientBuildClient struct { - Client osclient.Interface -} - -// NewOSClientBuildClient creates a new build client that uses an openshift client to update builds -func NewOSClientBuildClient(client osclient.Interface) *OSClientBuildClient { - return &OSClientBuildClient{Client: client} -} - -// Update updates builds using the OpenShift client. -func (c OSClientBuildClient) Update(namespace string, build *buildapi.Build) error { - _, e := c.Client.Builds(namespace).Update(build) - return e -} - -// List lists the builds using the OpenShift client. 
-func (c OSClientBuildClient) List(namespace string, opts kapi.ListOptions) (*buildapi.BuildList, error) { - return c.Client.Builds(namespace).List(opts) -} - -// BuildCloner provides methods for cloning builds -type BuildCloner interface { - Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) -} - -// OSClientBuildClonerClient creates a new build client that uses an openshift client to clone builds -type OSClientBuildClonerClient struct { - Client osclient.Interface -} - -// NewOSClientBuildClonerClient creates a new build client that uses an openshift client to clone builds -func NewOSClientBuildClonerClient(client osclient.Interface) *OSClientBuildClonerClient { - return &OSClientBuildClonerClient{Client: client} -} - -// Clone generates new build for given build name -func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) { - return c.Client.Builds(namespace).Clone(request) -} - -// BuildConfigInstantiator provides methods for instantiating builds from build configs -type BuildConfigInstantiator interface { - Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) -} - -// OSClientBuildConfigInstantiatorClient creates a new build client that uses an openshift client to create builds -type OSClientBuildConfigInstantiatorClient struct { - Client osclient.Interface -} - -// NewOSClientBuildConfigInstantiatorClient creates a new build client that uses an openshift client to create builds -func NewOSClientBuildConfigInstantiatorClient(client osclient.Interface) *OSClientBuildConfigInstantiatorClient { - return &OSClientBuildConfigInstantiatorClient{Client: client} -} - -// Instantiate generates new build for given buildConfig -func (c OSClientBuildConfigInstantiatorClient) Instantiate(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) { - return c.Client.BuildConfigs(namespace).Instantiate(request) -} diff --git 
a/vendor/github.com/openshift/origin/pkg/build/cmd/doc.go b/vendor/github.com/openshift/origin/pkg/build/cmd/doc.go deleted file mode 100644 index e9cc603aa..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/cmd/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package cmd provides command helpers for builds -package cmd diff --git a/vendor/github.com/openshift/origin/pkg/build/cmd/reaper.go b/vendor/github.com/openshift/origin/pkg/build/cmd/reaper.go deleted file mode 100644 index 11fb11a55..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/cmd/reaper.go +++ /dev/null @@ -1,150 +0,0 @@ -package cmd - -import ( - "sort" - "strings" - "time" - - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - ktypes "k8s.io/kubernetes/pkg/types" - kutilerrors "k8s.io/kubernetes/pkg/util/errors" - - buildapi "github.com/openshift/origin/pkg/build/api" - buildutil "github.com/openshift/origin/pkg/build/util" - "github.com/openshift/origin/pkg/client" - "github.com/openshift/origin/pkg/util" -) - -// NewBuildConfigReaper returns a new reaper for buildConfigs -func NewBuildConfigReaper(oc *client.Client) kubectl.Reaper { - return &BuildConfigReaper{oc: oc, pollInterval: kubectl.Interval, timeout: kubectl.Timeout} -} - -// BuildConfigReaper implements the Reaper interface for buildConfigs -type BuildConfigReaper struct { - oc client.Interface - pollInterval, timeout time.Duration -} - -// Stop deletes the build configuration and all of the associated builds. -func (reaper *BuildConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error { - _, err := reaper.oc.BuildConfigs(namespace).Get(name) - - if err != nil { - return err - } - - var bcPotentialBuilds []buildapi.Build - - // Collect builds related to the config. 
- builds, err := reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelector(name)}) - if err != nil { - return err - } - - bcPotentialBuilds = append(bcPotentialBuilds, builds.Items...) - - // Collect deprecated builds related to the config. - // TODO: Delete this block after BuildConfigLabelDeprecated is removed. - builds, err = reaper.oc.Builds(namespace).List(kapi.ListOptions{LabelSelector: buildutil.BuildConfigSelectorDeprecated(name)}) - if err != nil { - return err - } - - bcPotentialBuilds = append(bcPotentialBuilds, builds.Items...) - - // A map of builds associated with this build configuration - bcBuilds := make(map[ktypes.UID]buildapi.Build) - - // Because of name length limits in the BuildConfigSelector, annotations are used to ensure - // reliable selection of associated builds. - for _, build := range bcPotentialBuilds { - if build.Annotations != nil { - if bcName, ok := build.Annotations[buildapi.BuildConfigAnnotation]; ok { - // The annotation, if present, has the full build config name. 
- if bcName != name { - // If the name does not match exactly, the build is not truly associated with the build configuration - continue - } - } - } - // Note that if there is no annotation, this is a deprecated build spec - // and we choose to include it in the deletion having matched only the BuildConfigSelectorDeprecated - - // Use a map to union the lists returned by the contemporary & deprecated build queries - // (there will be overlap between the lists, and we only want to try to delete each build once) - bcBuilds[build.UID] = build - } - - // If there are builds associated with this build configuration, pause it before attempting the deletion - if len(bcBuilds) > 0 { - - // Add paused annotation to the build config pending the deletion - err = unversioned.RetryOnConflict(unversioned.DefaultRetry, func() error { - - bc, err := reaper.oc.BuildConfigs(namespace).Get(name) - if err != nil { - return err - } - - // Ignore if the annotation already exists - if strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true" { - return nil - } - - // Set the annotation and update - if err := util.AddObjectAnnotations(bc, map[string]string{buildapi.BuildConfigPausedAnnotation: "true"}); err != nil { - return err - } - _, err = reaper.oc.BuildConfigs(namespace).Update(bc) - return err - }) - - if err != nil { - return err - } - - } - - // Warn the user if the BuildConfig won't get deleted after this point. - bcDeleted := false - defer func() { - if !bcDeleted { - glog.Warningf("BuildConfig %s/%s will not be deleted because not all associated builds could be deleted. 
You can try re-running the command or removing them manually", namespace, name) - } - }() - - // For the benefit of test cases, sort the UIDs so that the deletion order is deterministic - buildUIDs := make([]string, 0, len(bcBuilds)) - for buildUID := range bcBuilds { - buildUIDs = append(buildUIDs, string(buildUID)) - } - sort.Strings(buildUIDs) - - errList := []error{} - for _, buildUID := range buildUIDs { - build := bcBuilds[ktypes.UID(buildUID)] - if err := reaper.oc.Builds(namespace).Delete(build.Name); err != nil { - glog.Warningf("Cannot delete Build %s/%s: %v", build.Namespace, build.Name, err) - if !kerrors.IsNotFound(err) { - errList = append(errList, err) - } - } - } - - // Aggregate all errors - if len(errList) > 0 { - return kutilerrors.NewAggregate(errList) - } - - if err := reaper.oc.BuildConfigs(namespace).Delete(name); err != nil { - return err - } - - bcDeleted = true - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/build/graph/analysis/bc.go b/vendor/github.com/openshift/origin/pkg/build/graph/analysis/bc.go deleted file mode 100644 index d19bf0bb7..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/graph/analysis/bc.go +++ /dev/null @@ -1,351 +0,0 @@ -package analysis - -import ( - "fmt" - "strings" - "time" - - "github.com/gonum/graph" - "github.com/gonum/graph/topo" - - "k8s.io/kubernetes/pkg/api/unversioned" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildapi "github.com/openshift/origin/pkg/build/api" - buildedges "github.com/openshift/origin/pkg/build/graph" - buildgraph "github.com/openshift/origin/pkg/build/graph/nodes" - imageapi "github.com/openshift/origin/pkg/image/api" - imageedges "github.com/openshift/origin/pkg/image/graph" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" -) - -const ( - TagNotAvailableWarning = "ImageStreamTagNotAvailable" - LatestBuildFailedErr = "LatestBuildFailed" - MissingRequiredRegistryErr = 
"MissingRequiredRegistry" - MissingOutputImageStreamErr = "MissingOutputImageStream" - CyclicBuildConfigWarning = "CyclicBuildConfig" - MissingImageStreamTagWarning = "MissingImageStreamTag" - MissingImageStreamImageWarning = "MissingImageStreamImage" -) - -// FindUnpushableBuildConfigs checks all build configs that will output to an IST backed by an ImageStream and checks to make sure their builds can push. -func FindUnpushableBuildConfigs(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - // note, unlike with Inputs, ImageStreamImage is not a valid type for build output - -bc: - for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) { - for _, istNode := range g.SuccessorNodesByEdgeKind(bcNode, buildedges.BuildOutputEdgeKind) { - for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(istNode, imageedges.ReferencedImageStreamGraphEdgeKind) { - imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode) - - if !imageStreamNode.IsFound { - markers = append(markers, osgraph.Marker{ - Node: bcNode, - RelatedNodes: []graph.Node{istNode}, - - Severity: osgraph.ErrorSeverity, - Key: MissingOutputImageStreamErr, - Message: fmt.Sprintf("%s is pushing to %s, but the image stream for that tag does not exist.", - f.ResourceName(bcNode), f.ResourceName(istNode)), - }) - - continue - } - - if len(imageStreamNode.Status.DockerImageRepository) == 0 { - markers = append(markers, osgraph.Marker{ - Node: bcNode, - RelatedNodes: []graph.Node{istNode}, - - Severity: osgraph.ErrorSeverity, - Key: MissingRequiredRegistryErr, - Message: fmt.Sprintf("%s is pushing to %s, but the administrator has not configured the integrated Docker registry.", - f.ResourceName(bcNode), f.ResourceName(istNode)), - Suggestion: osgraph.Suggestion("oc adm registry -h"), - }) - - continue bc - } - } - } - } - - return markers -} - -// FindMissingInputImageStreams checks all build configs and confirms that their From element exists -// 
-// Precedence of failures: -// 1. A build config's input points to an image stream that does not exist -// 2. A build config's input uses an image stream tag reference in an existing image stream, but no images within the image stream have that tag assigned -// 3. A build config's input uses an image stream image reference in an exisiting image stream, but no images within the image stream have the supplied image hexadecimal ID -func FindMissingInputImageStreams(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, bcNode := range g.NodesByKind(buildgraph.BuildConfigNodeKind) { - for _, bcInputNode := range g.PredecessorNodesByEdgeKind(bcNode, buildedges.BuildInputImageEdgeKind) { - switch bcInputNode.(type) { - case *imagegraph.ImageStreamTagNode: - - for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamGraphEdgeKind) { - imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode) - - // note, BuildConfig.Spec.BuildSpec.Strategy.[Docker|Source|Custom]Stragegy.From Input of ImageStream has been converted to ImageStreamTag on the vX to api conversion - // prior to our reaching this point in the code; so there is not need to check for that type vs. 
ImageStreamTag or ImageStreamImage; - - tagNode, _ := bcInputNode.(*imagegraph.ImageStreamTagNode) - imageStream := imageStreamNode.Object().(*imageapi.ImageStream) - if _, ok := imageStream.Status.Tags[tagNode.ImageTag()]; !ok { - - markers = append(markers, getImageStreamTagMarker(g, f, bcInputNode, imageStreamNode, tagNode, bcNode)) - - } - - } - - case *imagegraph.ImageStreamImageNode: - - for _, uncastImageStreamNode := range g.SuccessorNodesByEdgeKind(bcInputNode, imageedges.ReferencedImageStreamImageGraphEdgeKind) { - imageStreamNode := uncastImageStreamNode.(*imagegraph.ImageStreamNode) - - imageNode, _ := bcInputNode.(*imagegraph.ImageStreamImageNode) - imageStream := imageStreamNode.Object().(*imageapi.ImageStream) - found, imageID := validImageStreamImage(imageNode, imageStream) - if !found { - - markers = append(markers, getImageStreamImageMarker(g, f, bcNode, bcInputNode, imageStreamNode, imageNode, imageStream, imageID)) - - } - - } - - } - - } - } - return markers -} - -// FindCircularBuilds checks all build configs for cycles -func FindCircularBuilds(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - // Filter out all but ImageStreamTag and BuildConfig nodes - nodeFn := osgraph.NodesOfKind(imagegraph.ImageStreamTagNodeKind, buildgraph.BuildConfigNodeKind) - // Filter out all but BuildInputImage and BuildOutput edges - edgeFn := osgraph.EdgesOfKind(buildedges.BuildInputImageEdgeKind, buildedges.BuildOutputEdgeKind) - - // Create desired subgraph - sub := g.Subgraph(nodeFn, edgeFn) - - markers := []osgraph.Marker{} - - // Check for cycles - for _, cycle := range topo.CyclesIn(sub) { - nodeNames := []string{} - for _, node := range cycle { - nodeNames = append(nodeNames, f.ResourceName(node)) - } - - markers = append(markers, osgraph.Marker{ - Node: cycle[0], - RelatedNodes: cycle, - - Severity: osgraph.WarningSeverity, - Key: CyclicBuildConfigWarning, - Message: fmt.Sprintf("Cycle detected in build configurations: %s", strings.Join(nodeNames, " -> 
")), - }) - - } - - return markers -} - -// multiBCStartBuildSuggestion builds the `oc start-build` suggestion string with multiple build configs -func multiBCStartBuildSuggestion(bcNodes []*buildgraph.BuildConfigNode) string { - var ret string - if len(bcNodes) > 1 { - ret = "Run one of the following commands: " - } - for i, bcNode := range bcNodes { - // use of f.ResourceName(bcNode) will produce a string like oc start-build BuildConfig|example/ruby-hello-world - ret = ret + fmt.Sprintf("oc start-build %s", bcNode.BuildConfig.GetName()) - if i < (len(bcNodes) - 1) { - ret = ret + ", " - } - } - return ret -} - -// bcNodesToRelatedNodes takes an array of BuildConfigNode's and returns an array of graph.Node's for the Marker.RelatedNodes field -func bcNodesToRelatedNodes(bcNodes []*buildgraph.BuildConfigNode) []graph.Node { - relatedNodes := []graph.Node{} - for _, bcNode := range bcNodes { - relatedNodes = append(relatedNodes, graph.Node(bcNode)) - } - return relatedNodes -} - -// findPendingTagMarkers is the guts behind FindPendingTags .... break out some of the content and reduce some indentation -func findPendingTagMarkers(istNode *imagegraph.ImageStreamTagNode, g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - buildFound := false - bcNodes := buildedges.BuildConfigsForTag(g, graph.Node(istNode)) - for _, bcNode := range bcNodes { - latestBuild := buildedges.GetLatestBuild(g, bcNode) - - // A build config points to the non existent tag but no current build exists. - if latestBuild == nil { - continue - } - buildFound = true - - // A build config points to the non existent tag but something is going on with - // the latest build. - // TODO: Handle other build phases. - switch latestBuild.Build.Status.Phase { - case buildapi.BuildPhaseCancelled: - // TODO: Add a warning here. - case buildapi.BuildPhaseError: - // TODO: Add a warning here. - case buildapi.BuildPhaseComplete: - // We should never hit this. 
The output of our build is missing but the build is complete. - // Most probably the user has messed up? - case buildapi.BuildPhaseFailed: - // Since the tag hasn't been populated yet, we assume there hasn't been a successful - // build so far. - markers = append(markers, osgraph.Marker{ - Node: graph.Node(latestBuild), - RelatedNodes: []graph.Node{graph.Node(istNode), graph.Node(bcNode)}, - - Severity: osgraph.ErrorSeverity, - Key: LatestBuildFailedErr, - Message: fmt.Sprintf("%s has failed.", f.ResourceName(latestBuild)), - Suggestion: osgraph.Suggestion(fmt.Sprintf("Inspect the build failure with 'oc logs -f bc/%s'", bcNode.BuildConfig.GetName())), - }) - default: - // Do nothing when latest build is new, pending, or running. - } - - } - - // if no current builds exist for any of the build configs, append marker for that - // but ignore ISTs which have no build configs - if !buildFound && len(bcNodes) > 0 { - markers = append(markers, osgraph.Marker{ - Node: graph.Node(istNode), - RelatedNodes: bcNodesToRelatedNodes(bcNodes), - - Severity: osgraph.WarningSeverity, - Key: TagNotAvailableWarning, - Message: fmt.Sprintf("%s needs to be imported or created by a build.", f.ResourceName(istNode)), - Suggestion: osgraph.Suggestion(multiBCStartBuildSuggestion(bcNodes)), - }) - } - return markers -} - -// FindPendingTags inspects all imageStreamTags that serve as outputs to builds. -// -// Precedence of failures: -// 1. A build config points to the non existent tag but no current build exists. -// 2. A build config points to the non existent tag but the latest build has failed. -func FindPendingTags(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastIstNode := range g.NodesByKind(imagegraph.ImageStreamTagNodeKind) { - istNode := uncastIstNode.(*imagegraph.ImageStreamTagNode) - if !istNode.Found() { - markers = append(markers, findPendingTagMarkers(istNode, g, f)...) 
- } - } - - return markers -} - -// getImageStreamTagMarker will return the appropriate marker for when a BuildConfig is missing its input ImageStreamTag -func getImageStreamTagMarker(g osgraph.Graph, f osgraph.Namer, bcInputNode graph.Node, imageStreamNode graph.Node, tagNode *imagegraph.ImageStreamTagNode, bcNode graph.Node) osgraph.Marker { - return osgraph.Marker{ - Node: bcNode, - RelatedNodes: []graph.Node{bcInputNode, - imageStreamNode}, - Severity: osgraph.WarningSeverity, - Key: MissingImageStreamImageWarning, - Message: fmt.Sprintf("%s builds from %s, but the image stream tag does not exist.", f.ResourceName(bcNode), f.ResourceName(bcInputNode)), - Suggestion: getImageStreamTagSuggestion(g, f, tagNode), - } -} - -// getImageStreamTagSuggestion will return the appropriate marker Suggestion for when a BuildConfig is missing its input ImageStreamTag; in particular, -// it will determine whether or not another BuildConfig can produce the aforementioned ImageStreamTag -func getImageStreamTagSuggestion(g osgraph.Graph, f osgraph.Namer, tagNode *imagegraph.ImageStreamTagNode) osgraph.Suggestion { - bcs := []string{} - for _, bcNode := range g.PredecessorNodesByEdgeKind(tagNode, buildedges.BuildOutputEdgeKind) { - bcs = append(bcs, f.ResourceName(bcNode)) - } - if len(bcs) == 1 { - return osgraph.Suggestion(fmt.Sprintf("oc start-build %s", bcs[0])) - } - if len(bcs) > 0 { - return osgraph.Suggestion(fmt.Sprintf("`oc start-build` with one of these: %s.", strings.Join(bcs[:], ","))) - } - return osgraph.Suggestion(fmt.Sprintf("%s needs to be imported.", f.ResourceName(tagNode))) -} - -// getImageStreamImageMarker will return the appropriate marker for when a BuildConfig is missing its input ImageStreamImage -func getImageStreamImageMarker(g osgraph.Graph, f osgraph.Namer, bcNode graph.Node, bcInputNode graph.Node, imageStreamNode graph.Node, imageNode *imagegraph.ImageStreamImageNode, imageStream *imageapi.ImageStream, imageID string) osgraph.Marker { - return 
osgraph.Marker{ - Node: bcNode, - RelatedNodes: []graph.Node{bcInputNode, - imageStreamNode}, - Severity: osgraph.WarningSeverity, - Key: MissingImageStreamImageWarning, - Message: fmt.Sprintf("%s builds from %s, but the image stream image does not exist.", f.ResourceName(bcNode), f.ResourceName(bcInputNode)), - Suggestion: getImageStreamImageSuggestion(imageID, imageStream), - } -} - -// getImageStreamImageSuggestion will return the appropriate marker Suggestion for when a BuildConfig is missing its input ImageStreamImage -func getImageStreamImageSuggestion(imageID string, imageStream *imageapi.ImageStream) osgraph.Suggestion { - // check the images stream to see if any import images are in flight or have failed - annotation, ok := imageStream.Annotations[imageapi.DockerImageRepositoryCheckAnnotation] - if !ok { - return osgraph.Suggestion(fmt.Sprintf("`oc import-image %s --from=` where `--from` specifies an image with hexadecimal ID %s", imageStream.GetName(), imageID)) - } - - if checkTime, err := time.Parse(time.RFC3339, annotation); err == nil { - // this time based annotation is set by pkg/image/controller/controller.go whenever import/tag operations are performed; unless - // in the midst of an import/tag operation, it stays set and serves as a timestamp for when the last operation occurred; - // so we will check if the image stream has been updated "recently"; - // in case it is a slow link to the remote repo, see if if the check annotation occurred within the last 5 minutes; if so, consider that as potentially "in progress" - compareTime := checkTime.Add(5 * time.Minute) - currentTime, _ := time.Parse(time.RFC3339, unversioned.Now().UTC().Format(time.RFC3339)) - if compareTime.Before(currentTime) { - return osgraph.Suggestion(fmt.Sprintf("`oc import-image %s --from=` where `--from` specifies an image with hexadecimal ID %s", imageStream.GetName(), imageID)) - } - - return osgraph.Suggestion(fmt.Sprintf("`oc import-image %s --from=` with hexadecimal ID %s 
possibly in progress", imageStream.GetName(), imageID)) - - } - return osgraph.Suggestion(fmt.Sprintf("Possible error occurred with `oc import-image %s --from=` with hexadecimal ID %s; inspect images stream annotations", imageStream.GetName(), imageID)) -} - -// validImageStreamImage will cycle through the imageStream.Status.Tags.[]TagEvent.DockerImageReference and determine whether an image with the hexadecimal image id -// associated with an ImageStreamImage reference in fact exists in a given ImageStream; on return, this method returns a true if does exist, and as well as the hexadecimal image -// id from the ImageStreamImage -func validImageStreamImage(imageNode *imagegraph.ImageStreamImageNode, imageStream *imageapi.ImageStream) (bool, string) { - dockerImageReference, err := imageapi.ParseDockerImageReference(imageNode.Name) - if err == nil { - for _, tagEventList := range imageStream.Status.Tags { - for _, tagEvent := range tagEventList.Items { - if strings.Contains(tagEvent.DockerImageReference, dockerImageReference.ID) { - return true, dockerImageReference.ID - } - } - } - return false, dockerImageReference.ID - } - return false, "" -} diff --git a/vendor/github.com/openshift/origin/pkg/build/graph/edges.go b/vendor/github.com/openshift/origin/pkg/build/graph/edges.go deleted file mode 100644 index b0c4d3b24..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/graph/edges.go +++ /dev/null @@ -1,133 +0,0 @@ -package graph - -import ( - "github.com/gonum/graph" - kapi "k8s.io/kubernetes/pkg/api" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildapi "github.com/openshift/origin/pkg/build/api" - buildgraph "github.com/openshift/origin/pkg/build/graph/nodes" - buildutil "github.com/openshift/origin/pkg/build/util" - imageapi "github.com/openshift/origin/pkg/image/api" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" -) - -const ( - // BuildTriggerImageEdgeKind is an edge from an ImageStream to 
a BuildConfig that - // represents a trigger connection. Changes to the ImageStream will trigger a new build - // from the BuildConfig. - BuildTriggerImageEdgeKind = "BuildTriggerImage" - - // BuildInputImageEdgeKind is an edge from an ImageStream to a BuildConfig, where the - // ImageStream is the source image for the build (builder in S2I builds, FROM in Docker builds, - // custom builder in Custom builds). The same ImageStream can also have a trigger - // relationship with the BuildConfig, but not necessarily. - BuildInputImageEdgeKind = "BuildInputImage" - - // BuildOutputEdgeKind is an edge from a BuildConfig to an ImageStream. The ImageStream will hold - // the ouptut of the Builds created with that BuildConfig. - BuildOutputEdgeKind = "BuildOutput" - - // BuildInputEdgeKind is an edge from a source repository to a BuildConfig. The source repository is the - // input source for the build. - BuildInputEdgeKind = "BuildInput" - - // BuildEdgeKind goes from a BuildConfigNode to a BuildNode and indicates that the buildConfig owns the build - BuildEdgeKind = "Build" -) - -// AddBuildEdges adds edges that connect a BuildConfig to Builds to the given graph -func AddBuildEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNode) { - for _, n := range g.(graph.Graph).Nodes() { - if buildNode, ok := n.(*buildgraph.BuildNode); ok { - if buildNode.Build.Namespace != node.BuildConfig.Namespace { - continue - } - if belongsToBuildConfig(node.BuildConfig, buildNode.Build) { - g.AddEdge(node, buildNode, BuildEdgeKind) - } - } - } -} - -// AddAllBuildEdges adds build edges to all BuildConfig nodes in the given graph -func AddAllBuildEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if bcNode, ok := node.(*buildgraph.BuildConfigNode); ok { - AddBuildEdges(g, bcNode) - } - } -} - -func imageRefNode(g osgraph.MutableUniqueGraph, ref *kapi.ObjectReference, bc *buildapi.BuildConfig) graph.Node { - if ref == nil { - return nil - } - 
switch ref.Kind { - case "DockerImage": - if ref, err := imageapi.ParseDockerImageReference(ref.Name); err == nil { - tag := ref.Tag - ref.Tag = "" - return imagegraph.EnsureDockerRepositoryNode(g, ref.String(), tag) - } - case "ImageStream": - return imagegraph.FindOrCreateSyntheticImageStreamTagNode(g, imagegraph.MakeImageStreamTagObjectMeta(defaultNamespace(ref.Namespace, bc.Namespace), ref.Name, imageapi.DefaultImageTag)) - case "ImageStreamTag": - return imagegraph.FindOrCreateSyntheticImageStreamTagNode(g, imagegraph.MakeImageStreamTagObjectMeta2(defaultNamespace(ref.Namespace, bc.Namespace), ref.Name)) - case "ImageStreamImage": - return imagegraph.FindOrCreateSyntheticImageStreamImageNode(g, imagegraph.MakeImageStreamImageObjectMeta(defaultNamespace(ref.Namespace, bc.Namespace), ref.Name)) - } - return nil -} - -// AddOutputEdges links the build config to its output image node. -func AddOutputEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNode) { - if node.BuildConfig.Spec.Output.To == nil { - return - } - out := imageRefNode(g, node.BuildConfig.Spec.Output.To, node.BuildConfig) - g.AddEdge(node, out, BuildOutputEdgeKind) -} - -// AddInputEdges links the build config to its input image and source nodes. -func AddInputEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNode) { - if in := buildgraph.EnsureSourceRepositoryNode(g, node.BuildConfig.Spec.Source); in != nil { - g.AddEdge(in, node, BuildInputEdgeKind) - } - inputImage := buildutil.GetInputReference(node.BuildConfig.Spec.Strategy) - if input := imageRefNode(g, inputImage, node.BuildConfig); input != nil { - g.AddEdge(input, node, BuildInputImageEdgeKind) - } -} - -// AddTriggerEdges links the build config to its trigger input image nodes. 
-func AddTriggerEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNode) { - for _, trigger := range node.BuildConfig.Spec.Triggers { - if trigger.Type != buildapi.ImageChangeBuildTriggerType { - continue - } - from := trigger.ImageChange.From - if trigger.ImageChange.From == nil { - from = buildutil.GetInputReference(node.BuildConfig.Spec.Strategy) - } - triggerNode := imageRefNode(g, from, node.BuildConfig) - g.AddEdge(triggerNode, node, BuildTriggerImageEdgeKind) - } -} - -// AddInputOutputEdges links the build config to other nodes for the images and source repositories it depends on. -func AddInputOutputEdges(g osgraph.MutableUniqueGraph, node *buildgraph.BuildConfigNode) *buildgraph.BuildConfigNode { - AddInputEdges(g, node) - AddTriggerEdges(g, node) - AddOutputEdges(g, node) - return node -} - -// AddAllInputOutputEdges adds input and output edges for all BuildConfigs in the given graph -func AddAllInputOutputEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if bcNode, ok := node.(*buildgraph.BuildConfigNode); ok { - AddInputOutputEdges(g, bcNode) - } - } -} diff --git a/vendor/github.com/openshift/origin/pkg/build/graph/helpers.go b/vendor/github.com/openshift/origin/pkg/build/graph/helpers.go deleted file mode 100644 index 3c27375cd..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/graph/helpers.go +++ /dev/null @@ -1,111 +0,0 @@ -package graph - -import ( - "sort" - - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildapi "github.com/openshift/origin/pkg/build/api" - buildgraph "github.com/openshift/origin/pkg/build/graph/nodes" -) - -// RelevantBuilds returns the lastSuccessful build, lastUnsuccessful build, and a list of active builds -func RelevantBuilds(g osgraph.Graph, bcNode *buildgraph.BuildConfigNode) (*buildgraph.BuildNode, *buildgraph.BuildNode, []*buildgraph.BuildNode) { - var ( - lastSuccessfulBuild *buildgraph.BuildNode - 
lastUnsuccessfulBuild *buildgraph.BuildNode - ) - activeBuilds := []*buildgraph.BuildNode{} - allBuilds := []*buildgraph.BuildNode{} - uncastBuilds := g.SuccessorNodesByEdgeKind(bcNode, BuildEdgeKind) - - for i := range uncastBuilds { - buildNode := uncastBuilds[i].(*buildgraph.BuildNode) - if belongsToBuildConfig(bcNode.BuildConfig, buildNode.Build) { - allBuilds = append(allBuilds, buildNode) - } - } - - if len(allBuilds) == 0 { - return nil, nil, []*buildgraph.BuildNode{} - } - - sort.Sort(RecentBuildReferences(allBuilds)) - - for i := range allBuilds { - switch allBuilds[i].Build.Status.Phase { - case buildapi.BuildPhaseComplete: - if lastSuccessfulBuild == nil { - lastSuccessfulBuild = allBuilds[i] - } - case buildapi.BuildPhaseFailed, buildapi.BuildPhaseCancelled, buildapi.BuildPhaseError: - if lastUnsuccessfulBuild == nil { - lastUnsuccessfulBuild = allBuilds[i] - } - default: - activeBuilds = append(activeBuilds, allBuilds[i]) - } - } - - return lastSuccessfulBuild, lastUnsuccessfulBuild, activeBuilds -} - -func belongsToBuildConfig(config *buildapi.BuildConfig, b *buildapi.Build) bool { - if b.Labels == nil { - return false - } - if b.Annotations != nil && b.Annotations[buildapi.BuildConfigAnnotation] == config.Name { - return true - } - if b.Labels[buildapi.BuildConfigLabel] == config.Name { - return true - } - if b.Labels[buildapi.BuildConfigLabelDeprecated] == config.Name { - return true - } - return false -} - -type RecentBuildReferences []*buildgraph.BuildNode - -func (m RecentBuildReferences) Len() int { return len(m) } -func (m RecentBuildReferences) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m RecentBuildReferences) Less(i, j int) bool { - return m[i].Build.CreationTimestamp.After(m[j].Build.CreationTimestamp.Time) -} - -func defaultNamespace(value, defaultValue string) string { - if len(value) == 0 { - return defaultValue - } - return value -} - -// BuildConfigsForTag returns the buildConfig that points to the provided imageStreamTag. 
-func BuildConfigsForTag(g osgraph.Graph, istag graph.Node) []*buildgraph.BuildConfigNode { - bcs := []*buildgraph.BuildConfigNode{} - for _, bcNode := range g.PredecessorNodesByEdgeKind(istag, BuildOutputEdgeKind) { - bcs = append(bcs, bcNode.(*buildgraph.BuildConfigNode)) - } - return bcs -} - -// GetLatestBuild returns the latest build for the provided buildConfig. -func GetLatestBuild(g osgraph.Graph, bc graph.Node) *buildgraph.BuildNode { - builds := g.SuccessorNodesByEdgeKind(bc, BuildEdgeKind) - if len(builds) == 0 { - return nil - } - latestBuild := builds[0].(*buildgraph.BuildNode) - - for _, buildNode := range builds[1:] { - if build, ok := buildNode.(*buildgraph.BuildNode); ok { - if latestBuild.Build.CreationTimestamp.Before(build.Build.CreationTimestamp) { - latestBuild = build - } - } - } - - return latestBuild -} diff --git a/vendor/github.com/openshift/origin/pkg/build/graph/nodes/nodes.go b/vendor/github.com/openshift/origin/pkg/build/graph/nodes/nodes.go deleted file mode 100644 index 96bbc85f4..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/graph/nodes/nodes.go +++ /dev/null @@ -1,47 +0,0 @@ -package nodes - -import ( - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildapi "github.com/openshift/origin/pkg/build/api" -) - -// EnsureBuildConfigNode adds a graph node for the specific build config if it does not exist -func EnsureBuildConfigNode(g osgraph.MutableUniqueGraph, config *buildapi.BuildConfig) *BuildConfigNode { - return osgraph.EnsureUnique( - g, - BuildConfigNodeName(config), - func(node osgraph.Node) graph.Node { - return &BuildConfigNode{ - Node: node, - BuildConfig: config, - } - }, - ).(*BuildConfigNode) -} - -// EnsureSourceRepositoryNode adds the specific BuildSource to the graph if it does not already exist. 
-func EnsureSourceRepositoryNode(g osgraph.MutableUniqueGraph, source buildapi.BuildSource) *SourceRepositoryNode { - switch { - case source.Git != nil: - default: - return nil - } - return osgraph.EnsureUnique(g, - SourceRepositoryNodeName(source), - func(node osgraph.Node) graph.Node { - return &SourceRepositoryNode{node, source} - }, - ).(*SourceRepositoryNode) -} - -// EnsureBuildNode adds a graph node for the build if it does not already exist. -func EnsureBuildNode(g osgraph.MutableUniqueGraph, build *buildapi.Build) *BuildNode { - return osgraph.EnsureUnique(g, - BuildNodeName(build), - func(node osgraph.Node) graph.Node { - return &BuildNode{node, build} - }, - ).(*BuildNode) -} diff --git a/vendor/github.com/openshift/origin/pkg/build/graph/nodes/types.go b/vendor/github.com/openshift/origin/pkg/build/graph/nodes/types.go deleted file mode 100644 index fbfb3b057..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/graph/nodes/types.go +++ /dev/null @@ -1,90 +0,0 @@ -package nodes - -import ( - "fmt" - "reflect" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildapi "github.com/openshift/origin/pkg/build/api" -) - -var ( - BuildConfigNodeKind = reflect.TypeOf(buildapi.BuildConfig{}).Name() - BuildNodeKind = reflect.TypeOf(buildapi.Build{}).Name() - - // non-api types - SourceRepositoryNodeKind = reflect.TypeOf(buildapi.BuildSource{}).Name() -) - -func BuildConfigNodeName(o *buildapi.BuildConfig) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(BuildConfigNodeKind, o) -} - -type BuildConfigNode struct { - osgraph.Node - BuildConfig *buildapi.BuildConfig -} - -func (n BuildConfigNode) Object() interface{} { - return n.BuildConfig -} - -func (n BuildConfigNode) String() string { - return string(BuildConfigNodeName(n.BuildConfig)) -} - -func (n BuildConfigNode) UniqueName() osgraph.UniqueName { - return BuildConfigNodeName(n.BuildConfig) -} - -func (*BuildConfigNode) Kind() string { - return 
BuildConfigNodeKind -} - -func SourceRepositoryNodeName(source buildapi.BuildSource) osgraph.UniqueName { - switch { - case source.Git != nil: - sourceType, uri, ref := "git", source.Git.URI, source.Git.Ref - return osgraph.UniqueName(fmt.Sprintf("%s|%s|%s#%s", SourceRepositoryNodeKind, sourceType, uri, ref)) - default: - panic(fmt.Sprintf("invalid build source: %v", source)) - } -} - -type SourceRepositoryNode struct { - osgraph.Node - Source buildapi.BuildSource -} - -func (n SourceRepositoryNode) String() string { - return string(SourceRepositoryNodeName(n.Source)) -} - -func (SourceRepositoryNode) Kind() string { - return SourceRepositoryNodeKind -} - -func BuildNodeName(o *buildapi.Build) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(BuildNodeKind, o) -} - -type BuildNode struct { - osgraph.Node - Build *buildapi.Build -} - -func (n BuildNode) Object() interface{} { - return n.Build -} - -func (n BuildNode) String() string { - return string(BuildNodeName(n.Build)) -} - -func (n BuildNode) UniqueName() osgraph.UniqueName { - return BuildNodeName(n.Build) -} - -func (*BuildNode) Kind() string { - return BuildNodeKind -} diff --git a/vendor/github.com/openshift/origin/pkg/build/util/doc.go b/vendor/github.com/openshift/origin/pkg/build/util/doc.go deleted file mode 100644 index 07e585bbb..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/util/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package util contains common functions that are used -// by the rest of the OpenShift build system. 
-package util diff --git a/vendor/github.com/openshift/origin/pkg/build/util/util.go b/vendor/github.com/openshift/origin/pkg/build/util/util.go deleted file mode 100644 index bfb3b6702..000000000 --- a/vendor/github.com/openshift/origin/pkg/build/util/util.go +++ /dev/null @@ -1,169 +0,0 @@ -package util - -import ( - "fmt" - "strconv" - "strings" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/labels" - - "github.com/golang/glog" - buildapi "github.com/openshift/origin/pkg/build/api" - buildclient "github.com/openshift/origin/pkg/build/client" -) - -const ( - // NoBuildLogsMessage reports that no build logs are available - NoBuildLogsMessage = "No logs are available." -) - -// GetBuildName returns name of the build pod. -func GetBuildName(pod *kapi.Pod) string { - if pod == nil { - return "" - } - return pod.Annotations[buildapi.BuildAnnotation] -} - -// GetInputReference returns the From ObjectReference associated with the -// BuildStrategy. -func GetInputReference(strategy buildapi.BuildStrategy) *kapi.ObjectReference { - switch { - case strategy.SourceStrategy != nil: - return &strategy.SourceStrategy.From - case strategy.DockerStrategy != nil: - return strategy.DockerStrategy.From - case strategy.CustomStrategy != nil: - return &strategy.CustomStrategy.From - default: - return nil - } -} - -// NameFromImageStream returns a concatenated name representing an ImageStream[Tag/Image] -// reference. If the reference does not contain a Namespace, the namespace parameter -// is used instead. 
-func NameFromImageStream(namespace string, ref *kapi.ObjectReference, tag string) string { - var ret string - if ref.Namespace == "" { - ret = namespace - } else { - ret = ref.Namespace - } - ret = ret + "/" + ref.Name - if tag != "" && strings.Index(ref.Name, ":") == -1 && strings.Index(ref.Name, "@") == -1 { - ret = ret + ":" + tag - } - return ret -} - -// IsBuildComplete returns whether the provided build is complete or not -func IsBuildComplete(build *buildapi.Build) bool { - return build.Status.Phase != buildapi.BuildPhaseRunning && build.Status.Phase != buildapi.BuildPhasePending && build.Status.Phase != buildapi.BuildPhaseNew -} - -// IsPaused returns true if the provided BuildConfig is paused and cannot be used to create a new Build -func IsPaused(bc *buildapi.BuildConfig) bool { - return strings.ToLower(bc.Annotations[buildapi.BuildConfigPausedAnnotation]) == "true" -} - -// BuildNumber returns the given build number. -func BuildNumber(build *buildapi.Build) (int64, error) { - annotations := build.GetAnnotations() - if stringNumber, ok := annotations[buildapi.BuildNumberAnnotation]; ok { - return strconv.ParseInt(stringNumber, 10, 64) - } - return 0, fmt.Errorf("build %s/%s does not have %s annotation", build.Namespace, build.Name, buildapi.BuildNumberAnnotation) -} - -// BuildRunPolicy returns the scheduling policy for the build based on the -// "queued" label. 
-func BuildRunPolicy(build *buildapi.Build) buildapi.BuildRunPolicy { - labels := build.GetLabels() - if value, found := labels[buildapi.BuildRunPolicyLabel]; found { - switch value { - case "Parallel": - return buildapi.BuildRunPolicyParallel - case "Serial": - return buildapi.BuildRunPolicySerial - case "SerialLatestOnly": - return buildapi.BuildRunPolicySerialLatestOnly - } - } - glog.V(5).Infof("Build %s/%s does not have start policy label set, using default (Serial)") - return buildapi.BuildRunPolicySerial -} - -// BuildNameForConfigVersion returns the name of the version-th build -// for the config that has the provided name. -func BuildNameForConfigVersion(name string, version int) string { - return fmt.Sprintf("%s-%d", name, version) -} - -// BuildConfigSelector returns a label Selector which can be used to find all -// builds for a BuildConfig. -func BuildConfigSelector(name string) labels.Selector { - return labels.Set{buildapi.BuildConfigLabel: buildapi.LabelValue(name)}.AsSelector() -} - -// BuildConfigSelectorDeprecated returns a label Selector which can be used to find -// all builds for a BuildConfig that use the deprecated labels. -func BuildConfigSelectorDeprecated(name string) labels.Selector { - return labels.Set{buildapi.BuildConfigLabelDeprecated: name}.AsSelector() -} - -type buildFilter func(buildapi.Build) bool - -// BuildConfigBuilds return a list of builds for the given build config. -// Optionally you can specify a filter function to select only builds that -// matches your criteria. 
-func BuildConfigBuilds(c buildclient.BuildLister, namespace, name string, filterFunc buildFilter) (*buildapi.BuildList, error) { - result, err := c.List(namespace, kapi.ListOptions{ - LabelSelector: BuildConfigSelector(name), - }) - if err != nil { - return nil, err - } - if filterFunc == nil { - return result, nil - } - filteredList := &buildapi.BuildList{TypeMeta: result.TypeMeta, ListMeta: result.ListMeta} - for _, b := range result.Items { - if filterFunc(b) { - filteredList.Items = append(filteredList.Items, b) - } - } - return filteredList, nil -} - -// ConfigNameForBuild returns the name of the build config from a -// build name. -func ConfigNameForBuild(build *buildapi.Build) string { - if build == nil { - return "" - } - if build.Annotations != nil { - if _, exists := build.Annotations[buildapi.BuildConfigAnnotation]; exists { - return build.Annotations[buildapi.BuildConfigAnnotation] - } - } - if _, exists := build.Labels[buildapi.BuildConfigLabel]; exists { - return build.Labels[buildapi.BuildConfigLabel] - } - return build.Labels[buildapi.BuildConfigLabelDeprecated] -} - -// VersionForBuild returns the version from the provided build name. -// If no version can be found, 0 is returned to indicate no version. 
-func VersionForBuild(build *buildapi.Build) int { - if build == nil { - return 0 - } - versionString := build.Annotations[buildapi.BuildNumberAnnotation] - version, err := strconv.Atoi(versionString) - if err != nil { - return 0 - } - return version -} diff --git a/vendor/github.com/openshift/origin/pkg/client/client.go b/vendor/github.com/openshift/origin/pkg/client/client.go index c5c00a17f..d7c620266 100644 --- a/vendor/github.com/openshift/origin/pkg/client/client.go +++ b/vendor/github.com/openshift/origin/pkg/client/client.go @@ -48,6 +48,7 @@ type Interface interface { SubjectAccessReviews LocalSubjectAccessReviewsNamespacer SelfSubjectRulesReviewsNamespacer + SubjectRulesReviewsNamespacer TemplatesNamespacer TemplateConfigsNamespacer OAuthClientsInterface @@ -245,6 +246,10 @@ func (c *Client) SelfSubjectRulesReviews(namespace string) SelfSubjectRulesRevie return newSelfSubjectRulesReviews(c, namespace) } +func (c *Client) SubjectRulesReviews(namespace string) SubjectRulesReviewInterface { + return newSubjectRulesReviews(c, namespace) +} + func (c *Client) OAuthClients() OAuthClientInterface { return newOAuthClients(c) } diff --git a/vendor/github.com/openshift/origin/pkg/client/deploymentconfigs.go b/vendor/github.com/openshift/origin/pkg/client/deploymentconfigs.go index cd50f6033..2b7c1dd06 100644 --- a/vendor/github.com/openshift/origin/pkg/client/deploymentconfigs.go +++ b/vendor/github.com/openshift/origin/pkg/client/deploymentconfigs.go @@ -30,6 +30,7 @@ type DeploymentConfigInterface interface { GetScale(name string) (*extensions.Scale, error) UpdateScale(scale *extensions.Scale) (*extensions.Scale, error) UpdateStatus(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) + Instantiate(request *deployapi.DeploymentRequest) (*deployapi.DeploymentConfig, error) } // deploymentConfigs implements DeploymentConfigsNamespacer interface @@ -155,6 +156,18 @@ func (c *deploymentConfigs) UpdateStatus(deploymentConfig 
*deployapi.DeploymentC return } +// Instantiate instantiates a new build from build config returning new object or an error +func (c *deploymentConfigs) Instantiate(request *deployapi.DeploymentRequest) (*deployapi.DeploymentConfig, error) { + result := &deployapi.DeploymentConfig{} + resp := c.r.Post().Namespace(c.ns).Resource("deploymentConfigs").Name(request.Name).SubResource("instantiate").Body(request).Do() + var statusCode int + if resp.StatusCode(&statusCode); statusCode == 204 { + return nil, nil + } + err := resp.Into(result) + return result, err +} + type updateConfigFunc func(d *deployapi.DeploymentConfig) // UpdateConfigWithRetries will try to update a deployment config and ignore any update conflicts. diff --git a/vendor/github.com/openshift/origin/pkg/client/imagestreams.go b/vendor/github.com/openshift/origin/pkg/client/imagestreams.go index 42a855cbf..628e8ac0e 100644 --- a/vendor/github.com/openshift/origin/pkg/client/imagestreams.go +++ b/vendor/github.com/openshift/origin/pkg/client/imagestreams.go @@ -68,13 +68,6 @@ func (c *imageStreams) Get(name string) (result *imageapi.ImageStream, err error return } -// GetByNamespace returns information about a particular image stream in a particular namespace and error if one occurs. -func (c *imageStreams) GetByNamespace(namespace, name string) (result *imageapi.ImageStream, err error) { - result = &imageapi.ImageStream{} - c.r.Get().Namespace(namespace).Resource("imageStreams").Name(name).Do().Into(result) - return -} - // Create create a new image stream. Returns the server's representation of the image stream and error if one occurs. 
func (c *imageStreams) Create(stream *imageapi.ImageStream) (result *imageapi.ImageStream, err error) { result = &imageapi.ImageStream{} diff --git a/vendor/github.com/openshift/origin/pkg/client/oauthclient.go b/vendor/github.com/openshift/origin/pkg/client/oauthclient.go index 0361b078b..390004840 100644 --- a/vendor/github.com/openshift/origin/pkg/client/oauthclient.go +++ b/vendor/github.com/openshift/origin/pkg/client/oauthclient.go @@ -17,6 +17,7 @@ type OAuthClientInterface interface { Get(name string) (*oauthapi.OAuthClient, error) Delete(name string) error Watch(opts kapi.ListOptions) (watch.Interface, error) + Update(client *oauthapi.OAuthClient) (*oauthapi.OAuthClient, error) } type oauthClients struct { @@ -55,3 +56,9 @@ func (c *oauthClients) Delete(name string) (err error) { func (c *oauthClients) Watch(opts kapi.ListOptions) (watch.Interface, error) { return c.r.Get().Prefix("watch").Resource("oAuthClients").VersionedParams(&opts, kapi.ParameterCodec).Watch() } + +func (c *oauthClients) Update(client *oauthapi.OAuthClient) (result *oauthapi.OAuthClient, err error) { + result = &oauthapi.OAuthClient{} + err = c.r.Put().Resource("oAuthClients").Name(client.Name).Body(client).Do().Into(result) + return +} diff --git a/vendor/github.com/openshift/origin/pkg/client/subjectrulesreviews.go b/vendor/github.com/openshift/origin/pkg/client/subjectrulesreviews.go new file mode 100644 index 000000000..ba5bda538 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/client/subjectrulesreviews.go @@ -0,0 +1,32 @@ +package client + +import ( + authorizationapi "github.com/openshift/origin/pkg/authorization/api" +) + +type SubjectRulesReviewsNamespacer interface { + SubjectRulesReviews(namespace string) SubjectRulesReviewInterface +} + +type SubjectRulesReviewInterface interface { + Create(*authorizationapi.SubjectRulesReview) (*authorizationapi.SubjectRulesReview, error) +} + +type subjectRulesReviews struct { + r *Client + ns string +} 
+ +func newSubjectRulesReviews(c *Client, namespace string) *subjectRulesReviews { + return &subjectRulesReviews{ + r: c, + ns: namespace, + } +} + +func (c *subjectRulesReviews) Create(subjectRulesReview *authorizationapi.SubjectRulesReview) (result *authorizationapi.SubjectRulesReview, err error) { + result = &authorizationapi.SubjectRulesReview{} + err = c.r.Post().Namespace(c.ns).Resource("subjectRulesReviews").Body(subjectRulesReview).Do().Into(result) + + return +} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/config/smart_merge.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/config/smart_merge.go index 8ff84d0ab..a4adc54c8 100644 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/config/smart_merge.go +++ b/vendor/github.com/openshift/origin/pkg/cmd/cli/config/smart_merge.go @@ -6,6 +6,7 @@ import ( "reflect" "strings" + kerrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/restclient" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/third_party/forked/golang/netutil" @@ -34,11 +35,7 @@ func GetClusterNicknameFromURL(apiServerLocation string) (string, error) { // GetUserNicknameFromConfig returns "username(as known by the server)/GetClusterNicknameFromConfig". This allows tab completion for switching users to // work easily and obviously. 
func GetUserNicknameFromConfig(clientCfg *restclient.Config) (string, error) { - client, err := osclient.New(clientCfg) - if err != nil { - return "", err - } - userInfo, err := client.Users().Get("~") + userPartOfNick, err := getUserPartOfNickname(clientCfg) if err != nil { return "", err } @@ -48,7 +45,7 @@ func GetUserNicknameFromConfig(clientCfg *restclient.Config) (string, error) { return "", err } - return userInfo.Name + "/" + clusterNick, nil + return userPartOfNick + "/" + clusterNick, nil } func GetUserNicknameFromCert(clusterNick string, chain ...*x509.Certificate) (string, error) { @@ -60,15 +57,32 @@ func GetUserNicknameFromCert(clusterNick string, chain ...*x509.Certificate) (st return userInfo.GetName() + "/" + clusterNick, nil } -// GetContextNicknameFromConfig returns "namespace/GetClusterNicknameFromConfig/username(as known by the server)". This allows tab completion for switching projects/context -// to work easily. First tab is the most selective on project. Second stanza in the next most selective on cluster name. The chances of a user trying having -// one projects on a single server that they want to operate against with two identities is low, so username is last. -func GetContextNicknameFromConfig(namespace string, clientCfg *restclient.Config) (string, error) { +func getUserPartOfNickname(clientCfg *restclient.Config) (string, error) { client, err := osclient.New(clientCfg) if err != nil { return "", err } userInfo, err := client.Users().Get("~") + if kerrors.IsNotFound(err) { + // if we're talking to kube (or likely talking to kube), take a best guess consistent with login + switch { + case len(clientCfg.BearerToken) > 0: + userInfo.Name = clientCfg.BearerToken + case len(clientCfg.Username) > 0: + userInfo.Name = clientCfg.Username + } + } else if err != nil { + return "", err + } + + return userInfo.Name, nil +} + +// GetContextNicknameFromConfig returns "namespace/GetClusterNicknameFromConfig/username(as known by the server)". 
This allows tab completion for switching projects/context +// to work easily. First tab is the most selective on project. Second stanza in the next most selective on cluster name. The chances of a user trying having +// one projects on a single server that they want to operate against with two identities is low, so username is last. +func GetContextNicknameFromConfig(namespace string, clientCfg *restclient.Config) (string, error) { + userPartOfNick, err := getUserPartOfNickname(clientCfg) if err != nil { return "", err } @@ -78,7 +92,7 @@ func GetContextNicknameFromConfig(namespace string, clientCfg *restclient.Config return "", err } - return namespace + "/" + clusterNick + "/" + userInfo.Name, nil + return namespace + "/" + clusterNick + "/" + userPartOfNick, nil } func GetContextNickname(namespace, clusterNick, userNick string) string { diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/chaindescriber.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/chaindescriber.go deleted file mode 100644 index 34ff224c2..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/chaindescriber.go +++ /dev/null @@ -1,319 +0,0 @@ -package describe - -import ( - "fmt" - "sort" - "strings" - - "github.com/golang/glog" - "github.com/gonum/graph" - "github.com/gonum/graph/encoding/dot" - "github.com/gonum/graph/path" - kapi "k8s.io/kubernetes/pkg/api" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildedges "github.com/openshift/origin/pkg/build/graph" - buildanalysis "github.com/openshift/origin/pkg/build/graph/analysis" - buildgraph "github.com/openshift/origin/pkg/build/graph/nodes" - "github.com/openshift/origin/pkg/client" - imageapi "github.com/openshift/origin/pkg/image/api" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" - dotutil "github.com/openshift/origin/pkg/util/dot" - 
"github.com/openshift/origin/pkg/util/parallel" -) - -// NotFoundErr is returned when the imageStreamTag (ist) of interest cannot -// be found in the graph. This doesn't mean though that the IST does not -// exist. A user may have an image stream without a build configuration -// pointing at it. In that case, the IST of interest simply doesn't have -// other dependant ists -type NotFoundErr string - -func (e NotFoundErr) Error() string { - return fmt.Sprintf("couldn't find image stream tag: %q", string(e)) -} - -// ChainDescriber generates extended information about a chain of -// dependencies of an image stream -type ChainDescriber struct { - c client.BuildConfigsNamespacer - namespaces sets.String - outputFormat string - namer osgraph.Namer -} - -// NewChainDescriber returns a new ChainDescriber -func NewChainDescriber(c client.BuildConfigsNamespacer, namespaces sets.String, out string) *ChainDescriber { - return &ChainDescriber{c: c, namespaces: namespaces, outputFormat: out, namer: namespacedFormatter{hideNamespace: true}} -} - -// MakeGraph will create the graph of all build configurations and the image streams -// they point to via image change triggers in the provided namespace(s) -func (d *ChainDescriber) MakeGraph() (osgraph.Graph, error) { - g := osgraph.New() - - loaders := []GraphLoader{} - for namespace := range d.namespaces { - glog.V(4).Infof("Loading build configurations from %q", namespace) - loaders = append(loaders, &bcLoader{namespace: namespace, lister: d.c}) - } - loadingFuncs := []func() error{} - for _, loader := range loaders { - loadingFuncs = append(loadingFuncs, loader.Load) - } - - if errs := parallel.Run(loadingFuncs...); len(errs) > 0 { - return g, utilerrors.NewAggregate(errs) - } - - for _, loader := range loaders { - loader.AddToGraph(g) - } - - buildedges.AddAllInputOutputEdges(g) - - return g, nil -} - -// Describe returns the output of the graph starting from the provided -// image stream tag (name:tag) in namespace. 
Namespace is needed here -// because image stream tags with the same name can be found across -// different namespaces. -func (d *ChainDescriber) Describe(ist *imageapi.ImageStreamTag, includeInputImages, reverse bool) (string, error) { - g, err := d.MakeGraph() - if err != nil { - return "", err - } - - // Retrieve the imageStreamTag node of interest - istNode := g.Find(imagegraph.ImageStreamTagNodeName(ist)) - if istNode == nil { - return "", NotFoundErr(fmt.Sprintf("%q", ist.Name)) - } - - markers := buildanalysis.FindCircularBuilds(g, d.namer) - if len(markers) > 0 { - for _, marker := range markers { - if strings.Contains(marker.Message, ist.Name) { - return marker.Message, nil - } - } - } - - buildInputEdgeKinds := []string{buildedges.BuildTriggerImageEdgeKind} - if includeInputImages { - buildInputEdgeKinds = append(buildInputEdgeKinds, buildedges.BuildInputImageEdgeKind) - } - - // Partition down to the subgraph containing the imagestreamtag of interest - var partitioned osgraph.Graph - if reverse { - partitioned = partitionReverse(g, istNode, buildInputEdgeKinds) - } else { - partitioned = partition(g, istNode, buildInputEdgeKinds) - } - - switch strings.ToLower(d.outputFormat) { - case "dot": - data, err := dot.Marshal(partitioned, dotutil.Quote(ist.Name), "", " ", false) - if err != nil { - return "", err - } - return string(data), nil - case "": - return d.humanReadableOutput(partitioned, d.namer, istNode, reverse), nil - } - - return "", fmt.Errorf("unknown specified format %q", d.outputFormat) -} - -// partition the graph down to a subgraph starting from the given root -func partition(g osgraph.Graph, root graph.Node, buildInputEdgeKinds []string) osgraph.Graph { - // Filter out all but BuildConfig and ImageStreamTag nodes - nodeFn := osgraph.NodesOfKind(buildgraph.BuildConfigNodeKind, imagegraph.ImageStreamTagNodeKind) - // Filter out all but BuildInputImage and BuildOutput edges - edgeKinds := []string{} - edgeKinds = append(edgeKinds, 
buildInputEdgeKinds...) - edgeKinds = append(edgeKinds, buildedges.BuildOutputEdgeKind) - edgeFn := osgraph.EdgesOfKind(edgeKinds...) - sub := g.Subgraph(nodeFn, edgeFn) - - // Filter out inbound edges to the IST of interest - edgeFn = osgraph.RemoveInboundEdges([]graph.Node{root}) - sub = sub.Subgraph(nodeFn, edgeFn) - - // Check all paths leading from the root node, collect any - // node found in them, and create the desired subgraph - desired := []graph.Node{root} - paths := path.DijkstraAllPaths(sub) - for _, node := range sub.Nodes() { - if node == root { - continue - } - path, _, _ := paths.Between(root, node) - if len(path) != 0 { - desired = append(desired, node) - } - } - return sub.SubgraphWithNodes(desired, osgraph.ExistingDirectEdge) -} - -// partitionReverse the graph down to a subgraph starting from the given root -func partitionReverse(g osgraph.Graph, root graph.Node, buildInputEdgeKinds []string) osgraph.Graph { - // Filter out all but BuildConfig and ImageStreamTag nodes - nodeFn := osgraph.NodesOfKind(buildgraph.BuildConfigNodeKind, imagegraph.ImageStreamTagNodeKind) - // Filter out all but BuildInputImage and BuildOutput edges - edgeKinds := []string{} - edgeKinds = append(edgeKinds, buildInputEdgeKinds...) - edgeKinds = append(edgeKinds, buildedges.BuildOutputEdgeKind) - edgeFn := osgraph.EdgesOfKind(edgeKinds...) 
- sub := g.Subgraph(nodeFn, edgeFn) - - // Filter out inbound edges to the IST of interest - edgeFn = osgraph.RemoveOutboundEdges([]graph.Node{root}) - sub = sub.Subgraph(nodeFn, edgeFn) - - // Check all paths leading from the root node, collect any - // node found in them, and create the desired subgraph - desired := []graph.Node{root} - paths := path.DijkstraAllPaths(sub) - for _, node := range sub.Nodes() { - if node == root { - continue - } - path, _, _ := paths.Between(node, root) - if len(path) != 0 { - desired = append(desired, node) - } - } - return sub.SubgraphWithNodes(desired, osgraph.ExistingDirectEdge) -} - -// humanReadableOutput traverses the provided graph using DFS and outputs it -// in a human-readable format. It starts from the provided root, assuming it -// is an imageStreamTag node and continues to the rest of the graph handling -// only imageStreamTag and buildConfig nodes. -func (d *ChainDescriber) humanReadableOutput(g osgraph.Graph, f osgraph.Namer, root graph.Node, reverse bool) string { - if reverse { - g = g.EdgeSubgraph(osgraph.ReverseExistingDirectEdge) - } - - var singleNamespace bool - if len(d.namespaces) == 1 && !d.namespaces.Has(kapi.NamespaceAll) { - singleNamespace = true - } - depth := map[graph.Node]int{ - root: 0, - } - out := "" - - dfs := &DepthFirst{ - Visit: func(u, v graph.Node) { - depth[v] = depth[u] + 1 - }, - } - - until := func(node graph.Node) bool { - var info string - - switch t := node.(type) { - case *imagegraph.ImageStreamTagNode: - info = outputHelper(f.ResourceName(t), t.Namespace, singleNamespace) - case *buildgraph.BuildConfigNode: - info = outputHelper(f.ResourceName(t), t.BuildConfig.Namespace, singleNamespace) - default: - panic("this graph contains node kinds other than imageStreamTags and buildConfigs") - } - - if depth[node] != 0 { - out += "\n" - } - out += fmt.Sprintf("%s", strings.Repeat("\t", depth[node])) - out += fmt.Sprintf("%s", info) - - return false - } - - dfs.Walk(g, root, until) - - 
return out -} - -// outputHelper returns resource/name in a single namespace, -// in multiple namespaces -func outputHelper(info, namespace string, singleNamespace bool) string { - if singleNamespace { - return info - } - return fmt.Sprintf("<%s %s>", namespace, info) -} - -// DepthFirst implements stateful depth-first graph traversal. -// Modifies behavior of visitor.DepthFirst to allow nodes to be visited multiple -// times as long as they're not in the current stack -type DepthFirst struct { - EdgeFilter func(graph.Edge) bool - Visit func(u, v graph.Node) - stack NodeStack -} - -// Walk performs a depth-first traversal of the graph g starting from the given node -func (d *DepthFirst) Walk(g graph.Graph, from graph.Node, until func(graph.Node) bool) graph.Node { - return d.visit(g, from, until) -} - -func (d *DepthFirst) visit(g graph.Graph, t graph.Node, until func(graph.Node) bool) graph.Node { - if until != nil && until(t) { - return t - } - d.stack.Push(t) - children := osgraph.ByID(g.From(t)) - sort.Sort(children) - for _, n := range children { - if d.EdgeFilter != nil && !d.EdgeFilter(g.Edge(t, n)) { - continue - } - if d.visited(n.ID()) { - continue - } - if d.Visit != nil { - d.Visit(t, n) - } - result := d.visit(g, n, until) - if result != nil { - return result - } - } - d.stack.Pop() - return nil -} - -func (d *DepthFirst) visited(id int) bool { - for _, n := range d.stack { - if n.ID() == id { - return true - } - } - return false -} - -// NodeStack implements a LIFO stack of graph.Node. -// NodeStack is internal only in go 1.5. -type NodeStack []graph.Node - -// Len returns the number of graph.Nodes on the stack. -func (s *NodeStack) Len() int { return len(*s) } - -// Pop returns the last graph.Node on the stack and removes it -// from the stack. -func (s *NodeStack) Pop() graph.Node { - v := *s - v, n := v[:len(v)-1], v[len(v)-1] - *s = v - return n -} - -// Push adds the node n to the stack at the last position. 
-func (s *NodeStack) Push(n graph.Node) { *s = append(*s, n) } diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/deployments.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/deployments.go deleted file mode 100644 index 977c4292f..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/deployments.go +++ /dev/null @@ -1,417 +0,0 @@ -package describe - -import ( - "fmt" - "io" - "sort" - "strconv" - "strings" - "text/tabwriter" - - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/autoscaling" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - rcutils "k8s.io/kubernetes/pkg/controller/replication" - kctl "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/labels" - - "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - "github.com/openshift/origin/pkg/client" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deployedges "github.com/openshift/origin/pkg/deploy/graph" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" - deployutil "github.com/openshift/origin/pkg/deploy/util" - imageapi "github.com/openshift/origin/pkg/image/api" -) - -const ( - // maxDisplayDeployments is the number of deployments to show when describing - // deployment configuration. - maxDisplayDeployments = 3 - - // maxDisplayDeploymentsEvents is the number of events to display when - // describing the deployment configuration. 
- // TODO: Make the estimation of this number more sophisticated and make this - // number configurable via DescriberSettings - maxDisplayDeploymentsEvents = 8 -) - -// DeploymentConfigDescriber generates information about a DeploymentConfig -type DeploymentConfigDescriber struct { - osClient client.Interface - kubeClient kclient.Interface - - config *deployapi.DeploymentConfig -} - -// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber -func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface, config *deployapi.DeploymentConfig) *DeploymentConfigDescriber { - return &DeploymentConfigDescriber{ - osClient: client, - kubeClient: kclient, - config: config, - } -} - -// Describe returns the description of a DeploymentConfig -func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - var deploymentConfig *deployapi.DeploymentConfig - if d.config != nil { - // If a deployment config is already provided use that. - // This is used by `oc rollback --dry-run`. 
- deploymentConfig = d.config - } else { - var err error - deploymentConfig, err = d.osClient.DeploymentConfigs(namespace).Get(name) - if err != nil { - return "", err - } - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, deploymentConfig.ObjectMeta) - var ( - deploymentsHistory []kapi.ReplicationController - activeDeploymentName string - ) - - if d.config == nil { - if rcs, err := d.kubeClient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(deploymentConfig.Name)}); err == nil { - deploymentsHistory = rcs.Items - } - } - - if deploymentConfig.Status.LatestVersion == 0 { - formatString(out, "Latest Version", "Not deployed") - } else { - formatString(out, "Latest Version", strconv.FormatInt(deploymentConfig.Status.LatestVersion, 10)) - } - - printDeploymentConfigSpec(d.kubeClient, *deploymentConfig, out) - fmt.Fprintln(out) - - latestDeploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig) - if activeDeployment := deployutil.ActiveDeployment(deploymentConfig, deploymentsHistory); activeDeployment != nil { - activeDeploymentName = activeDeployment.Name - } - - var deployment *kapi.ReplicationController - isNotDeployed := len(deploymentsHistory) == 0 - for _, item := range deploymentsHistory { - if item.Name == latestDeploymentName { - deployment = &item - } - } - - if isNotDeployed { - formatString(out, "Latest Deployment", "") - } else { - header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment)) - // Show details if the current deployment is the active one or it is the - // initial deployment. - printDeploymentRc(deployment, d.kubeClient, out, header, (deployment.Name == activeDeploymentName) || len(deploymentsHistory) == 1) - } - - // We don't show the deployment history when running `oc rollback --dry-run`. 
- if d.config == nil && !isNotDeployed { - sorted := deploymentsHistory - sort.Sort(sort.Reverse(rcutils.OverlappingControllers(sorted))) - counter := 1 - for _, item := range sorted { - if item.Name != latestDeploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) { - header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item)) - printDeploymentRc(&item, d.kubeClient, out, header, item.Name == activeDeploymentName) - counter++ - } - if counter == maxDisplayDeployments { - break - } - } - } - - if settings.ShowEvents { - // Events - if events, err := d.kubeClient.Events(deploymentConfig.Namespace).Search(deploymentConfig); err == nil && events != nil { - latestDeploymentEvents := &kapi.EventList{Items: []kapi.Event{}} - for i := len(events.Items); i != 0 && i > len(events.Items)-maxDisplayDeploymentsEvents; i-- { - latestDeploymentEvents.Items = append(latestDeploymentEvents.Items, events.Items[i-1]) - } - fmt.Fprintln(out) - kctl.DescribeEvents(latestDeploymentEvents, out) - } - } - return nil - }) -} - -func multilineStringArray(sep, indent string, args ...string) string { - for i, s := range args { - if strings.HasSuffix(s, "\n") { - s = strings.TrimSuffix(s, "\n") - } - if strings.Contains(s, "\n") { - s = "\n" + indent + strings.Join(strings.Split(s, "\n"), "\n"+indent) - } - args[i] = s - } - strings.TrimRight(args[len(args)-1], "\n ") - return strings.Join(args, " ") -} - -func printStrategy(strategy deployapi.DeploymentStrategy, indent string, w *tabwriter.Writer) { - if strategy.CustomParams != nil { - if len(strategy.CustomParams.Image) == 0 { - fmt.Fprintf(w, "%sImage:\t%s\n", indent, "") - } else { - fmt.Fprintf(w, "%sImage:\t%s\n", indent, strategy.CustomParams.Image) - } - - if len(strategy.CustomParams.Environment) > 0 { - fmt.Fprintf(w, "%sEnvironment:\t%s\n", indent, formatLabels(convertEnv(strategy.CustomParams.Environment))) - } - - if len(strategy.CustomParams.Command) > 0 { - fmt.Fprintf(w, 
"%sCommand:\t%v\n", indent, multilineStringArray(" ", "\t ", strategy.CustomParams.Command...)) - } - } - - if strategy.RecreateParams != nil { - pre := strategy.RecreateParams.Pre - mid := strategy.RecreateParams.Mid - post := strategy.RecreateParams.Post - if pre != nil { - printHook("Pre-deployment", pre, indent, w) - } - if mid != nil { - printHook("Mid-deployment", mid, indent, w) - } - if post != nil { - printHook("Post-deployment", post, indent, w) - } - } - - if strategy.RollingParams != nil { - pre := strategy.RollingParams.Pre - post := strategy.RollingParams.Post - if pre != nil { - printHook("Pre-deployment", pre, indent, w) - } - if post != nil { - printHook("Post-deployment", post, indent, w) - } - } -} - -func printHook(prefix string, hook *deployapi.LifecycleHook, indent string, w io.Writer) { - if hook.ExecNewPod != nil { - fmt.Fprintf(w, "%s%s hook (pod type, failure policy: %s):\n", indent, prefix, hook.FailurePolicy) - fmt.Fprintf(w, "%s Container:\t%s\n", indent, hook.ExecNewPod.ContainerName) - fmt.Fprintf(w, "%s Command:\t%v\n", indent, multilineStringArray(" ", "\t ", hook.ExecNewPod.Command...)) - if len(hook.ExecNewPod.Env) > 0 { - fmt.Fprintf(w, "%s Env:\t%s\n", indent, formatLabels(convertEnv(hook.ExecNewPod.Env))) - } - } - if len(hook.TagImages) > 0 { - fmt.Fprintf(w, "%s%s hook (tag images, failure policy: %s):\n", indent, prefix, hook.FailurePolicy) - for _, image := range hook.TagImages { - fmt.Fprintf(w, "%s Tag:\tcontainer %s to %s %s %s\n", indent, image.ContainerName, image.To.Kind, image.To.Name, image.To.Namespace) - } - } -} - -func printTriggers(triggers []deployapi.DeploymentTriggerPolicy, w *tabwriter.Writer) { - if len(triggers) == 0 { - formatString(w, "Triggers", "") - return - } - - labels := []string{} - - for _, t := range triggers { - switch t.Type { - case deployapi.DeploymentTriggerOnConfigChange: - labels = append(labels, "Config") - case deployapi.DeploymentTriggerOnImageChange: - if 
len(t.ImageChangeParams.From.Name) > 0 { - name, tag, _ := imageapi.SplitImageStreamTag(t.ImageChangeParams.From.Name) - labels = append(labels, fmt.Sprintf("Image(%s@%s, auto=%v)", name, tag, t.ImageChangeParams.Automatic)) - } - } - } - - desc := strings.Join(labels, ", ") - formatString(w, "Triggers", desc) -} - -func printDeploymentConfigSpec(kc kclient.Interface, dc deployapi.DeploymentConfig, w *tabwriter.Writer) error { - spec := dc.Spec - // Selector - formatString(w, "Selector", formatLabels(spec.Selector)) - - // Replicas - test := "" - if spec.Test { - test = " (test, will be scaled down between deployments)" - } - formatString(w, "Replicas", fmt.Sprintf("%d%s", spec.Replicas, test)) - - if spec.Paused { - formatString(w, "Paused", "yes") - } - - // Autoscaling info - printAutoscalingInfo(deployapi.Resource("DeploymentConfig"), dc.Namespace, dc.Name, kc, w) - - // Triggers - printTriggers(spec.Triggers, w) - - // Strategy - formatString(w, "Strategy", spec.Strategy.Type) - printStrategy(spec.Strategy, " ", w) - - if dc.Spec.MinReadySeconds > 0 { - formatString(w, "MinReadySeconds", fmt.Sprintf("%d", spec.MinReadySeconds)) - } - - // Pod template - fmt.Fprintf(w, "Template:\n") - kctl.DescribePodTemplate(spec.Template, w) - - return nil -} - -// TODO: Move this upstream -func printAutoscalingInfo(res unversioned.GroupResource, namespace, name string, kclient kclient.Interface, w *tabwriter.Writer) { - hpaList, err := kclient.Autoscaling().HorizontalPodAutoscalers(namespace).List(kapi.ListOptions{LabelSelector: labels.Everything()}) - if err != nil { - return - } - - scaledBy := []autoscaling.HorizontalPodAutoscaler{} - for _, hpa := range hpaList.Items { - if hpa.Spec.ScaleTargetRef.Name == name && hpa.Spec.ScaleTargetRef.Kind == res.String() { - scaledBy = append(scaledBy, hpa) - } - } - - for _, hpa := range scaledBy { - cpuUtil := "" - if hpa.Spec.TargetCPUUtilizationPercentage != nil { - cpuUtil = fmt.Sprintf(", triggered at %d%% CPU usage", 
*hpa.Spec.TargetCPUUtilizationPercentage) - } - fmt.Fprintf(w, "Autoscaling:\tbetween %d and %d replicas%s\n", *hpa.Spec.MinReplicas, hpa.Spec.MaxReplicas, cpuUtil) - // TODO: Print a warning in case of multiple hpas. - // Related oc status PR: https://github.com/openshift/origin/pull/7799 - break - } -} - -func printDeploymentRc(deployment *kapi.ReplicationController, kubeClient kclient.Interface, w io.Writer, header string, verbose bool) error { - if len(header) > 0 { - fmt.Fprintf(w, "%v:\n", header) - } - - if verbose { - fmt.Fprintf(w, "\tName:\t%s\n", deployment.Name) - } - timeAt := strings.ToLower(formatRelativeTime(deployment.CreationTimestamp.Time)) - fmt.Fprintf(w, "\tCreated:\t%s ago\n", timeAt) - fmt.Fprintf(w, "\tStatus:\t%s\n", deployutil.DeploymentStatusFor(deployment)) - fmt.Fprintf(w, "\tReplicas:\t%d current / %d desired\n", deployment.Status.Replicas, deployment.Spec.Replicas) - - if verbose { - fmt.Fprintf(w, "\tSelector:\t%s\n", formatLabels(deployment.Spec.Selector)) - fmt.Fprintf(w, "\tLabels:\t%s\n", formatLabels(deployment.Labels)) - running, waiting, succeeded, failed, err := getPodStatusForDeployment(deployment, kubeClient) - if err != nil { - return err - } - fmt.Fprintf(w, "\tPods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - } - - return nil -} - -func getPodStatusForDeployment(deployment *kapi.ReplicationController, kubeClient kclient.Interface) (running, waiting, succeeded, failed int, err error) { - rcPods, err := kubeClient.Pods(deployment.Namespace).List(kapi.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}) - if err != nil { - return - } - for _, pod := range rcPods.Items { - switch pod.Status.Phase { - case kapi.PodRunning: - running++ - case kapi.PodPending: - waiting++ - case kapi.PodSucceeded: - succeeded++ - case kapi.PodFailed: - failed++ - } - } - return -} - -type LatestDeploymentsDescriber struct { - count int - osClient 
client.Interface - kubeClient kclient.Interface -} - -// NewLatestDeploymentsDescriber lists the latest deployments limited to "count". In case count == -1, list back to the last successful. -func NewLatestDeploymentsDescriber(client client.Interface, kclient kclient.Interface, count int) *LatestDeploymentsDescriber { - return &LatestDeploymentsDescriber{ - count: count, - osClient: client, - kubeClient: kclient, - } -} - -// Describe returns the description of the latest deployments for a config -func (d *LatestDeploymentsDescriber) Describe(namespace, name string) (string, error) { - var f formatter - - config, err := d.osClient.DeploymentConfigs(namespace).Get(name) - if err != nil { - return "", err - } - - var deployments []kapi.ReplicationController - if d.count == -1 || d.count > 1 { - list, err := d.kubeClient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(name)}) - if err != nil && !kerrors.IsNotFound(err) { - return "", err - } - deployments = list.Items - } else { - deploymentName := deployutil.LatestDeploymentNameForConfig(config) - deployment, err := d.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName) - if err != nil && !kerrors.IsNotFound(err) { - return "", err - } - if deployment != nil { - deployments = []kapi.ReplicationController{*deployment} - } - } - - g := graph.New() - dcNode := deploygraph.EnsureDeploymentConfigNode(g, config) - for i := range deployments { - kubegraph.EnsureReplicationControllerNode(g, &deployments[i]) - } - deployedges.AddTriggerEdges(g, dcNode) - deployedges.AddDeploymentEdges(g, dcNode) - activeDeployment, inactiveDeployments := deployedges.RelevantDeployments(g, dcNode) - - return tabbedString(func(out *tabwriter.Writer) error { - descriptions := describeDeployments(f, dcNode, activeDeployment, inactiveDeployments, nil, d.count) - for i, description := range descriptions { - descriptions[i] = fmt.Sprintf("%v %v", name, description) - } - 
printLines(out, "", 0, descriptions...) - return nil - }) -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/describer.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/describer.go deleted file mode 100644 index cf003e7ea..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/describer.go +++ /dev/null @@ -1,1606 +0,0 @@ -package describe - -import ( - "bytes" - "fmt" - "path/filepath" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - - units "github.com/docker/go-units" - - kapi "k8s.io/kubernetes/pkg/api" - kerrs "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - kctl "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" - - authorizationapi "github.com/openshift/origin/pkg/authorization/api" - buildapi "github.com/openshift/origin/pkg/build/api" - "github.com/openshift/origin/pkg/client" - deployapi "github.com/openshift/origin/pkg/deploy/api" - imageapi "github.com/openshift/origin/pkg/image/api" - oauthapi "github.com/openshift/origin/pkg/oauth/api" - projectapi "github.com/openshift/origin/pkg/project/api" - quotaapi "github.com/openshift/origin/pkg/quota/api" - routeapi "github.com/openshift/origin/pkg/route/api" - sdnapi "github.com/openshift/origin/pkg/sdn/api" - templateapi "github.com/openshift/origin/pkg/template/api" - userapi "github.com/openshift/origin/pkg/user/api" -) - -func describerMap(c *client.Client, kclient kclient.Interface, host string) map[unversioned.GroupKind]kctl.Describer { - m := map[unversioned.GroupKind]kctl.Describer{ - buildapi.Kind("Build"): &BuildDescriber{c, kclient}, - buildapi.Kind("BuildConfig"): &BuildConfigDescriber{c, host}, - deployapi.Kind("DeploymentConfig"): &DeploymentConfigDescriber{c, kclient, nil}, - authorizationapi.Kind("Identity"): 
&IdentityDescriber{c}, - imageapi.Kind("Image"): &ImageDescriber{c}, - imageapi.Kind("ImageStream"): &ImageStreamDescriber{c}, - imageapi.Kind("ImageStreamTag"): &ImageStreamTagDescriber{c}, - imageapi.Kind("ImageStreamImage"): &ImageStreamImageDescriber{c}, - routeapi.Kind("Route"): &RouteDescriber{c, kclient}, - projectapi.Kind("Project"): &ProjectDescriber{c, kclient}, - templateapi.Kind("Template"): &TemplateDescriber{c, meta.NewAccessor(), kapi.Scheme, nil}, - authorizationapi.Kind("Policy"): &PolicyDescriber{c}, - authorizationapi.Kind("PolicyBinding"): &PolicyBindingDescriber{c}, - authorizationapi.Kind("RoleBinding"): &RoleBindingDescriber{c}, - authorizationapi.Kind("Role"): &RoleDescriber{c}, - authorizationapi.Kind("ClusterPolicy"): &ClusterPolicyDescriber{c}, - authorizationapi.Kind("ClusterPolicyBinding"): &ClusterPolicyBindingDescriber{c}, - authorizationapi.Kind("ClusterRoleBinding"): &ClusterRoleBindingDescriber{c}, - authorizationapi.Kind("ClusterRole"): &ClusterRoleDescriber{c}, - oauthapi.Kind("OAuthAccessToken"): &OAuthAccessTokenDescriber{c}, - userapi.Kind("User"): &UserDescriber{c}, - userapi.Kind("Group"): &GroupDescriber{c.Groups()}, - userapi.Kind("UserIdentityMapping"): &UserIdentityMappingDescriber{c}, - quotaapi.Kind("ClusterResourceQuota"): &ClusterQuotaDescriber{c}, - quotaapi.Kind("AppliedClusterResourceQuota"): &AppliedClusterQuotaDescriber{c}, - sdnapi.Kind("ClusterNetwork"): &ClusterNetworkDescriber{c}, - sdnapi.Kind("HostSubnet"): &HostSubnetDescriber{c}, - sdnapi.Kind("NetNamespace"): &NetNamespaceDescriber{c}, - sdnapi.Kind("EgressNetworkPolicy"): &EgressNetworkPolicyDescriber{c}, - } - return m -} - -// DescribableResources lists all of the resource types we can describe -func DescribableResources() []string { - // Include describable resources in kubernetes - keys := kctl.DescribableResources() - - for k := range describerMap(nil, nil, "") { - resource := strings.ToLower(k.Kind) - keys = append(keys, resource) - } - return 
keys -} - -// DescriberFor returns a describer for a given kind of resource -func DescriberFor(kind unversioned.GroupKind, c *client.Client, kclient kclient.Interface, host string) (kctl.Describer, bool) { - f, ok := describerMap(c, kclient, host)[kind] - if ok { - return f, true - } - return nil, false -} - -// BuildDescriber generates information about a build -type BuildDescriber struct { - osClient client.Interface - kubeClient kclient.Interface -} - -// Describe returns the description of a build -func (d *BuildDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.osClient.Builds(namespace) - build, err := c.Get(name) - if err != nil { - return "", err - } - events, _ := d.kubeClient.Events(namespace).Search(build) - if events == nil { - events = &kapi.EventList{} - } - // get also pod events and merge it all into one list for describe - if pod, err := d.kubeClient.Pods(namespace).Get(buildapi.GetBuildPodName(build)); err == nil { - if podEvents, _ := d.kubeClient.Events(namespace).Search(pod); podEvents != nil { - events.Items = append(events.Items, podEvents.Items...) 
- } - } - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, build.ObjectMeta) - - fmt.Fprintln(out, "") - - status := bold(build.Status.Phase) - if build.Status.Message != "" { - status += " (" + build.Status.Message + ")" - } - formatString(out, "Status", status) - - if build.Status.StartTimestamp != nil && !build.Status.StartTimestamp.IsZero() { - formatString(out, "Started", build.Status.StartTimestamp.Time.Format(time.RFC1123)) - } - - // Create the time object with second-level precision so we don't get - // output like "duration: 1.2724395728934s" - formatString(out, "Duration", describeBuildDuration(build)) - - if build.Status.Config != nil { - formatString(out, "Build Config", build.Status.Config.Name) - } - formatString(out, "Build Pod", buildapi.GetBuildPodName(build)) - - describeCommonSpec(build.Spec.CommonSpec, out) - describeBuildTriggerCauses(build.Spec.TriggeredBy, out) - - if settings.ShowEvents { - kctl.DescribeEvents(events, out) - } - - return nil - }) -} - -func describeBuildDuration(build *buildapi.Build) string { - t := unversioned.Now().Rfc3339Copy() - if build.Status.StartTimestamp == nil && - build.Status.CompletionTimestamp != nil && - (build.Status.Phase == buildapi.BuildPhaseCancelled || - build.Status.Phase == buildapi.BuildPhaseFailed || - build.Status.Phase == buildapi.BuildPhaseError) { - // time a build waited for its pod before ultimately being cancelled before that pod was created - return fmt.Sprintf("waited for %s", build.Status.CompletionTimestamp.Rfc3339Copy().Time.Sub(build.CreationTimestamp.Rfc3339Copy().Time)) - } else if build.Status.StartTimestamp == nil && build.Status.Phase != buildapi.BuildPhaseCancelled { - // time a new build has been waiting for its pod to be created so it can run - return fmt.Sprintf("waiting for %v", t.Sub(build.CreationTimestamp.Rfc3339Copy().Time)) - } else if build.Status.StartTimestamp != nil && build.Status.CompletionTimestamp == nil { - // time a still running build 
has been running in a pod - return fmt.Sprintf("running for %v", build.Status.Duration) - } - return fmt.Sprintf("%v", build.Status.Duration) -} - -// BuildConfigDescriber generates information about a buildConfig -type BuildConfigDescriber struct { - client.Interface - host string -} - -func nameAndNamespace(ns, name string) string { - if len(ns) != 0 { - return fmt.Sprintf("%s/%s", ns, name) - } - return name -} - -func describeCommonSpec(p buildapi.CommonSpec, out *tabwriter.Writer) { - formatString(out, "\nStrategy", buildapi.StrategyType(p.Strategy)) - noneType := true - if p.Source.Git != nil { - noneType = false - formatString(out, "URL", p.Source.Git.URI) - if len(p.Source.Git.Ref) > 0 { - formatString(out, "Ref", p.Source.Git.Ref) - } - if len(p.Source.ContextDir) > 0 { - formatString(out, "ContextDir", p.Source.ContextDir) - } - if p.Source.SourceSecret != nil { - formatString(out, "Source Secret", p.Source.SourceSecret.Name) - } - squashGitInfo(p.Revision, out) - } - if p.Source.Dockerfile != nil { - if len(strings.TrimSpace(*p.Source.Dockerfile)) == 0 { - formatString(out, "Dockerfile", "") - } else { - fmt.Fprintf(out, "Dockerfile:\n") - for _, s := range strings.Split(*p.Source.Dockerfile, "\n") { - fmt.Fprintf(out, " %s\n", s) - } - } - } - switch { - case p.Strategy.DockerStrategy != nil: - describeDockerStrategy(p.Strategy.DockerStrategy, out) - case p.Strategy.SourceStrategy != nil: - describeSourceStrategy(p.Strategy.SourceStrategy, out) - case p.Strategy.CustomStrategy != nil: - describeCustomStrategy(p.Strategy.CustomStrategy, out) - case p.Strategy.JenkinsPipelineStrategy != nil: - describeJenkinsPipelineStrategy(p.Strategy.JenkinsPipelineStrategy, out) - } - - if p.Output.To != nil { - formatString(out, "Output to", fmt.Sprintf("%s %s", p.Output.To.Kind, nameAndNamespace(p.Output.To.Namespace, p.Output.To.Name))) - } - - if p.Source.Binary != nil { - noneType = false - if len(p.Source.Binary.AsFile) > 0 { - formatString(out, "Binary", 
fmt.Sprintf("provided as file %q on build", p.Source.Binary.AsFile)) - } else { - formatString(out, "Binary", "provided on build") - } - } - - if len(p.Source.Secrets) > 0 { - result := []string{} - for _, s := range p.Source.Secrets { - result = append(result, fmt.Sprintf("%s->%s", s.Secret.Name, filepath.Clean(s.DestinationDir))) - } - formatString(out, "Build Secrets", strings.Join(result, ",")) - } - if len(p.Source.Images) == 1 && len(p.Source.Images[0].Paths) == 1 { - noneType = false - image := p.Source.Images[0] - path := image.Paths[0] - formatString(out, "Image Source", fmt.Sprintf("copies %s from %s to %s", path.SourcePath, nameAndNamespace(image.From.Namespace, image.From.Name), path.DestinationDir)) - } else { - for _, image := range p.Source.Images { - noneType = false - formatString(out, "Image Source", fmt.Sprintf("%s", nameAndNamespace(image.From.Namespace, image.From.Name))) - for _, path := range image.Paths { - fmt.Fprintf(out, "\t- %s -> %s\n", path.SourcePath, path.DestinationDir) - } - } - } - - if noneType { - formatString(out, "Empty Source", "no input source provided") - } - - describePostCommitHook(p.PostCommit, out) - - if p.Output.PushSecret != nil { - formatString(out, "Push Secret", p.Output.PushSecret.Name) - } - - if p.CompletionDeadlineSeconds != nil { - formatString(out, "Fail Build After", time.Duration(*p.CompletionDeadlineSeconds)*time.Second) - } -} - -func describePostCommitHook(hook buildapi.BuildPostCommitSpec, out *tabwriter.Writer) { - command := hook.Command - args := hook.Args - script := hook.Script - if len(command) == 0 && len(args) == 0 && len(script) == 0 { - // Post commit hook is not set, nothing to do. - return - } - if len(script) != 0 { - command = []string{"/bin/sh", "-ic"} - if len(args) > 0 { - args = append([]string{script, command[0]}, args...) - } else { - args = []string{script} - } - } - if len(command) == 0 { - command = []string{""} - } - all := append(command, args...) 
- for i, v := range all { - all[i] = fmt.Sprintf("%q", v) - } - formatString(out, "Post Commit Hook", fmt.Sprintf("[%s]", strings.Join(all, ", "))) -} - -func describeSourceStrategy(s *buildapi.SourceBuildStrategy, out *tabwriter.Writer) { - if len(s.From.Name) != 0 { - formatString(out, "From Image", fmt.Sprintf("%s %s", s.From.Kind, nameAndNamespace(s.From.Namespace, s.From.Name))) - } - if len(s.Scripts) != 0 { - formatString(out, "Scripts", s.Scripts) - } - if s.PullSecret != nil { - formatString(out, "Pull Secret Name", s.PullSecret.Name) - } - if s.Incremental != nil && *s.Incremental { - formatString(out, "Incremental Build", "yes") - } - if s.ForcePull { - formatString(out, "Force Pull", "yes") - } -} - -func describeDockerStrategy(s *buildapi.DockerBuildStrategy, out *tabwriter.Writer) { - if s.From != nil && len(s.From.Name) != 0 { - formatString(out, "From Image", fmt.Sprintf("%s %s", s.From.Kind, nameAndNamespace(s.From.Namespace, s.From.Name))) - } - if len(s.DockerfilePath) != 0 { - formatString(out, "Dockerfile Path", s.DockerfilePath) - } - if s.PullSecret != nil { - formatString(out, "Pull Secret Name", s.PullSecret.Name) - } - if s.NoCache { - formatString(out, "No Cache", "true") - } - if s.ForcePull { - formatString(out, "Force Pull", "true") - } -} - -func describeCustomStrategy(s *buildapi.CustomBuildStrategy, out *tabwriter.Writer) { - if len(s.From.Name) != 0 { - formatString(out, "Image Reference", fmt.Sprintf("%s %s", s.From.Kind, nameAndNamespace(s.From.Namespace, s.From.Name))) - } - if s.ExposeDockerSocket { - formatString(out, "Expose Docker Socket", "yes") - } - if s.ForcePull { - formatString(out, "Force Pull", "yes") - } - if s.PullSecret != nil { - formatString(out, "Pull Secret Name", s.PullSecret.Name) - } - for i, env := range s.Env { - if i == 0 { - formatString(out, "Environment", formatEnv(env)) - } else { - formatString(out, "", formatEnv(env)) - } - } -} - -func describeJenkinsPipelineStrategy(s 
*buildapi.JenkinsPipelineBuildStrategy, out *tabwriter.Writer) { - if len(s.JenkinsfilePath) != 0 { - formatString(out, "Jenkinsfile path", s.JenkinsfilePath) - } - if len(s.Jenkinsfile) != 0 { - fmt.Fprintf(out, "Jenkinsfile contents:\n") - for _, s := range strings.Split(s.Jenkinsfile, "\n") { - fmt.Fprintf(out, " %s\n", s) - } - } - if len(s.Jenkinsfile) == 0 && len(s.JenkinsfilePath) == 0 { - formatString(out, "Jenkinsfile", "from source repository root") - } -} - -// DescribeTriggers generates information about the triggers associated with a -// buildconfig -func (d *BuildConfigDescriber) DescribeTriggers(bc *buildapi.BuildConfig, out *tabwriter.Writer) { - describeBuildTriggers(bc.Spec.Triggers, bc.Name, bc.Namespace, out, d) -} - -func describeBuildTriggers(triggers []buildapi.BuildTriggerPolicy, name, namespace string, w *tabwriter.Writer, d *BuildConfigDescriber) { - if len(triggers) == 0 { - formatString(w, "Triggered by", "") - return - } - - labels := []string{} - - for _, t := range triggers { - switch t.Type { - case buildapi.GitHubWebHookBuildTriggerType, buildapi.GenericWebHookBuildTriggerType: - continue - case buildapi.ConfigChangeBuildTriggerType: - labels = append(labels, "Config") - case buildapi.ImageChangeBuildTriggerType: - if t.ImageChange != nil && t.ImageChange.From != nil && len(t.ImageChange.From.Name) > 0 { - labels = append(labels, fmt.Sprintf("Image(%s %s)", t.ImageChange.From.Kind, t.ImageChange.From.Name)) - } else { - labels = append(labels, string(t.Type)) - } - case "": - labels = append(labels, "") - default: - labels = append(labels, string(t.Type)) - } - } - - desc := strings.Join(labels, ", ") - formatString(w, "Triggered by", desc) - - webHooks := webHooksDescribe(triggers, name, namespace, d.Interface) - for webHookType, webHookDesc := range webHooks { - fmt.Fprintf(w, "Webhook %s:\n", strings.Title(webHookType)) - for _, trigger := range webHookDesc { - fmt.Fprintf(w, "\tURL:\t%s\n", trigger.URL) - if webHookType == 
string(buildapi.GenericWebHookBuildTriggerType) && trigger.AllowEnv != nil { - fmt.Fprintf(w, fmt.Sprintf("\t%s:\t%v\n", "AllowEnv", *trigger.AllowEnv)) - } - } - } -} - -// Describe returns the description of a buildConfig -func (d *BuildConfigDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.BuildConfigs(namespace) - buildConfig, err := c.Get(name) - if err != nil { - return "", err - } - buildList, err := d.Builds(namespace).List(kapi.ListOptions{}) - if err != nil { - return "", err - } - buildList.Items = buildapi.FilterBuilds(buildList.Items, buildapi.ByBuildConfigPredicate(name)) - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, buildConfig.ObjectMeta) - if buildConfig.Status.LastVersion == 0 { - formatString(out, "Latest Version", "Never built") - } else { - formatString(out, "Latest Version", strconv.FormatInt(buildConfig.Status.LastVersion, 10)) - } - describeCommonSpec(buildConfig.Spec.CommonSpec, out) - formatString(out, "\nBuild Run Policy", string(buildConfig.Spec.RunPolicy)) - d.DescribeTriggers(buildConfig, out) - if len(buildList.Items) == 0 { - return nil - } - fmt.Fprintf(out, "\nBuild\tStatus\tDuration\tCreation Time\n") - - builds := buildList.Items - sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds))) - - for i, build := range builds { - fmt.Fprintf(out, "%s \t%s \t%v \t%v\n", - build.Name, - strings.ToLower(string(build.Status.Phase)), - describeBuildDuration(&build), - build.CreationTimestamp.Rfc3339Copy().Time) - // only print the 10 most recent builds. 
- if i == 9 { - break - } - } - return nil - }) -} - -// OAuthAccessTokenDescriber generates information about an OAuth Acess Token (OAuth) -type OAuthAccessTokenDescriber struct { - client.Interface -} - -func (d *OAuthAccessTokenDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.OAuthAccessTokens() - oAuthAccessToken, err := c.Get(name) - if err != nil { - return "", err - } - - var timeCreated time.Time = oAuthAccessToken.ObjectMeta.CreationTimestamp.Time - var timeExpired time.Time = timeCreated.Add(time.Duration(oAuthAccessToken.ExpiresIn) * time.Second) - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, oAuthAccessToken.ObjectMeta) - formatString(out, "Scopes", oAuthAccessToken.Scopes) - formatString(out, "Expires In", formatToHumanDuration(timeExpired.Sub(time.Now()))) - formatString(out, "User Name", oAuthAccessToken.UserName) - formatString(out, "User UID", oAuthAccessToken.UserUID) - formatString(out, "Client Name", oAuthAccessToken.ClientName) - - return nil - }) -} - -// ImageDescriber generates information about a Image -type ImageDescriber struct { - client.Interface -} - -// Describe returns the description of an image -func (d *ImageDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.Images() - image, err := c.Get(name) - if err != nil { - return "", err - } - - return describeImage(image, "") -} - -func describeImage(image *imageapi.Image, imageName string) (string, error) { - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, image.ObjectMeta) - formatString(out, "Docker Image", image.DockerImageReference) - if len(imageName) > 0 { - formatString(out, "Image Name", imageName) - } - switch l := len(image.DockerImageLayers); l { - case 0: - // legacy case, server does not know individual layers - formatString(out, "Layer Size", units.HumanSize(float64(image.DockerImageMetadata.Size))) - case 1: - 
formatString(out, "Image Size", units.HumanSize(float64(image.DockerImageMetadata.Size))) - default: - info := []string{} - if image.DockerImageLayers[0].LayerSize > 0 { - info = append(info, fmt.Sprintf("first layer %s", units.HumanSize(float64(image.DockerImageLayers[0].LayerSize)))) - } - for i := l - 1; i > 0; i-- { - if image.DockerImageLayers[i].LayerSize == 0 { - continue - } - info = append(info, fmt.Sprintf("last binary layer %s", units.HumanSize(float64(image.DockerImageLayers[i].LayerSize)))) - break - } - if len(info) > 0 { - formatString(out, "Image Size", fmt.Sprintf("%s (%s)", units.HumanSize(float64(image.DockerImageMetadata.Size)), strings.Join(info, ", "))) - } else { - formatString(out, "Image Size", units.HumanSize(float64(image.DockerImageMetadata.Size))) - } - } - //formatString(out, "Parent Image", image.DockerImageMetadata.Parent) - formatString(out, "Image Created", fmt.Sprintf("%s ago", formatRelativeTime(image.DockerImageMetadata.Created.Time))) - formatString(out, "Author", image.DockerImageMetadata.Author) - formatString(out, "Arch", image.DockerImageMetadata.Architecture) - describeDockerImage(out, image.DockerImageMetadata.Config) - return nil - }) -} - -func describeDockerImage(out *tabwriter.Writer, image *imageapi.DockerConfig) { - if image == nil { - return - } - hasCommand := false - if len(image.Entrypoint) > 0 { - hasCommand = true - formatString(out, "Entrypoint", strings.Join(image.Entrypoint, " ")) - } - if len(image.Cmd) > 0 { - hasCommand = true - formatString(out, "Command", strings.Join(image.Cmd, " ")) - } - if !hasCommand { - formatString(out, "Command", "") - } - formatString(out, "Working Dir", image.WorkingDir) - formatString(out, "User", image.User) - ports := sets.NewString() - for k := range image.ExposedPorts { - ports.Insert(k) - } - formatString(out, "Exposes Ports", strings.Join(ports.List(), ", ")) - formatMapStringString(out, "Docker Labels", image.Labels) - for i, env := range image.Env { - if i == 0 { - 
formatString(out, "Environment", env) - } else { - fmt.Fprintf(out, "\t%s\n", env) - } - } - volumes := sets.NewString() - for k := range image.Volumes { - volumes.Insert(k) - } - for i, volume := range volumes.List() { - if i == 0 { - formatString(out, "Volumes", volume) - } else { - fmt.Fprintf(out, "\t%s\n", volume) - } - } -} - -// ImageStreamTagDescriber generates information about a ImageStreamTag (Image). -type ImageStreamTagDescriber struct { - client.Interface -} - -// Describe returns the description of an imageStreamTag -func (d *ImageStreamTagDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ImageStreamTags(namespace) - repo, tag, err := imageapi.ParseImageStreamTagName(name) - if err != nil { - return "", err - } - if len(tag) == 0 { - // TODO use repo's preferred default, when that's coded - tag = imageapi.DefaultImageTag - } - imageStreamTag, err := c.Get(repo, tag) - if err != nil { - return "", err - } - - return describeImage(&imageStreamTag.Image, imageStreamTag.Image.Name) -} - -// ImageStreamImageDescriber generates information about a ImageStreamImage (Image). -type ImageStreamImageDescriber struct { - client.Interface -} - -// Describe returns the description of an imageStreamImage -func (d *ImageStreamImageDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ImageStreamImages(namespace) - repo, id, err := imageapi.ParseImageStreamImageName(name) - if err != nil { - return "", err - } - imageStreamImage, err := c.Get(repo, id) - if err != nil { - return "", err - } - - return describeImage(&imageStreamImage.Image, imageStreamImage.Image.Name) -} - -// ImageStreamDescriber generates information about a ImageStream (Image). 
-type ImageStreamDescriber struct { - client.Interface -} - -// Describe returns the description of an imageStream -func (d *ImageStreamDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ImageStreams(namespace) - imageStream, err := c.Get(name) - if err != nil { - return "", err - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, imageStream.ObjectMeta) - formatString(out, "Docker Pull Spec", imageStream.Status.DockerImageRepository) - formatImageStreamTags(out, imageStream) - return nil - }) -} - -// RouteDescriber generates information about a Route -type RouteDescriber struct { - client.Interface - kubeClient kclient.Interface -} - -type routeEndpointInfo struct { - *kapi.Endpoints - Err error -} - -// Describe returns the description of a route -func (d *RouteDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.Routes(namespace) - route, err := c.Get(name) - if err != nil { - return "", err - } - - backends := append([]routeapi.RouteTargetReference{route.Spec.To}, route.Spec.AlternateBackends...) 
- totalWeight := int32(0) - endpoints := make(map[string]routeEndpointInfo) - for _, backend := range backends { - if backend.Weight != nil { - totalWeight += *backend.Weight - } - ep, endpointsErr := d.kubeClient.Endpoints(namespace).Get(backend.Name) - endpoints[backend.Name] = routeEndpointInfo{ep, endpointsErr} - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, route.ObjectMeta) - if len(route.Spec.Host) > 0 { - formatString(out, "Requested Host", route.Spec.Host) - for _, ingress := range route.Status.Ingress { - if route.Spec.Host != ingress.Host { - continue - } - switch status, condition := routeapi.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status { - case kapi.ConditionTrue: - fmt.Fprintf(out, "\t exposed on router %s %s ago\n", ingress.RouterName, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time))) - case kapi.ConditionFalse: - fmt.Fprintf(out, "\t rejected by router %s: %s (%s ago)\n", ingress.RouterName, condition.Reason, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time))) - if len(condition.Message) > 0 { - fmt.Fprintf(out, "\t %s\n", condition.Message) - } - } - } - } else { - formatString(out, "Requested Host", "") - } - - for _, ingress := range route.Status.Ingress { - if route.Spec.Host == ingress.Host { - continue - } - switch status, condition := routeapi.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status { - case kapi.ConditionTrue: - fmt.Fprintf(out, "\t%s exposed on router %s %s ago\n", ingress.Host, ingress.RouterName, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time))) - case kapi.ConditionFalse: - fmt.Fprintf(out, "\trejected by router %s: %s (%s ago)\n", ingress.RouterName, condition.Reason, strings.ToLower(formatRelativeTime(condition.LastTransitionTime.Time))) - if len(condition.Message) > 0 { - fmt.Fprintf(out, "\t %s\n", condition.Message) - } - } - } - formatString(out, "Path", route.Spec.Path) - - tlsTerm := "" - 
insecurePolicy := "" - if route.Spec.TLS != nil { - tlsTerm = string(route.Spec.TLS.Termination) - insecurePolicy = string(route.Spec.TLS.InsecureEdgeTerminationPolicy) - } - formatString(out, "TLS Termination", tlsTerm) - formatString(out, "Insecure Policy", insecurePolicy) - if route.Spec.Port != nil { - formatString(out, "Endpoint Port", route.Spec.Port.TargetPort.String()) - } else { - formatString(out, "Endpoint Port", "") - } - - for _, backend := range backends { - fmt.Fprintln(out) - formatString(out, "Service", backend.Name) - weight := int32(0) - if backend.Weight != nil { - weight = *backend.Weight - } - if weight > 0 { - fmt.Fprintf(out, "Weight:\t%d (%d%%)\n", weight, weight*100/totalWeight) - } else { - formatString(out, "Weight", "0") - } - - info := endpoints[backend.Name] - if info.Err != nil { - formatString(out, "Endpoints", fmt.Sprintf("", info.Err)) - continue - } - endpoints := info.Endpoints - if len(endpoints.Subsets) == 0 { - formatString(out, "Endpoints", "") - continue - } - - list := []string{} - max := 3 - count := 0 - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - for p := range ss.Ports { - for a := range ss.Addresses { - if len(list) < max { - list = append(list, fmt.Sprintf("%s:%d", ss.Addresses[a].IP, ss.Ports[p].Port)) - } - count++ - } - } - } - ends := strings.Join(list, ", ") - if count > max { - ends += fmt.Sprintf(" + %d more...", count-max) - } - formatString(out, "Endpoints", ends) - } - return nil - }) -} - -// ProjectDescriber generates information about a Project -type ProjectDescriber struct { - osClient client.Interface - kubeClient kclient.Interface -} - -// Describe returns the description of a project -func (d *ProjectDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - projectsClient := d.osClient.Projects() - project, err := projectsClient.Get(name) - if err != nil { - return "", err - } - resourceQuotasClient := d.kubeClient.ResourceQuotas(name) - 
resourceQuotaList, err := resourceQuotasClient.List(kapi.ListOptions{}) - if err != nil { - return "", err - } - limitRangesClient := d.kubeClient.LimitRanges(name) - limitRangeList, err := limitRangesClient.List(kapi.ListOptions{}) - if err != nil { - return "", err - } - - nodeSelector := "" - if len(project.ObjectMeta.Annotations) > 0 { - if ns, ok := project.ObjectMeta.Annotations[projectapi.ProjectNodeSelector]; ok { - nodeSelector = ns - } - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, project.ObjectMeta) - formatString(out, "Display Name", project.Annotations[projectapi.ProjectDisplayName]) - formatString(out, "Description", project.Annotations[projectapi.ProjectDescription]) - formatString(out, "Status", project.Status.Phase) - formatString(out, "Node Selector", nodeSelector) - if len(resourceQuotaList.Items) == 0 { - formatString(out, "Quota", "") - } else { - fmt.Fprintf(out, "Quota:\n") - for i := range resourceQuotaList.Items { - resourceQuota := &resourceQuotaList.Items[i] - fmt.Fprintf(out, "\tName:\t%s\n", resourceQuota.Name) - fmt.Fprintf(out, "\tResource\tUsed\tHard\n") - fmt.Fprintf(out, "\t--------\t----\t----\n") - - resources := []kapi.ResourceName{} - for resource := range resourceQuota.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(kctl.SortableResourceNames(resources)) - - msg := "\t%v\t%v\t%v\n" - for i := range resources { - resource := resources[i] - hardQuantity := resourceQuota.Status.Hard[resource] - usedQuantity := resourceQuota.Status.Used[resource] - fmt.Fprintf(out, msg, resource, usedQuantity.String(), hardQuantity.String()) - } - } - } - if len(limitRangeList.Items) == 0 { - formatString(out, "Resource limits", "") - } else { - fmt.Fprintf(out, "Resource limits:\n") - for i := range limitRangeList.Items { - limitRange := &limitRangeList.Items[i] - fmt.Fprintf(out, "\tName:\t%s\n", limitRange.Name) - fmt.Fprintf(out, "\tType\tResource\tMin\tMax\tDefault\n") - 
fmt.Fprintf(out, "\t----\t--------\t---\t---\t---\n") - for i := range limitRange.Spec.Limits { - item := limitRange.Spec.Limits[i] - maxResources := item.Max - minResources := item.Min - defaultResources := item.Default - - set := map[kapi.ResourceName]bool{} - for k := range maxResources { - set[k] = true - } - for k := range minResources { - set[k] = true - } - for k := range defaultResources { - set[k] = true - } - - for k := range set { - // if no value is set, we output - - maxValue := "-" - minValue := "-" - defaultValue := "-" - - maxQuantity, maxQuantityFound := maxResources[k] - if maxQuantityFound { - maxValue = maxQuantity.String() - } - - minQuantity, minQuantityFound := minResources[k] - if minQuantityFound { - minValue = minQuantity.String() - } - - defaultQuantity, defaultQuantityFound := defaultResources[k] - if defaultQuantityFound { - defaultValue = defaultQuantity.String() - } - - msg := "\t%v\t%v\t%v\t%v\t%v\n" - fmt.Fprintf(out, msg, item.Type, k, minValue, maxValue, defaultValue) - } - } - } - } - return nil - }) -} - -// TemplateDescriber generates information about a template -type TemplateDescriber struct { - client.Interface - meta.MetadataAccessor - runtime.ObjectTyper - kctl.ObjectDescriber -} - -// DescribeMessage prints the message that will be parameter substituted and displayed to the -// user when this template is processed. 
-func (d *TemplateDescriber) DescribeMessage(msg string, out *tabwriter.Writer) { - if len(msg) == 0 { - msg = "" - } - formatString(out, "Message", msg) -} - -// DescribeParameters prints out information about the parameters of a template -func (d *TemplateDescriber) DescribeParameters(params []templateapi.Parameter, out *tabwriter.Writer) { - formatString(out, "Parameters", " ") - indent := " " - for _, p := range params { - formatString(out, indent+"Name", p.Name) - if len(p.DisplayName) > 0 { - formatString(out, indent+"Display Name", p.DisplayName) - } - if len(p.Description) > 0 { - formatString(out, indent+"Description", p.Description) - } - formatString(out, indent+"Required", p.Required) - if len(p.Generate) == 0 { - formatString(out, indent+"Value", p.Value) - continue - } - if len(p.Value) > 0 { - formatString(out, indent+"Value", p.Value) - formatString(out, indent+"Generated (ignored)", p.Generate) - formatString(out, indent+"From", p.From) - } else { - formatString(out, indent+"Generated", p.Generate) - formatString(out, indent+"From", p.From) - } - out.Write([]byte("\n")) - } -} - -// describeObjects prints out information about the objects of a template -func (d *TemplateDescriber) describeObjects(objects []runtime.Object, out *tabwriter.Writer) { - formatString(out, "Objects", " ") - indent := " " - for _, obj := range objects { - if d.ObjectDescriber != nil { - output, err := d.DescribeObject(obj) - if err != nil { - fmt.Fprintf(out, "error: %v\n", err) - continue - } - fmt.Fprint(out, output) - fmt.Fprint(out, "\n") - continue - } - - meta := kapi.ObjectMeta{} - meta.Name, _ = d.MetadataAccessor.Name(obj) - gvk, _, err := d.ObjectTyper.ObjectKinds(obj) - if err != nil { - fmt.Fprintf(out, fmt.Sprintf("%s%s\t%s\n", indent, "", meta.Name)) - continue - } - fmt.Fprintf(out, fmt.Sprintf("%s%s\t%s\n", indent, gvk[0].Kind, meta.Name)) - //meta.Annotations, _ = d.MetadataAccessor.Annotations(obj) - //meta.Labels, _ = d.MetadataAccessor.Labels(obj) - 
/*if len(meta.Labels) > 0 { - formatString(out, indent+"Labels", formatLabels(meta.Labels)) - } - formatAnnotations(out, meta, indent)*/ - } -} - -// Describe returns the description of a template -func (d *TemplateDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.Templates(namespace) - template, err := c.Get(name) - if err != nil { - return "", err - } - return d.DescribeTemplate(template) -} - -func (d *TemplateDescriber) DescribeTemplate(template *templateapi.Template) (string, error) { - // TODO: write error? - _ = runtime.DecodeList(template.Objects, kapi.Codecs.UniversalDecoder(), runtime.UnstructuredJSONScheme) - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, template.ObjectMeta) - out.Write([]byte("\n")) - out.Flush() - d.DescribeParameters(template.Parameters, out) - out.Write([]byte("\n")) - formatString(out, "Object Labels", formatLabels(template.ObjectLabels)) - out.Write([]byte("\n")) - d.DescribeMessage(template.Message, out) - out.Write([]byte("\n")) - out.Flush() - d.describeObjects(template.Objects, out) - return nil - }) -} - -// IdentityDescriber generates information about a user -type IdentityDescriber struct { - client.Interface -} - -// Describe returns the description of an identity -func (d *IdentityDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - userClient := d.Users() - identityClient := d.Identities() - - identity, err := identityClient.Get(name) - if err != nil { - return "", err - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, identity.ObjectMeta) - - if len(identity.User.Name) == 0 { - formatString(out, "User Name", identity.User.Name) - formatString(out, "User UID", identity.User.UID) - } else { - resolvedUser, err := userClient.Get(identity.User.Name) - - nameValue := identity.User.Name - uidValue := string(identity.User.UID) - - if kerrs.IsNotFound(err) { - nameValue += 
fmt.Sprintf(" (Error: User does not exist)") - } else if err != nil { - nameValue += fmt.Sprintf(" (Error: User lookup failed)") - } else { - if !sets.NewString(resolvedUser.Identities...).Has(name) { - nameValue += fmt.Sprintf(" (Error: User identities do not include %s)", name) - } - if resolvedUser.UID != identity.User.UID { - uidValue += fmt.Sprintf(" (Error: Actual user UID is %s)", string(resolvedUser.UID)) - } - } - - formatString(out, "User Name", nameValue) - formatString(out, "User UID", uidValue) - } - return nil - }) - -} - -// UserIdentityMappingDescriber generates information about a user -type UserIdentityMappingDescriber struct { - client.Interface -} - -// Describe returns the description of a userIdentity -func (d *UserIdentityMappingDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.UserIdentityMappings() - - mapping, err := c.Get(name) - if err != nil { - return "", err - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, mapping.ObjectMeta) - formatString(out, "Identity", mapping.Identity.Name) - formatString(out, "User Name", mapping.User.Name) - formatString(out, "User UID", mapping.User.UID) - return nil - }) -} - -// UserDescriber generates information about a user -type UserDescriber struct { - client.Interface -} - -// Describe returns the description of a user -func (d *UserDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - userClient := d.Users() - identityClient := d.Identities() - - user, err := userClient.Get(name) - if err != nil { - return "", err - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, user.ObjectMeta) - if len(user.FullName) > 0 { - formatString(out, "Full Name", user.FullName) - } - - if len(user.Identities) == 0 { - formatString(out, "Identities", "") - } else { - for i, identity := range user.Identities { - resolvedIdentity, err := identityClient.Get(identity) 
- - value := identity - if kerrs.IsNotFound(err) { - value += fmt.Sprintf(" (Error: Identity does not exist)") - } else if err != nil { - value += fmt.Sprintf(" (Error: Identity lookup failed)") - } else if resolvedIdentity.User.Name != name { - value += fmt.Sprintf(" (Error: Identity maps to user name '%s')", resolvedIdentity.User.Name) - } else if resolvedIdentity.User.UID != user.UID { - value += fmt.Sprintf(" (Error: Identity maps to user UID '%s')", resolvedIdentity.User.UID) - } - - if i == 0 { - formatString(out, "Identities", value) - } else { - fmt.Fprintf(out, " \t%s\n", value) - } - } - } - return nil - }) -} - -// GroupDescriber generates information about a group -type GroupDescriber struct { - c client.GroupInterface -} - -// Describe returns the description of a group -func (d *GroupDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - group, err := d.c.Get(name) - if err != nil { - return "", err - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, group.ObjectMeta) - - if len(group.Users) == 0 { - formatString(out, "Users", "") - } else { - for i, user := range group.Users { - if i == 0 { - formatString(out, "Users", user) - } else { - fmt.Fprintf(out, " \t%s\n", user) - } - } - } - return nil - }) -} - -// policy describers - -// PolicyDescriber generates information about a Project -type PolicyDescriber struct { - client.Interface -} - -// Describe returns the description of a policy -// TODO make something a lot prettier -func (d *PolicyDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.Policies(namespace) - policy, err := c.Get(name) - if err != nil { - return "", err - } - - return DescribePolicy(policy) -} - -func DescribePolicy(policy *authorizationapi.Policy) (string, error) { - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, policy.ObjectMeta) - formatString(out, "Last Modified", 
policy.LastModified) - - // using .List() here because I always want the sorted order that it provides - for _, key := range sets.StringKeySet(policy.Roles).List() { - role := policy.Roles[key] - fmt.Fprint(out, key+"\t"+PolicyRuleHeadings+"\n") - for _, rule := range role.Rules { - DescribePolicyRule(out, rule, "\t") - } - } - - return nil - }) -} - -const PolicyRuleHeadings = "Verbs\tNon-Resource URLs\tExtension\tResource Names\tAPI Groups\tResources" - -func DescribePolicyRule(out *tabwriter.Writer, rule authorizationapi.PolicyRule, indent string) { - extensionString := "" - if rule.AttributeRestrictions != nil { - extensionString = fmt.Sprintf("%#v", rule.AttributeRestrictions) - - buffer := new(bytes.Buffer) - - printer := NewHumanReadablePrinter(kctl.PrintOptions{NoHeaders: true}) - if err := printer.PrintObj(rule.AttributeRestrictions, buffer); err == nil { - extensionString = strings.TrimSpace(buffer.String()) - } - } - - fmt.Fprintf(out, indent+"%v\t%v\t%v\t%v\t%v\t%v\n", - rule.Verbs.List(), - rule.NonResourceURLs.List(), - extensionString, - rule.ResourceNames.List(), - rule.APIGroups, - rule.Resources.List(), - ) -} - -// RoleDescriber generates information about a Project -type RoleDescriber struct { - client.Interface -} - -// Describe returns the description of a role -func (d *RoleDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.Roles(namespace) - role, err := c.Get(name) - if err != nil { - return "", err - } - - return DescribeRole(role) -} - -func DescribeRole(role *authorizationapi.Role) (string, error) { - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, role.ObjectMeta) - - fmt.Fprint(out, PolicyRuleHeadings+"\n") - for _, rule := range role.Rules { - DescribePolicyRule(out, rule, "") - - } - - return nil - }) -} - -// PolicyBindingDescriber generates information about a Project -type PolicyBindingDescriber struct { - client.Interface -} - -// Describe returns the 
description of a policyBinding -func (d *PolicyBindingDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.PolicyBindings(namespace) - policyBinding, err := c.Get(name) - if err != nil { - return "", err - } - - return DescribePolicyBinding(policyBinding) -} - -func DescribePolicyBinding(policyBinding *authorizationapi.PolicyBinding) (string, error) { - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, policyBinding.ObjectMeta) - formatString(out, "Last Modified", policyBinding.LastModified) - formatString(out, "Policy", policyBinding.PolicyRef.Namespace) - - // using .List() here because I always want the sorted order that it provides - for _, key := range sets.StringKeySet(policyBinding.RoleBindings).List() { - roleBinding := policyBinding.RoleBindings[key] - users, groups, sas, others := authorizationapi.SubjectsStrings(roleBinding.Namespace, roleBinding.Subjects) - - formatString(out, "RoleBinding["+key+"]", " ") - formatString(out, "\tRole", roleBinding.RoleRef.Name) - formatString(out, "\tUsers", strings.Join(users, ", ")) - formatString(out, "\tGroups", strings.Join(groups, ", ")) - formatString(out, "\tServiceAccounts", strings.Join(sas, ", ")) - formatString(out, "\tSubjects", strings.Join(others, ", ")) - } - - return nil - }) -} - -// RoleBindingDescriber generates information about a Project -type RoleBindingDescriber struct { - client.Interface -} - -// Describe returns the description of a roleBinding -func (d *RoleBindingDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.RoleBindings(namespace) - roleBinding, err := c.Get(name) - if err != nil { - return "", err - } - - var role *authorizationapi.Role - if len(roleBinding.RoleRef.Namespace) == 0 { - var clusterRole *authorizationapi.ClusterRole - clusterRole, err = d.ClusterRoles().Get(roleBinding.RoleRef.Name) - role = authorizationapi.ToRole(clusterRole) - } else { - 
role, err = d.Roles(roleBinding.RoleRef.Namespace).Get(roleBinding.RoleRef.Name) - } - - return DescribeRoleBinding(roleBinding, role, err) -} - -// DescribeRoleBinding prints out information about a role binding and its associated role -func DescribeRoleBinding(roleBinding *authorizationapi.RoleBinding, role *authorizationapi.Role, err error) (string, error) { - users, groups, sas, others := authorizationapi.SubjectsStrings(roleBinding.Namespace, roleBinding.Subjects) - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, roleBinding.ObjectMeta) - - formatString(out, "Role", roleBinding.RoleRef.Namespace+"/"+roleBinding.RoleRef.Name) - formatString(out, "Users", strings.Join(users, ", ")) - formatString(out, "Groups", strings.Join(groups, ", ")) - formatString(out, "ServiceAccounts", strings.Join(sas, ", ")) - formatString(out, "Subjects", strings.Join(others, ", ")) - - switch { - case err != nil: - formatString(out, "Policy Rules", fmt.Sprintf("error: %v", err)) - - case role != nil: - fmt.Fprint(out, PolicyRuleHeadings+"\n") - for _, rule := range role.Rules { - DescribePolicyRule(out, rule, "") - } - - default: - formatString(out, "Policy Rules", "") - } - - return nil - }) -} - -// ClusterPolicyDescriber generates information about a Project -type ClusterPolicyDescriber struct { - client.Interface -} - -// Describe returns the description of a policy -// TODO make something a lot prettier -func (d *ClusterPolicyDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ClusterPolicies() - policy, err := c.Get(name) - if err != nil { - return "", err - } - - return DescribePolicy(authorizationapi.ToPolicy(policy)) -} - -type ClusterRoleDescriber struct { - client.Interface -} - -// Describe returns the description of a role -func (d *ClusterRoleDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ClusterRoles() - role, err := c.Get(name) - if 
err != nil { - return "", err - } - - return DescribeRole(authorizationapi.ToRole(role)) -} - -// ClusterPolicyBindingDescriber generates information about a Project -type ClusterPolicyBindingDescriber struct { - client.Interface -} - -// Describe returns the description of a policyBinding -func (d *ClusterPolicyBindingDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ClusterPolicyBindings() - policyBinding, err := c.Get(name) - if err != nil { - return "", err - } - - return DescribePolicyBinding(authorizationapi.ToPolicyBinding(policyBinding)) -} - -// ClusterRoleBindingDescriber generates information about a Project -type ClusterRoleBindingDescriber struct { - client.Interface -} - -// Describe returns the description of a roleBinding -func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.ClusterRoleBindings() - roleBinding, err := c.Get(name) - if err != nil { - return "", err - } - - role, err := d.ClusterRoles().Get(roleBinding.RoleRef.Name) - return DescribeRoleBinding(authorizationapi.ToRoleBinding(roleBinding), authorizationapi.ToRole(role), err) -} - -func describeBuildTriggerCauses(causes []buildapi.BuildTriggerCause, out *tabwriter.Writer) { - if causes == nil { - formatString(out, "\nBuild trigger cause", "") - } - - for _, cause := range causes { - formatString(out, "\nBuild trigger cause", cause.Message) - - switch { - case cause.GitHubWebHook != nil: - squashGitInfo(cause.GitHubWebHook.Revision, out) - formatString(out, "Secret", cause.GitHubWebHook.Secret) - - case cause.GenericWebHook != nil: - squashGitInfo(cause.GenericWebHook.Revision, out) - formatString(out, "Secret", cause.GenericWebHook.Secret) - - case cause.ImageChangeBuild != nil: - formatString(out, "Image ID", cause.ImageChangeBuild.ImageID) - formatString(out, "Image Name/Kind", fmt.Sprintf("%s / %s", cause.ImageChangeBuild.FromRef.Name, 
cause.ImageChangeBuild.FromRef.Kind)) - } - } - fmt.Fprintf(out, "\n") -} - -func squashGitInfo(sourceRevision *buildapi.SourceRevision, out *tabwriter.Writer) { - if sourceRevision != nil && sourceRevision.Git != nil { - rev := sourceRevision.Git - var commit string - if len(rev.Commit) > 7 { - commit = rev.Commit[:7] - } else { - commit = rev.Commit - } - formatString(out, "Commit", fmt.Sprintf("%s (%s)", commit, rev.Message)) - hasAuthor := len(rev.Author.Name) != 0 - hasCommitter := len(rev.Committer.Name) != 0 - if hasAuthor && hasCommitter { - if rev.Author.Name == rev.Committer.Name { - formatString(out, "Author/Committer", rev.Author.Name) - } else { - formatString(out, "Author/Committer", fmt.Sprintf("%s / %s", rev.Author.Name, rev.Committer.Name)) - } - } else if hasAuthor { - formatString(out, "Author", rev.Author.Name) - } else if hasCommitter { - formatString(out, "Committer", rev.Committer.Name) - } - } -} - -type ClusterQuotaDescriber struct { - client.Interface -} - -func (d *ClusterQuotaDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - quota, err := d.ClusterResourceQuotas().Get(name) - if err != nil { - return "", err - } - return DescribeClusterQuota(quota) -} - -func DescribeClusterQuota(quota *quotaapi.ClusterResourceQuota) (string, error) { - labelSelector, err := unversioned.LabelSelectorAsSelector(quota.Spec.Selector.LabelSelector) - if err != nil { - return "", err - } - - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, quota.ObjectMeta) - fmt.Fprintf(out, "Label Selector: %s\n", labelSelector) - fmt.Fprintf(out, "AnnotationSelector: %s\n", quota.Spec.Selector.AnnotationSelector) - if len(quota.Spec.Quota.Scopes) > 0 { - scopes := []string{} - for _, scope := range quota.Spec.Quota.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - fmt.Fprintf(out, "Scopes:\t%s\n", strings.Join(scopes, ", ")) - } - fmt.Fprintf(out, "Resource\tUsed\tHard\n") 
- fmt.Fprintf(out, "--------\t----\t----\n") - - resources := []kapi.ResourceName{} - for resource := range quota.Status.Total.Hard { - resources = append(resources, resource) - } - sort.Sort(kctl.SortableResourceNames(resources)) - - msg := "%v\t%v\t%v\n" - for i := range resources { - resource := resources[i] - hardQuantity := quota.Status.Total.Hard[resource] - usedQuantity := quota.Status.Total.Used[resource] - fmt.Fprintf(out, msg, resource, usedQuantity.String(), hardQuantity.String()) - } - return nil - }) -} - -type AppliedClusterQuotaDescriber struct { - client.Interface -} - -func (d *AppliedClusterQuotaDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - quota, err := d.AppliedClusterResourceQuotas(namespace).Get(name) - if err != nil { - return "", err - } - return DescribeClusterQuota(quotaapi.ConvertAppliedClusterResourceQuotaToClusterResourceQuota(quota)) -} - -type ClusterNetworkDescriber struct { - client.Interface -} - -// Describe returns the description of a ClusterNetwork -func (d *ClusterNetworkDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - cn, err := d.ClusterNetwork().Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, cn.ObjectMeta) - formatString(out, "Cluster Network", cn.Network) - formatString(out, "Host Subnet Length", cn.HostSubnetLength) - formatString(out, "Service Network", cn.ServiceNetwork) - formatString(out, "Plugin Name", cn.PluginName) - return nil - }) -} - -type HostSubnetDescriber struct { - client.Interface -} - -// Describe returns the description of a HostSubnet -func (d *HostSubnetDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - hs, err := d.HostSubnets().Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, hs.ObjectMeta) - 
formatString(out, "Node", hs.Host) - formatString(out, "Node IP", hs.HostIP) - formatString(out, "Pod Subnet", hs.Subnet) - return nil - }) -} - -type NetNamespaceDescriber struct { - client.Interface -} - -// Describe returns the description of a NetNamespace -func (d *NetNamespaceDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - netns, err := d.NetNamespaces().Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, netns.ObjectMeta) - formatString(out, "Name", netns.NetName) - formatString(out, "ID", netns.NetID) - return nil - }) -} - -type EgressNetworkPolicyDescriber struct { - osClient client.Interface -} - -// Describe returns the description of an EgressNetworkPolicy -func (d *EgressNetworkPolicyDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) { - c := d.osClient.EgressNetworkPolicies(namespace) - policy, err := c.Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out *tabwriter.Writer) error { - formatMeta(out, policy.ObjectMeta) - for _, rule := range policy.Spec.Egress { - fmt.Fprintf(out, "Rule:\t%s to %s\n", rule.Type, rule.To.CIDRSelector) - } - return nil - }) -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/helpers.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/helpers.go deleted file mode 100644 index 9f34aeefe..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/helpers.go +++ /dev/null @@ -1,427 +0,0 @@ -package describe - -import ( - "bytes" - "fmt" - "regexp" - "strings" - "text/tabwriter" - "time" - - units "github.com/docker/go-units" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/sets" - - buildapi "github.com/openshift/origin/pkg/build/api" - "github.com/openshift/origin/pkg/client" - imageapi 
"github.com/openshift/origin/pkg/image/api" -) - -const emptyString = "" - -func tabbedString(f func(*tabwriter.Writer) error) (string, error) { - out := new(tabwriter.Writer) - buf := &bytes.Buffer{} - out.Init(buf, 0, 8, 1, '\t', 0) - - err := f(out) - if err != nil { - return "", err - } - - out.Flush() - str := string(buf.String()) - return str, nil -} - -func toString(v interface{}) string { - value := fmt.Sprintf("%v", v) - if len(value) == 0 { - value = emptyString - } - return value -} - -func bold(v interface{}) string { - return "\033[1m" + toString(v) + "\033[0m" -} - -func convertEnv(env []api.EnvVar) map[string]string { - result := make(map[string]string, len(env)) - for _, e := range env { - result[e.Name] = toString(e.Value) - } - return result -} - -func formatEnv(env api.EnvVar) string { - if env.ValueFrom != nil && env.ValueFrom.FieldRef != nil { - return fmt.Sprintf("%s=<%s>", env.Name, env.ValueFrom.FieldRef.FieldPath) - } - return fmt.Sprintf("%s=%s", env.Name, env.Value) -} - -func formatString(out *tabwriter.Writer, label string, v interface{}) { - fmt.Fprintf(out, fmt.Sprintf("%s:\t%s\n", label, toString(v))) -} - -func formatTime(out *tabwriter.Writer, label string, t time.Time) { - fmt.Fprintf(out, fmt.Sprintf("%s:\t%s ago\n", label, formatRelativeTime(t))) -} - -func formatLabels(labelMap map[string]string) string { - return labels.Set(labelMap).String() -} - -func extractAnnotations(annotations map[string]string, keys ...string) ([]string, map[string]string) { - extracted := make([]string, len(keys)) - remaining := make(map[string]string) - for k, v := range annotations { - remaining[k] = v - } - for i, key := range keys { - extracted[i] = remaining[key] - delete(remaining, key) - } - return extracted, remaining -} - -func formatMapStringString(out *tabwriter.Writer, label string, items map[string]string) { - keys := sets.NewString() - for k := range items { - keys.Insert(k) - } - if keys.Len() == 0 { - formatString(out, label, "") - 
return - } - for i, key := range keys.List() { - if i == 0 { - formatString(out, label, fmt.Sprintf("%s=%s", key, items[key])) - } else { - fmt.Fprintf(out, "%s\t%s=%s\n", "", key, items[key]) - } - } -} - -func formatAnnotations(out *tabwriter.Writer, m api.ObjectMeta, prefix string) { - values, annotations := extractAnnotations(m.Annotations, "description") - if len(values[0]) > 0 { - formatString(out, prefix+"Description", values[0]) - } - formatMapStringString(out, prefix+"Annotations", annotations) -} - -var timeNowFn = func() time.Time { - return time.Now() -} - -// Receives a time.Duration and returns Docker go-utils' -// human-readable output -func formatToHumanDuration(dur time.Duration) string { - return units.HumanDuration(dur) -} - -func formatRelativeTime(t time.Time) string { - return units.HumanDuration(timeNowFn().Sub(t)) -} - -// FormatRelativeTime converts a time field into a human readable age string (hours, minutes, days). -func FormatRelativeTime(t time.Time) string { - return formatRelativeTime(t) -} - -func formatMeta(out *tabwriter.Writer, m api.ObjectMeta) { - formatString(out, "Name", m.Name) - formatString(out, "Namespace", m.Namespace) - if !m.CreationTimestamp.IsZero() { - formatTime(out, "Created", m.CreationTimestamp.Time) - } - formatMapStringString(out, "Labels", m.Labels) - formatAnnotations(out, m, "") -} - -// DescribeWebhook holds the URL information about a webhook and for generic -// webhooks it tells us if we allow env variables. -type DescribeWebhook struct { - URL string - AllowEnv *bool -} - -// webhookDescribe returns a map of webhook trigger types and its corresponding -// information. 
-func webHooksDescribe(triggers []buildapi.BuildTriggerPolicy, name, namespace string, cli client.BuildConfigsNamespacer) map[string][]DescribeWebhook { - result := map[string][]DescribeWebhook{} - - for _, trigger := range triggers { - var webHookTrigger string - var allowEnv *bool - - switch trigger.Type { - case buildapi.GitHubWebHookBuildTriggerType: - webHookTrigger = trigger.GitHubWebHook.Secret - - case buildapi.GenericWebHookBuildTriggerType: - webHookTrigger = trigger.GenericWebHook.Secret - allowEnv = &trigger.GenericWebHook.AllowEnv - - default: - continue - } - webHookDesc := result[string(trigger.Type)] - - if len(webHookTrigger) == 0 { - continue - } - - var urlStr string - url, err := cli.BuildConfigs(namespace).WebHookURL(name, &trigger) - if err != nil { - urlStr = fmt.Sprintf("", err.Error()) - } else { - urlStr = url.String() - } - - webHookDesc = append(webHookDesc, - DescribeWebhook{ - URL: urlStr, - AllowEnv: allowEnv, - }) - result[string(trigger.Type)] = webHookDesc - } - - return result -} - -var reLongImageID = regexp.MustCompile(`[a-f0-9]{60,}$`) - -// shortenImagePullSpec returns a version of the pull spec intended for -// display, which may result in the image not being usable via cut-and-paste -// for users. 
-func shortenImagePullSpec(spec string) string { - if reLongImageID.MatchString(spec) { - return spec[:len(spec)-50] - } - return spec -} - -func formatImageStreamTags(out *tabwriter.Writer, stream *imageapi.ImageStream) { - if len(stream.Status.Tags) == 0 && len(stream.Spec.Tags) == 0 { - fmt.Fprintf(out, "Tags:\t\n") - return - } - - now := timeNowFn() - - images := make(map[string]string) - for tag, tags := range stream.Status.Tags { - for _, item := range tags.Items { - switch { - case len(item.Image) > 0: - if _, ok := images[item.Image]; !ok { - images[item.Image] = tag - } - case len(item.DockerImageReference) > 0: - if _, ok := images[item.DockerImageReference]; !ok { - images[item.Image] = item.DockerImageReference - } - } - } - } - - sortedTags := []string{} - for k := range stream.Status.Tags { - sortedTags = append(sortedTags, k) - } - var localReferences sets.String - var referentialTags map[string]sets.String - for k := range stream.Spec.Tags { - if target, _, ok, multiple := imageapi.FollowTagReference(stream, k); ok && multiple { - if referentialTags == nil { - referentialTags = make(map[string]sets.String) - } - if localReferences == nil { - localReferences = sets.NewString() - } - localReferences.Insert(k) - v := referentialTags[target] - if v == nil { - v = sets.NewString() - referentialTags[target] = v - } - v.Insert(k) - } - if _, ok := stream.Status.Tags[k]; !ok { - sortedTags = append(sortedTags, k) - } - } - fmt.Fprintf(out, "Unique Images:\t%d\nTags:\t%d\n\n", len(images), len(sortedTags)) - - first := true - imageapi.PrioritizeTags(sortedTags) - for _, tag := range sortedTags { - if localReferences.Has(tag) { - continue - } - if first { - first = false - } else { - fmt.Fprintf(out, "\n") - } - taglist, _ := stream.Status.Tags[tag] - tagRef, hasSpecTag := stream.Spec.Tags[tag] - scheduled := false - insecure := false - importing := false - - var name string - if hasSpecTag && tagRef.From != nil { - if len(tagRef.From.Namespace) > 0 && 
tagRef.From.Namespace != stream.Namespace { - name = fmt.Sprintf("%s/%s", tagRef.From.Namespace, tagRef.From.Name) - } else { - name = tagRef.From.Name - } - scheduled, insecure = tagRef.ImportPolicy.Scheduled, tagRef.ImportPolicy.Insecure - gen := imageapi.LatestObservedTagGeneration(stream, tag) - importing = !tagRef.Reference && tagRef.Generation != nil && *tagRef.Generation != gen - } - - // updates whenever tag :5.2 is changed - - // :latest (30 minutes ago) -> 102.205.358.453/foo/bar@sha256:abcde734 - // error: last import failed 20 minutes ago - // updates automatically from index.docker.io/mysql/bar - // will use insecure HTTPS connections or HTTP - // - // MySQL 5.5 - // --------- - // Describes a system for updating based on practical changes to a database system - // with some other data involved - // - // 20 minutes ago - // Failed to locate the server in time - // 30 minutes ago 102.205.358.453/foo/bar@sha256:abcdef - // 1 hour ago 102.205.358.453/foo/bar@sha256:bfedfc - - //var shortErrors []string - /* - var internalReference *imageapi.DockerImageReference - if value := stream.Status.DockerImageRepository; len(value) > 0 { - ref, err := imageapi.ParseDockerImageReference(value) - if err != nil { - internalReference = &ref - } - } - */ - - if referentialTags[tag].Len() > 0 { - references := referentialTags[tag].List() - imageapi.PrioritizeTags(references) - fmt.Fprintf(out, "%s (%s)\n", tag, strings.Join(references, ", ")) - } else { - fmt.Fprintf(out, "%s\n", tag) - } - - switch { - case !hasSpecTag || tagRef.From == nil: - fmt.Fprintf(out, " pushed image\n") - case tagRef.From.Kind == "ImageStreamTag": - switch { - case tagRef.Reference: - fmt.Fprintf(out, " reference to %s\n", name) - case scheduled: - fmt.Fprintf(out, " updates automatically from %s\n", name) - default: - fmt.Fprintf(out, " tagged from %s\n", name) - } - case tagRef.From.Kind == "DockerImage": - switch { - case tagRef.Reference: - fmt.Fprintf(out, " reference to registry %s\n", 
name) - case scheduled: - fmt.Fprintf(out, " updates automatically from registry %s\n", name) - default: - fmt.Fprintf(out, " tagged from %s\n", name) - } - case tagRef.From.Kind == "ImageStreamImage": - switch { - case tagRef.Reference: - fmt.Fprintf(out, " reference to image %s\n", name) - default: - fmt.Fprintf(out, " tagged from %s\n", name) - } - default: - switch { - case tagRef.Reference: - fmt.Fprintf(out, " reference to %s %s\n", tagRef.From.Kind, name) - default: - fmt.Fprintf(out, " updates from %s %s\n", tagRef.From.Kind, name) - } - } - if insecure { - fmt.Fprintf(out, " will use insecure HTTPS or HTTP connections\n") - } - - fmt.Fprintln(out) - - extraOutput := false - if d := tagRef.Annotations["description"]; len(d) > 0 { - fmt.Fprintf(out, " %s\n", d) - extraOutput = true - } - if t := tagRef.Annotations["tags"]; len(t) > 0 { - fmt.Fprintf(out, " Tags: %s\n", strings.Join(strings.Split(t, ","), ", ")) - extraOutput = true - } - if t := tagRef.Annotations["supports"]; len(t) > 0 { - fmt.Fprintf(out, " Supports: %s\n", strings.Join(strings.Split(t, ","), ", ")) - extraOutput = true - } - if t := tagRef.Annotations["sampleRepo"]; len(t) > 0 { - fmt.Fprintf(out, " Example Repo: %s\n", t) - extraOutput = true - } - if extraOutput { - fmt.Fprintln(out) - } - - if importing { - fmt.Fprintf(out, " ~ importing latest image ...\n") - } - - for i := range taglist.Conditions { - condition := &taglist.Conditions[i] - switch condition.Type { - case imageapi.ImportSuccess: - if condition.Status == api.ConditionFalse { - d := now.Sub(condition.LastTransitionTime.Time) - fmt.Fprintf(out, " ! 
error: Import failed (%s): %s\n %s ago\n", condition.Reason, condition.Message, units.HumanDuration(d)) - } - } - } - - if len(taglist.Items) == 0 { - continue - } - - for i, event := range taglist.Items { - d := now.Sub(event.Created.Time) - - if i == 0 { - fmt.Fprintf(out, " * %s\n", event.DockerImageReference) - } else { - fmt.Fprintf(out, " %s\n", event.DockerImageReference) - } - - ref, err := imageapi.ParseDockerImageReference(event.DockerImageReference) - id := event.Image - if len(id) > 0 && err == nil && ref.ID != id { - fmt.Fprintf(out, " %s ago\t%s\n", units.HumanDuration(d), id) - } else { - fmt.Fprintf(out, " %s ago\n", units.HumanDuration(d)) - } - } - } -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/printer.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/printer.go deleted file mode 100644 index f47f63fdf..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/printer.go +++ /dev/null @@ -1,1057 +0,0 @@ -package describe - -import ( - "fmt" - "io" - "regexp" - "sort" - "strings" - "text/tabwriter" - "time" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - kctl "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/util/sets" - - authorizationapi "github.com/openshift/origin/pkg/authorization/api" - buildapi "github.com/openshift/origin/pkg/build/api" - deployapi "github.com/openshift/origin/pkg/deploy/api" - imageapi "github.com/openshift/origin/pkg/image/api" - oauthapi "github.com/openshift/origin/pkg/oauth/api" - projectapi "github.com/openshift/origin/pkg/project/api" - quotaapi "github.com/openshift/origin/pkg/quota/api" - routeapi "github.com/openshift/origin/pkg/route/api" - sdnapi "github.com/openshift/origin/pkg/sdn/api" - templateapi "github.com/openshift/origin/pkg/template/api" - userapi "github.com/openshift/origin/pkg/user/api" -) - -var ( - buildColumns = []string{"NAME", "TYPE", "FROM", "STATUS", "STARTED", 
"DURATION"} - buildConfigColumns = []string{"NAME", "TYPE", "FROM", "LATEST"} - imageColumns = []string{"NAME", "DOCKER REF"} - imageStreamTagColumns = []string{"NAME", "DOCKER REF", "UPDATED", "IMAGENAME"} - imageStreamImageColumns = []string{"NAME", "DOCKER REF", "UPDATED", "IMAGENAME"} - imageStreamColumns = []string{"NAME", "DOCKER REPO", "TAGS", "UPDATED"} - projectColumns = []string{"NAME", "DISPLAY NAME", "STATUS"} - routeColumns = []string{"NAME", "HOST/PORT", "PATH", "SERVICES", "PORT", "TERMINATION"} - deploymentConfigColumns = []string{"NAME", "REVISION", "DESIRED", "CURRENT", "TRIGGERED BY"} - templateColumns = []string{"NAME", "DESCRIPTION", "PARAMETERS", "OBJECTS"} - policyColumns = []string{"NAME", "ROLES", "LAST MODIFIED"} - policyBindingColumns = []string{"NAME", "ROLE BINDINGS", "LAST MODIFIED"} - roleBindingColumns = []string{"NAME", "ROLE", "USERS", "GROUPS", "SERVICE ACCOUNTS", "SUBJECTS"} - roleColumns = []string{"NAME"} - - oauthClientColumns = []string{"NAME", "SECRET", "WWW-CHALLENGE", "REDIRECT URIS"} - oauthClientAuthorizationColumns = []string{"NAME", "USER NAME", "CLIENT NAME", "SCOPES"} - oauthAccessTokenColumns = []string{"NAME", "USER NAME", "CLIENT NAME", "CREATED", "EXPIRES", "REDIRECT URI", "SCOPES"} - oauthAuthorizeTokenColumns = []string{"NAME", "USER NAME", "CLIENT NAME", "CREATED", "EXPIRES", "REDIRECT URI", "SCOPES"} - - userColumns = []string{"NAME", "UID", "FULL NAME", "IDENTITIES"} - identityColumns = []string{"NAME", "IDP NAME", "IDP USER NAME", "USER NAME", "USER UID"} - userIdentityMappingColumns = []string{"NAME", "IDENTITY", "USER NAME", "USER UID"} - groupColumns = []string{"NAME", "USERS"} - - // IsPersonalSubjectAccessReviewColumns contains known custom role extensions - IsPersonalSubjectAccessReviewColumns = []string{"NAME"} - - hostSubnetColumns = []string{"NAME", "HOST", "HOST IP", "SUBNET"} - netNamespaceColumns = []string{"NAME", "NETID"} - clusterNetworkColumns = []string{"NAME", "NETWORK", "HOST SUBNET 
LENGTH", "SERVICE NETWORK", "PLUGIN NAME"} - egressNetworkPolicyColumns = []string{"NAME"} - - clusterResourceQuotaColumns = []string{"NAME", "LABEL SELECTOR", "ANNOTATION SELECTOR"} -) - -// NewHumanReadablePrinter returns a new HumanReadablePrinter -func NewHumanReadablePrinter(printOptions kctl.PrintOptions) *kctl.HumanReadablePrinter { - // TODO: support cross namespace listing - p := kctl.NewHumanReadablePrinter(printOptions) - p.Handler(buildColumns, printBuild) - p.Handler(buildColumns, printBuildList) - p.Handler(buildConfigColumns, printBuildConfig) - p.Handler(buildConfigColumns, printBuildConfigList) - p.Handler(imageColumns, printImage) - p.Handler(imageStreamTagColumns, printImageStreamTag) - p.Handler(imageStreamTagColumns, printImageStreamTagList) - p.Handler(imageStreamImageColumns, printImageStreamImage) - p.Handler(imageColumns, printImageList) - p.Handler(imageStreamColumns, printImageStream) - p.Handler(imageStreamColumns, printImageStreamList) - p.Handler(projectColumns, printProject) - p.Handler(projectColumns, printProjectList) - p.Handler(routeColumns, printRoute) - p.Handler(routeColumns, printRouteList) - p.Handler(deploymentConfigColumns, printDeploymentConfig) - p.Handler(deploymentConfigColumns, printDeploymentConfigList) - p.Handler(templateColumns, printTemplate) - p.Handler(templateColumns, printTemplateList) - - p.Handler(policyColumns, printPolicy) - p.Handler(policyColumns, printPolicyList) - p.Handler(policyBindingColumns, printPolicyBinding) - p.Handler(policyBindingColumns, printPolicyBindingList) - p.Handler(roleBindingColumns, printRoleBinding) - p.Handler(roleBindingColumns, printRoleBindingList) - p.Handler(roleColumns, printRole) - p.Handler(roleColumns, printRoleList) - - p.Handler(policyColumns, printClusterPolicy) - p.Handler(policyColumns, printClusterPolicyList) - p.Handler(policyBindingColumns, printClusterPolicyBinding) - p.Handler(policyBindingColumns, printClusterPolicyBindingList) - p.Handler(roleColumns, 
printClusterRole) - p.Handler(roleColumns, printClusterRoleList) - p.Handler(roleBindingColumns, printClusterRoleBinding) - p.Handler(roleBindingColumns, printClusterRoleBindingList) - - p.Handler(oauthClientColumns, printOAuthClient) - p.Handler(oauthClientColumns, printOAuthClientList) - p.Handler(oauthClientAuthorizationColumns, printOAuthClientAuthorization) - p.Handler(oauthClientAuthorizationColumns, printOAuthClientAuthorizationList) - p.Handler(oauthAccessTokenColumns, printOAuthAccessToken) - p.Handler(oauthAccessTokenColumns, printOAuthAccessTokenList) - p.Handler(oauthAuthorizeTokenColumns, printOAuthAuthorizeToken) - p.Handler(oauthAuthorizeTokenColumns, printOAuthAuthorizeTokenList) - - p.Handler(userColumns, printUser) - p.Handler(userColumns, printUserList) - p.Handler(identityColumns, printIdentity) - p.Handler(identityColumns, printIdentityList) - p.Handler(userIdentityMappingColumns, printUserIdentityMapping) - p.Handler(groupColumns, printGroup) - p.Handler(groupColumns, printGroupList) - - p.Handler(IsPersonalSubjectAccessReviewColumns, printIsPersonalSubjectAccessReview) - - p.Handler(hostSubnetColumns, printHostSubnet) - p.Handler(hostSubnetColumns, printHostSubnetList) - p.Handler(netNamespaceColumns, printNetNamespaceList) - p.Handler(netNamespaceColumns, printNetNamespace) - p.Handler(clusterNetworkColumns, printClusterNetwork) - p.Handler(clusterNetworkColumns, printClusterNetworkList) - p.Handler(egressNetworkPolicyColumns, printEgressNetworkPolicy) - p.Handler(egressNetworkPolicyColumns, printEgressNetworkPolicyList) - - p.Handler(clusterResourceQuotaColumns, printClusterResourceQuota) - p.Handler(clusterResourceQuotaColumns, printClusterResourceQuotaList) - p.Handler(clusterResourceQuotaColumns, printAppliedClusterResourceQuota) - p.Handler(clusterResourceQuotaColumns, printAppliedClusterResourceQuotaList) - - return p -} - -const templateDescriptionLen = 80 - -// PrintTemplateParameters the Template parameters with their default values 
-func PrintTemplateParameters(params []templateapi.Parameter, output io.Writer) error { - w := tabwriter.NewWriter(output, 20, 5, 3, ' ', 0) - defer w.Flush() - parameterColumns := []string{"NAME", "DESCRIPTION", "GENERATOR", "VALUE"} - fmt.Fprintf(w, "%s\n", strings.Join(parameterColumns, "\t")) - for _, p := range params { - value := p.Value - if len(p.Generate) != 0 { - value = p.From - } - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", p.Name, p.Description, p.Generate, value) - if err != nil { - return err - } - } - return nil -} - -// formatResourceName receives a resource kind, name, and boolean specifying -// whether or not to update the current name to "kind/name" -func formatResourceName(kind, name string, withKind bool) string { - if !withKind || kind == "" { - return name - } - - return kind + "/" + name -} - -func printTemplate(t *templateapi.Template, w io.Writer, opts kctl.PrintOptions) error { - description := "" - if t.Annotations != nil { - description = t.Annotations["description"] - } - if len(description) > templateDescriptionLen { - description = strings.TrimSpace(description[:templateDescriptionLen-3]) + "..." 
- } - empty, generated, total := 0, 0, len(t.Parameters) - for _, p := range t.Parameters { - if len(p.Value) > 0 { - continue - } - if len(p.Generate) > 0 { - generated++ - continue - } - empty++ - } - params := "" - switch { - case empty > 0: - params = fmt.Sprintf("%d (%d blank)", total, empty) - case generated > 0: - params = fmt.Sprintf("%d (%d generated)", total, generated) - default: - params = fmt.Sprintf("%d (all set)", total) - } - - name := formatResourceName(opts.Kind, t.Name, opts.WithKind) - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", t.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%d", name, description, params, len(t.Objects)); err != nil { - return err - } - if err := appendItemLabels(t.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printTemplateList(list *templateapi.TemplateList, w io.Writer, opts kctl.PrintOptions) error { - for _, t := range list.Items { - if err := printTemplate(&t, w, opts); err != nil { - return err - } - } - return nil -} - -func printBuild(build *buildapi.Build, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, build.Name, opts.WithKind) - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", build.Namespace); err != nil { - return err - } - } - var created string - if build.Status.StartTimestamp != nil { - created = fmt.Sprintf("%s ago", formatRelativeTime(build.Status.StartTimestamp.Time)) - } - var duration string - if build.Status.Duration > 0 { - duration = build.Status.Duration.String() - } - from := describeSourceShort(build.Spec.CommonSpec) - status := string(build.Status.Phase) - if len(build.Status.Reason) > 0 { - status = fmt.Sprintf("%s (%s)", status, build.Status.Reason) - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", name, buildapi.StrategyType(build.Spec.Strategy), from, status, created, duration); err != nil { - return err - } - if err 
:= appendItemLabels(build.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func describeSourceShort(spec buildapi.CommonSpec) string { - var from string - switch source := spec.Source; { - case source.Binary != nil: - from = "Binary" - if rev := describeSourceGitRevision(spec); len(rev) != 0 { - from = fmt.Sprintf("%s@%s", from, rev) - } - case source.Dockerfile != nil && source.Git != nil: - from = "Dockerfile,Git" - if rev := describeSourceGitRevision(spec); len(rev) != 0 { - from = fmt.Sprintf("%s@%s", from, rev) - } - case source.Dockerfile != nil: - from = "Dockerfile" - case source.Git != nil: - from = "Git" - if rev := describeSourceGitRevision(spec); len(rev) != 0 { - from = fmt.Sprintf("%s@%s", from, rev) - } - default: - from = buildapi.SourceType(source) - } - return from -} - -var nonCommitRev = regexp.MustCompile("[^a-fA-F0-9]") - -func describeSourceGitRevision(spec buildapi.CommonSpec) string { - var rev string - if spec.Revision != nil && spec.Revision.Git != nil { - rev = spec.Revision.Git.Commit - } - if len(rev) == 0 && spec.Source.Git != nil { - rev = spec.Source.Git.Ref - } - // if this appears to be a full Git commit hash, shorten it to 7 characters for brevity - if !nonCommitRev.MatchString(rev) && len(rev) > 20 { - rev = rev[:7] - } - return rev -} - -func printBuildList(buildList *buildapi.BuildList, w io.Writer, opts kctl.PrintOptions) error { - builds := buildList.Items - sort.Sort(buildapi.BuildSliceByCreationTimestamp(builds)) - for _, build := range builds { - if err := printBuild(&build, w, opts); err != nil { - return err - } - } - return nil -} - -func printBuildConfig(bc *buildapi.BuildConfig, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, bc.Name, opts.WithKind) - from := describeSourceShort(bc.Spec.CommonSpec) - - if bc.Spec.Strategy.CustomStrategy != nil { - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", bc.Namespace); err != nil 
{ - return err - } - } - _, err := fmt.Fprintf(w, "%s\t%v\t%s\t%d\n", name, buildapi.StrategyType(bc.Spec.Strategy), bc.Spec.Strategy.CustomStrategy.From.Name, bc.Status.LastVersion) - return err - } - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", bc.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%v\t%s\t%d", name, buildapi.StrategyType(bc.Spec.Strategy), from, bc.Status.LastVersion); err != nil { - return err - } - if err := appendItemLabels(bc.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printBuildConfigList(buildList *buildapi.BuildConfigList, w io.Writer, opts kctl.PrintOptions) error { - for _, buildConfig := range buildList.Items { - if err := printBuildConfig(&buildConfig, w, opts); err != nil { - return err - } - } - return nil -} - -func printImage(image *imageapi.Image, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, image.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\n", name, image.DockerImageReference) - return err -} - -func printImageStreamTag(ist *imageapi.ImageStreamTag, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, ist.Name, opts.WithKind) - created := fmt.Sprintf("%s ago", formatRelativeTime(ist.CreationTimestamp.Time)) - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", ist.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, ist.Image.DockerImageReference, created, ist.Image.Name); err != nil { - return err - } - if err := appendItemLabels(ist.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printImageStreamTagList(list *imageapi.ImageStreamTagList, w io.Writer, opts kctl.PrintOptions) error { - for _, ist := range list.Items { - if err := printImageStreamTag(&ist, w, opts); err != nil { - return err - } - } - return nil -} - -func 
printImageStreamImage(isi *imageapi.ImageStreamImage, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, isi.Name, opts.WithKind) - created := fmt.Sprintf("%s ago", formatRelativeTime(isi.CreationTimestamp.Time)) - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", isi.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, isi.Image.DockerImageReference, created, isi.Image.Name); err != nil { - return err - } - if err := appendItemLabels(isi.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printImageList(images *imageapi.ImageList, w io.Writer, opts kctl.PrintOptions) error { - for _, image := range images.Items { - if err := printImage(&image, w, opts); err != nil { - return err - } - } - return nil -} - -func printImageStream(stream *imageapi.ImageStream, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, stream.Name, opts.WithKind) - tags := "" - const numOfTagsShown = 3 - - var latest unversioned.Time - for _, list := range stream.Status.Tags { - if len(list.Items) > 0 { - if list.Items[0].Created.After(latest.Time) { - latest = list.Items[0].Created - } - } - } - latestTime := "" - if !latest.IsZero() { - latestTime = fmt.Sprintf("%s ago", formatRelativeTime(latest.Time)) - } - list := imageapi.SortStatusTags(stream.Status.Tags) - more := false - if len(list) > numOfTagsShown { - list = list[:numOfTagsShown] - more = true - } - tags = strings.Join(list, ",") - if more { - tags = fmt.Sprintf("%s + %d more...", tags, len(stream.Status.Tags)-numOfTagsShown) - } - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", stream.Namespace); err != nil { - return err - } - } - repo := stream.Spec.DockerImageRepository - if len(repo) == 0 { - repo = stream.Status.DockerImageRepository - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, repo, tags, latestTime); err != nil { - return 
err - } - if err := appendItemLabels(stream.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printImageStreamList(streams *imageapi.ImageStreamList, w io.Writer, opts kctl.PrintOptions) error { - for _, stream := range streams.Items { - if err := printImageStream(&stream, w, opts); err != nil { - return err - } - } - return nil -} - -func printProject(project *projectapi.Project, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, project.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, project.Annotations[projectapi.ProjectDisplayName], project.Status.Phase) - if err := appendItemLabels(project.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return err -} - -// SortableProjects is a list of projects that can be sorted -type SortableProjects []projectapi.Project - -func (list SortableProjects) Len() int { - return len(list) -} - -func (list SortableProjects) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableProjects) Less(i, j int) bool { - return list[i].ObjectMeta.Name < list[j].ObjectMeta.Name -} - -func printProjectList(projects *projectapi.ProjectList, w io.Writer, opts kctl.PrintOptions) error { - sort.Sort(SortableProjects(projects.Items)) - for _, project := range projects.Items { - if err := printProject(&project, w, opts); err != nil { - return err - } - } - return nil -} - -func printRoute(route *routeapi.Route, w io.Writer, opts kctl.PrintOptions) error { - tlsTerm := "" - insecurePolicy := "" - if route.Spec.TLS != nil { - tlsTerm = string(route.Spec.TLS.Termination) - insecurePolicy = string(route.Spec.TLS.InsecureEdgeTerminationPolicy) - } - - name := formatResourceName(opts.Kind, route.Name, opts.WithKind) - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", route.Namespace); err != nil { - return err - } - } - var ( - matchedHost bool - reason string - host = 
route.Spec.Host - - admitted, errors = 0, 0 - ) - for _, ingress := range route.Status.Ingress { - switch status, condition := routeapi.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status { - case kapi.ConditionTrue: - admitted++ - if !matchedHost { - matchedHost = ingress.Host == route.Spec.Host - host = ingress.Host - } - case kapi.ConditionFalse: - reason = condition.Reason - errors++ - } - } - switch { - case route.Status.Ingress == nil: - // this is the legacy case, we should continue to show the host when talking to servers - // that have not set status ingress, since we can't distinguish this condition from there - // being no routers. - case admitted == 0 && errors > 0: - host = reason - case errors > 0: - host = fmt.Sprintf("%s ... %d rejected", host, errors) - case admitted == 0: - host = "Pending" - case admitted > 1: - host = fmt.Sprintf("%s ... %d more", host, admitted-1) - } - var policy string - switch { - case len(tlsTerm) != 0 && len(insecurePolicy) != 0: - policy = fmt.Sprintf("%s/%s", tlsTerm, insecurePolicy) - case len(tlsTerm) != 0: - policy = tlsTerm - case len(insecurePolicy) != 0: - policy = fmt.Sprintf("default/%s", insecurePolicy) - default: - policy = "" - } - - backends := append([]routeapi.RouteTargetReference{route.Spec.To}, route.Spec.AlternateBackends...) 
- totalWeight := int32(0) - for _, backend := range backends { - if backend.Weight != nil { - totalWeight += *backend.Weight - } - } - var backendInfo []string - for _, backend := range backends { - switch { - case backend.Weight == nil, len(backends) == 1 && totalWeight != 0: - backendInfo = append(backendInfo, backend.Name) - case totalWeight == 0: - backendInfo = append(backendInfo, fmt.Sprintf("%s(0%%)", backend.Name)) - default: - backendInfo = append(backendInfo, fmt.Sprintf("%s(%d%%)", backend.Name, *backend.Weight*100/totalWeight)) - } - } - - var port string - if route.Spec.Port != nil { - port = route.Spec.Port.TargetPort.String() - } else { - port = "" - } - - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", name, host, route.Spec.Path, strings.Join(backendInfo, ","), port, policy) - return err -} - -func printRouteList(routeList *routeapi.RouteList, w io.Writer, opts kctl.PrintOptions) error { - for _, route := range routeList.Items { - if err := printRoute(&route, w, opts); err != nil { - return err - } - } - return nil -} - -func printDeploymentConfig(dc *deployapi.DeploymentConfig, w io.Writer, opts kctl.PrintOptions) error { - var desired string - if dc.Spec.Test { - desired = fmt.Sprintf("%d (during test)", dc.Spec.Replicas) - } else { - desired = fmt.Sprintf("%d", dc.Spec.Replicas) - } - - containers := sets.NewString() - if dc.Spec.Template != nil { - for _, c := range dc.Spec.Template.Spec.Containers { - containers.Insert(c.Name) - } - } - //names := containers.List() - referencedContainers := sets.NewString() - - triggers := sets.String{} - for _, trigger := range dc.Spec.Triggers { - switch t := trigger.Type; t { - case deployapi.DeploymentTriggerOnConfigChange: - triggers.Insert("config") - case deployapi.DeploymentTriggerOnImageChange: - if p := trigger.ImageChangeParams; p != nil && p.Automatic { - var prefix string - if len(containers) != 1 && !containers.HasAll(p.ContainerNames...) 
{ - sort.Sort(sort.StringSlice(p.ContainerNames)) - prefix = strings.Join(p.ContainerNames, ",") + ":" - } - referencedContainers.Insert(p.ContainerNames...) - switch p.From.Kind { - case "ImageStreamTag": - triggers.Insert(fmt.Sprintf("image(%s%s)", prefix, p.From.Name)) - default: - triggers.Insert(fmt.Sprintf("%s(%s%s)", p.From.Kind, prefix, p.From.Name)) - } - } - default: - triggers.Insert(string(t)) - } - } - - name := formatResourceName(opts.Kind, dc.Name, opts.WithKind) - trigger := strings.Join(triggers.List(), ",") - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", dc.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%d\t%s\t%d\t%s", name, dc.Status.LatestVersion, desired, dc.Status.UpdatedReplicas, trigger); err != nil { - return err - } - err := appendItemLabels(dc.Labels, w, opts.ColumnLabels, opts.ShowLabels) - return err -} - -func printDeploymentConfigList(list *deployapi.DeploymentConfigList, w io.Writer, opts kctl.PrintOptions) error { - for _, dc := range list.Items { - if err := printDeploymentConfig(&dc, w, opts); err != nil { - return err - } - } - return nil -} - -func printPolicy(policy *authorizationapi.Policy, w io.Writer, opts kctl.PrintOptions) error { - roleNames := sets.String{} - for key := range policy.Roles { - roleNames.Insert(key) - } - - name := formatResourceName(opts.Kind, policy.Name, opts.WithKind) - rolesString := strings.Join(roleNames.List(), ", ") - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", policy.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%v", name, rolesString, policy.LastModified); err != nil { - return err - } - if err := appendItemLabels(policy.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printPolicyList(list *authorizationapi.PolicyList, w io.Writer, opts kctl.PrintOptions) error { - for _, policy := range list.Items { - if err := printPolicy(&policy, 
w, opts); err != nil { - return err - } - } - return nil -} - -func printPolicyBinding(policyBinding *authorizationapi.PolicyBinding, w io.Writer, opts kctl.PrintOptions) error { - roleBindingNames := sets.String{} - for key := range policyBinding.RoleBindings { - roleBindingNames.Insert(key) - } - - name := formatResourceName(opts.Kind, policyBinding.Name, opts.WithKind) - roleBindingsString := strings.Join(roleBindingNames.List(), ", ") - - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", policyBinding.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%v", name, roleBindingsString, policyBinding.LastModified); err != nil { - return err - } - if err := appendItemLabels(policyBinding.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printPolicyBindingList(list *authorizationapi.PolicyBindingList, w io.Writer, opts kctl.PrintOptions) error { - for _, policyBinding := range list.Items { - if err := printPolicyBinding(&policyBinding, w, opts); err != nil { - return err - } - } - return nil -} - -func printClusterPolicy(policy *authorizationapi.ClusterPolicy, w io.Writer, opts kctl.PrintOptions) error { - return printPolicy(authorizationapi.ToPolicy(policy), w, opts) -} - -func printClusterPolicyList(list *authorizationapi.ClusterPolicyList, w io.Writer, opts kctl.PrintOptions) error { - return printPolicyList(authorizationapi.ToPolicyList(list), w, opts) -} - -func printClusterPolicyBinding(policyBinding *authorizationapi.ClusterPolicyBinding, w io.Writer, opts kctl.PrintOptions) error { - return printPolicyBinding(authorizationapi.ToPolicyBinding(policyBinding), w, opts) -} - -func printClusterPolicyBindingList(list *authorizationapi.ClusterPolicyBindingList, w io.Writer, opts kctl.PrintOptions) error { - return printPolicyBindingList(authorizationapi.ToPolicyBindingList(list), w, opts) -} - -func printClusterRole(role *authorizationapi.ClusterRole, w io.Writer, opts 
kctl.PrintOptions) error { - return printRole(authorizationapi.ToRole(role), w, opts) -} - -func printClusterRoleList(list *authorizationapi.ClusterRoleList, w io.Writer, opts kctl.PrintOptions) error { - return printRoleList(authorizationapi.ToRoleList(list), w, opts) -} - -func printClusterRoleBinding(roleBinding *authorizationapi.ClusterRoleBinding, w io.Writer, opts kctl.PrintOptions) error { - return printRoleBinding(authorizationapi.ToRoleBinding(roleBinding), w, opts) -} - -func printClusterRoleBindingList(list *authorizationapi.ClusterRoleBindingList, w io.Writer, opts kctl.PrintOptions) error { - return printRoleBindingList(authorizationapi.ToRoleBindingList(list), w, opts) -} - -func printIsPersonalSubjectAccessReview(a *authorizationapi.IsPersonalSubjectAccessReview, w io.Writer, opts kctl.PrintOptions) error { - _, err := fmt.Fprintf(w, "IsPersonalSubjectAccessReview\n") - return err -} - -func printRole(role *authorizationapi.Role, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, role.Name, opts.WithKind) - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", role.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s", name); err != nil { - return err - } - if err := appendItemLabels(role.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printRoleList(list *authorizationapi.RoleList, w io.Writer, opts kctl.PrintOptions) error { - for _, role := range list.Items { - if err := printRole(&role, w, opts); err != nil { - return err - } - } - - return nil -} - -func printRoleBinding(roleBinding *authorizationapi.RoleBinding, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, roleBinding.Name, opts.WithKind) - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", roleBinding.Namespace); err != nil { - return err - } - } - users, groups, sas, others := 
authorizationapi.SubjectsStrings(roleBinding.Namespace, roleBinding.Subjects) - - if _, err := fmt.Fprintf(w, "%s\t%s\t%v\t%v\t%v\t%v", name, roleBinding.RoleRef.Namespace+"/"+roleBinding.RoleRef.Name, strings.Join(users, ", "), strings.Join(groups, ", "), strings.Join(sas, ", "), strings.Join(others, ", ")); err != nil { - return err - } - if err := appendItemLabels(roleBinding.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printRoleBindingList(list *authorizationapi.RoleBindingList, w io.Writer, opts kctl.PrintOptions) error { - for _, roleBinding := range list.Items { - if err := printRoleBinding(&roleBinding, w, opts); err != nil { - return err - } - } - - return nil -} - -func printOAuthClient(client *oauthapi.OAuthClient, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, client.Name, opts.WithKind) - challenge := "FALSE" - if client.RespondWithChallenges { - challenge = "TRUE" - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%v", name, client.Secret, challenge, strings.Join(client.RedirectURIs, ",")); err != nil { - return err - } - if err := appendItemLabels(client.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { - return err - } - return nil -} - -func printOAuthClientList(list *oauthapi.OAuthClientList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printOAuthClient(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printOAuthClientAuthorization(auth *oauthapi.OAuthClientAuthorization, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, auth.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", name, auth.UserName, auth.ClientName, strings.Join(auth.Scopes, ",")) - return err -} - -func printOAuthClientAuthorizationList(list *oauthapi.OAuthClientAuthorizationList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { 
- if err := printOAuthClientAuthorization(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printOAuthAccessToken(token *oauthapi.OAuthAccessToken, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, token.Name, opts.WithKind) - created := token.CreationTimestamp - expires := created.Add(time.Duration(token.ExpiresIn) * time.Second) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", name, token.UserName, token.ClientName, created, expires, token.RedirectURI, strings.Join(token.Scopes, ",")) - return err -} - -func printOAuthAccessTokenList(list *oauthapi.OAuthAccessTokenList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printOAuthAccessToken(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printOAuthAuthorizeToken(token *oauthapi.OAuthAuthorizeToken, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, token.Name, opts.WithKind) - created := token.CreationTimestamp - expires := created.Add(time.Duration(token.ExpiresIn) * time.Second) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", name, token.UserName, token.ClientName, created, expires, token.RedirectURI, strings.Join(token.Scopes, ",")) - return err -} - -func printOAuthAuthorizeTokenList(list *oauthapi.OAuthAuthorizeTokenList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printOAuthAuthorizeToken(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printUser(user *userapi.User, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, user.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", name, user.UID, user.FullName, strings.Join(user.Identities, ", ")) - return err -} - -func printUserList(list *userapi.UserList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := 
printUser(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printIdentity(identity *userapi.Identity, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, identity.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", name, identity.ProviderName, identity.ProviderUserName, identity.User.Name, identity.User.UID) - return err -} - -func printIdentityList(list *userapi.IdentityList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printIdentity(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printUserIdentityMapping(mapping *userapi.UserIdentityMapping, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, mapping.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", name, mapping.Identity.Name, mapping.User.Name, mapping.User.UID) - return err -} - -func printGroup(group *userapi.Group, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, group.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\n", name, strings.Join(group.Users, ", ")) - return err -} - -func printGroupList(list *userapi.GroupList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printGroup(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printHostSubnet(h *sdnapi.HostSubnet, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, h.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", name, h.Host, h.HostIP, h.Subnet) - return err -} - -func printHostSubnetList(list *sdnapi.HostSubnetList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printHostSubnet(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printNetNamespace(h *sdnapi.NetNamespace, w io.Writer, opts kctl.PrintOptions) error { - name 
:= formatResourceName(opts.Kind, h.NetName, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%d\n", name, h.NetID) - return err -} - -func printNetNamespaceList(list *sdnapi.NetNamespaceList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printNetNamespace(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printClusterNetwork(n *sdnapi.ClusterNetwork, w io.Writer, opts kctl.PrintOptions) error { - name := formatResourceName(opts.Kind, n.Name, opts.WithKind) - _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\n", name, n.Network, n.HostSubnetLength, n.ServiceNetwork, n.PluginName) - return err -} - -func printClusterNetworkList(list *sdnapi.ClusterNetworkList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printClusterNetwork(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func printEgressNetworkPolicy(n *sdnapi.EgressNetworkPolicy, w io.Writer, opts kctl.PrintOptions) error { - if opts.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", n.Namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\n", n.Name); err != nil { - return err - } - return nil -} - -func printEgressNetworkPolicyList(list *sdnapi.EgressNetworkPolicyList, w io.Writer, opts kctl.PrintOptions) error { - for _, item := range list.Items { - if err := printEgressNetworkPolicy(&item, w, opts); err != nil { - return err - } - } - return nil -} - -func appendItemLabels(itemLabels map[string]string, w io.Writer, columnLabels []string, showLabels bool) error { - if _, err := fmt.Fprint(w, kctl.AppendLabels(itemLabels, columnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, kctl.AppendAllLabels(showLabels, itemLabels)); err != nil { - return err - } - return nil -} - -func printClusterResourceQuota(resourceQuota *quotaapi.ClusterResourceQuota, w io.Writer, options kctl.PrintOptions) error { - name := formatResourceName(options.Kind, 
resourceQuota.Name, options.WithKind) - - if _, err := fmt.Fprintf(w, "%s", name); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(resourceQuota.Spec.Selector.LabelSelector)); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", resourceQuota.Spec.Selector.AnnotationSelector); err != nil { - return err - } - if _, err := fmt.Fprint(w, kctl.AppendLabels(resourceQuota.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, kctl.AppendAllLabels(options.ShowLabels, resourceQuota.Labels)) - return err -} - -func printClusterResourceQuotaList(list *quotaapi.ClusterResourceQuotaList, w io.Writer, options kctl.PrintOptions) error { - for i := range list.Items { - if err := printClusterResourceQuota(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printAppliedClusterResourceQuota(resourceQuota *quotaapi.AppliedClusterResourceQuota, w io.Writer, options kctl.PrintOptions) error { - return printClusterResourceQuota(quotaapi.ConvertAppliedClusterResourceQuotaToClusterResourceQuota(resourceQuota), w, options) -} - -func printAppliedClusterResourceQuotaList(list *quotaapi.AppliedClusterResourceQuotaList, w io.Writer, options kctl.PrintOptions) error { - for i := range list.Items { - if err := printClusterResourceQuota(quotaapi.ConvertAppliedClusterResourceQuotaToClusterResourceQuota(&list.Items[i]), w, options); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/projectstatus.go b/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/projectstatus.go deleted file mode 100644 index 99b719227..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/cli/describe/projectstatus.go +++ /dev/null @@ -1,1458 +0,0 @@ -package describe - -import ( - "fmt" - "io" - "sort" - "strconv" - "strings" - "text/tabwriter" - - kapi "k8s.io/kubernetes/pkg/api" - kapierrors 
"k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - kapps "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" - - osgraph "github.com/openshift/origin/pkg/api/graph" - "github.com/openshift/origin/pkg/api/graph/graphview" - kubeedges "github.com/openshift/origin/pkg/api/kubegraph" - kubeanalysis "github.com/openshift/origin/pkg/api/kubegraph/analysis" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - buildapi "github.com/openshift/origin/pkg/build/api" - buildedges "github.com/openshift/origin/pkg/build/graph" - buildanalysis "github.com/openshift/origin/pkg/build/graph/analysis" - buildgraph "github.com/openshift/origin/pkg/build/graph/nodes" - "github.com/openshift/origin/pkg/client" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deployedges "github.com/openshift/origin/pkg/deploy/graph" - deployanalysis "github.com/openshift/origin/pkg/deploy/graph/analysis" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" - deployutil "github.com/openshift/origin/pkg/deploy/util" - imageapi "github.com/openshift/origin/pkg/image/api" - imageedges "github.com/openshift/origin/pkg/image/graph" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" - projectapi "github.com/openshift/origin/pkg/project/api" - routeapi "github.com/openshift/origin/pkg/route/api" - routeedges "github.com/openshift/origin/pkg/route/graph" - routeanalysis "github.com/openshift/origin/pkg/route/graph/analysis" - routegraph "github.com/openshift/origin/pkg/route/graph/nodes" - "github.com/openshift/origin/pkg/util/errors" - "github.com/openshift/origin/pkg/util/parallel" -) - -const ForbiddenListWarning = "Forbidden" - -// ProjectStatusDescriber generates extended information about a 
Project -type ProjectStatusDescriber struct { - K kclient.Interface - C client.Interface - Server string - Suggest bool - - // root command used when calling this command - CommandBaseName string - - LogsCommandName string - SecurityPolicyCommandFormat string - SetProbeCommandName string -} - -func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, sets.String, error) { - g := osgraph.New() - - loaders := []GraphLoader{ - &serviceLoader{namespace: namespace, lister: d.K}, - &serviceAccountLoader{namespace: namespace, lister: d.K}, - &secretLoader{namespace: namespace, lister: d.K}, - &rcLoader{namespace: namespace, lister: d.K}, - &podLoader{namespace: namespace, lister: d.K}, - &petsetLoader{namespace: namespace, lister: d.K.Apps()}, - &horizontalPodAutoscalerLoader{namespace: namespace, lister: d.K.Autoscaling()}, - // TODO check swagger for feature enablement and selectively add bcLoader and buildLoader - // then remove errors.TolerateNotFoundError method. - &bcLoader{namespace: namespace, lister: d.C}, - &buildLoader{namespace: namespace, lister: d.C}, - &isLoader{namespace: namespace, lister: d.C}, - &dcLoader{namespace: namespace, lister: d.C}, - &routeLoader{namespace: namespace, lister: d.C}, - } - loadingFuncs := []func() error{} - for _, loader := range loaders { - loadingFuncs = append(loadingFuncs, loader.Load) - } - - forbiddenResources := sets.String{} - if errs := parallel.Run(loadingFuncs...); len(errs) > 0 { - actualErrors := []error{} - for _, err := range errs { - if kapierrors.IsForbidden(err) { - forbiddenErr := err.(*kapierrors.StatusError) - if (forbiddenErr.Status().Details != nil) && (len(forbiddenErr.Status().Details.Kind) > 0) { - forbiddenResources.Insert(forbiddenErr.Status().Details.Kind) - } - continue - } - actualErrors = append(actualErrors, err) - } - - if len(actualErrors) > 0 { - return g, forbiddenResources, utilerrors.NewAggregate(actualErrors) - } - } - - for _, loader := range loaders { - 
loader.AddToGraph(g) - } - - kubeedges.AddAllExposedPodTemplateSpecEdges(g) - kubeedges.AddAllExposedPodEdges(g) - kubeedges.AddAllManagedByControllerPodEdges(g) - kubeedges.AddAllRequestedServiceAccountEdges(g) - kubeedges.AddAllMountableSecretEdges(g) - kubeedges.AddAllMountedSecretEdges(g) - kubeedges.AddHPAScaleRefEdges(g) - buildedges.AddAllInputOutputEdges(g) - buildedges.AddAllBuildEdges(g) - deployedges.AddAllTriggerEdges(g) - deployedges.AddAllDeploymentEdges(g) - imageedges.AddAllImageStreamRefEdges(g) - imageedges.AddAllImageStreamImageRefEdges(g) - routeedges.AddAllRouteEdges(g) - - return g, forbiddenResources, nil -} - -// Describe returns the description of a project -func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error) { - var f formatter = namespacedFormatter{} - - g, forbiddenResources, err := d.MakeGraph(namespace) - if err != nil { - return "", err - } - - allNamespaces := namespace == kapi.NamespaceAll - var project *projectapi.Project - if !allNamespaces { - p, err := d.C.Projects().Get(namespace) - if err != nil { - return "", err - } - project = p - f = namespacedFormatter{currentNamespace: namespace} - } - - coveredNodes := graphview.IntSet{} - - services, coveredByServices := graphview.AllServiceGroups(g, coveredNodes) - coveredNodes.Insert(coveredByServices.List()...) - - standaloneDCs, coveredByDCs := graphview.AllDeploymentConfigPipelines(g, coveredNodes) - coveredNodes.Insert(coveredByDCs.List()...) - - standaloneRCs, coveredByRCs := graphview.AllReplicationControllers(g, coveredNodes) - coveredNodes.Insert(coveredByRCs.List()...) - - standaloneImages, coveredByImages := graphview.AllImagePipelinesFromBuildConfig(g, coveredNodes) - coveredNodes.Insert(coveredByImages.List()...) - - standalonePods, coveredByPods := graphview.AllPods(g, coveredNodes) - coveredNodes.Insert(coveredByPods.List()...) 
- - return tabbedString(func(out *tabwriter.Writer) error { - indent := " " - if allNamespaces { - fmt.Fprintf(out, describeAllProjectsOnServer(f, d.Server)) - } else { - fmt.Fprintf(out, describeProjectAndServer(f, project, d.Server)) - } - - for _, service := range services { - if !service.Service.Found() { - continue - } - local := namespacedFormatter{currentNamespace: service.Service.Namespace} - - var exposes []string - for _, routeNode := range service.ExposingRoutes { - exposes = append(exposes, describeRouteInServiceGroup(local, routeNode)...) - } - sort.Sort(exposedRoutes(exposes)) - - fmt.Fprintln(out) - printLines(out, "", 0, describeServiceInServiceGroup(f, service, exposes...)...) - - for _, dcPipeline := range service.DeploymentConfigPipelines { - printLines(out, indent, 1, describeDeploymentInServiceGroup(local, dcPipeline, func(rc *kubegraph.ReplicationControllerNode) int32 { - return graphview.MaxRecentContainerRestartsForRC(g, rc) - })...) - } - - for _, node := range service.FulfillingPetSets { - printLines(out, indent, 1, describePetSetInServiceGroup(local, node)...) - } - - rcNode: - for _, rcNode := range service.FulfillingRCs { - for _, coveredDC := range service.FulfillingDCs { - if deployedges.BelongsToDeploymentConfig(coveredDC.DeploymentConfig, rcNode.ReplicationController) { - continue rcNode - } - } - printLines(out, indent, 1, describeRCInServiceGroup(local, rcNode)...) - } - - pod: - for _, node := range service.FulfillingPods { - // skip pods that have been displayed in a roll-up of RCs and DCs (by implicit usage of RCs) - for _, coveredRC := range service.FulfillingRCs { - if g.Edge(node, coveredRC) != nil { - continue pod - } - } - // TODO: collapse into FulfillingControllers - for _, covered := range service.FulfillingPetSets { - if g.Edge(node, covered) != nil { - continue pod - } - } - printLines(out, indent, 1, describePodInServiceGroup(local, node)...) 
- } - } - - for _, standaloneDC := range standaloneDCs { - fmt.Fprintln(out) - printLines(out, indent, 0, describeDeploymentInServiceGroup(f, standaloneDC, func(rc *kubegraph.ReplicationControllerNode) int32 { - return graphview.MaxRecentContainerRestartsForRC(g, rc) - })...) - } - - for _, standaloneImage := range standaloneImages { - fmt.Fprintln(out) - lines := describeStandaloneBuildGroup(f, standaloneImage, namespace) - lines = append(lines, describeAdditionalBuildDetail(standaloneImage.Build, standaloneImage.LastSuccessfulBuild, standaloneImage.LastUnsuccessfulBuild, standaloneImage.ActiveBuilds, standaloneImage.DestinationResolved, true)...) - printLines(out, indent, 0, lines...) - } - - for _, standaloneRC := range standaloneRCs { - fmt.Fprintln(out) - printLines(out, indent, 0, describeRCInServiceGroup(f, standaloneRC.RC)...) - } - - monopods, err := filterBoringPods(standalonePods) - if err != nil { - return err - } - for _, monopod := range monopods { - fmt.Fprintln(out) - printLines(out, indent, 0, describeMonopod(f, monopod.Pod)...) - } - - allMarkers := osgraph.Markers{} - allMarkers = append(allMarkers, createForbiddenMarkers(forbiddenResources)...) - for _, scanner := range getMarkerScanners(d.LogsCommandName, d.SecurityPolicyCommandFormat, d.SetProbeCommandName) { - allMarkers = append(allMarkers, scanner(g, f)...) - } - - // TODO: Provide an option to chase these hidden markers. 
- allMarkers = allMarkers.FilterByNamespace(namespace) - - fmt.Fprintln(out) - - sort.Stable(osgraph.ByKey(allMarkers)) - sort.Stable(osgraph.ByNodeID(allMarkers)) - - errorMarkers := allMarkers.BySeverity(osgraph.ErrorSeverity) - errorSuggestions := 0 - if len(errorMarkers) > 0 { - fmt.Fprintln(out, "Errors:") - for _, marker := range errorMarkers { - fmt.Fprintln(out, indent+"* "+marker.Message) - if len(marker.Suggestion) > 0 { - errorSuggestions++ - if d.Suggest { - switch s := marker.Suggestion.String(); { - case strings.Contains(s, "\n"): - fmt.Fprintln(out) - for _, line := range strings.Split(s, "\n") { - fmt.Fprintln(out, indent+" "+line) - } - case len(s) > 0: - fmt.Fprintln(out, indent+" try: "+s) - } - } - } - } - } - - warningMarkers := allMarkers.BySeverity(osgraph.WarningSeverity) - if len(warningMarkers) > 0 { - if d.Suggest { - fmt.Fprintln(out, "Warnings:") - } - for _, marker := range warningMarkers { - if d.Suggest { - fmt.Fprintln(out, indent+"* "+marker.Message) - switch s := marker.Suggestion.String(); { - case strings.Contains(s, "\n"): - fmt.Fprintln(out) - for _, line := range strings.Split(s, "\n") { - fmt.Fprintln(out, indent+" "+line) - } - case len(s) > 0: - fmt.Fprintln(out, indent+" try: "+s) - } - } - } - } - - // We print errors by default and warnings if -v is used. If we get none, - // this would be an extra new line. 
- if len(errorMarkers) != 0 || (d.Suggest && len(warningMarkers) != 0) { - fmt.Fprintln(out) - } - - errors, warnings := "", "" - if len(errorMarkers) == 1 { - errors = "1 error" - } else if len(errorMarkers) > 1 { - errors = fmt.Sprintf("%d errors", len(errorMarkers)) - } - if len(warningMarkers) == 1 { - warnings = "1 warning" - } else if len(warningMarkers) > 1 { - warnings = fmt.Sprintf("%d warnings", len(warningMarkers)) - } - - switch { - case !d.Suggest && len(errorMarkers) > 0 && len(warningMarkers) > 0: - fmt.Fprintf(out, "%s and %s identified, use '%[3]s status -v' to see details.\n", errors, warnings, d.CommandBaseName) - - case !d.Suggest && len(errorMarkers) > 0 && errorSuggestions > 0: - fmt.Fprintf(out, "%s identified, use '%[2]s status -v' to see details.\n", errors, d.CommandBaseName) - - case !d.Suggest && len(warningMarkers) > 0: - fmt.Fprintf(out, "%s identified, use '%[2]s status -v' to see details.\n", warnings, d.CommandBaseName) - - case (len(services) == 0) && (len(standaloneDCs) == 0) && (len(standaloneImages) == 0): - fmt.Fprintln(out, "You have no services, deployment configs, or build configs.") - fmt.Fprintf(out, "Run '%[1]s new-app' to create an application.\n", d.CommandBaseName) - - default: - fmt.Fprintf(out, "View details with '%[1]s describe /' or list everything with '%[1]s get all'.\n", d.CommandBaseName) - } - - return nil - }) -} - -func createForbiddenMarkers(forbiddenResources sets.String) []osgraph.Marker { - markers := []osgraph.Marker{} - for forbiddenResource := range forbiddenResources { - markers = append(markers, osgraph.Marker{ - Severity: osgraph.WarningSeverity, - Key: ForbiddenListWarning, - Message: fmt.Sprintf("Unable to list %s resources. 
Not all status relationships can be established.", forbiddenResource), - }) - } - return markers -} - -func getMarkerScanners(logsCommandName, securityPolicyCommandFormat, setProbeCommandName string) []osgraph.MarkerScanner { - return []osgraph.MarkerScanner{ - func(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - return kubeanalysis.FindRestartingPods(g, f, logsCommandName, securityPolicyCommandFormat) - }, - kubeanalysis.FindDuelingReplicationControllers, - kubeanalysis.FindMissingSecrets, - kubeanalysis.FindHPASpecsMissingCPUTargets, - kubeanalysis.FindHPASpecsMissingScaleRefs, - kubeanalysis.FindOverlappingHPAs, - buildanalysis.FindUnpushableBuildConfigs, - buildanalysis.FindCircularBuilds, - buildanalysis.FindPendingTags, - deployanalysis.FindDeploymentConfigTriggerErrors, - buildanalysis.FindMissingInputImageStreams, - func(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - return deployanalysis.FindDeploymentConfigReadinessWarnings(g, f, setProbeCommandName) - }, - routeanalysis.FindPortMappingIssues, - routeanalysis.FindMissingTLSTerminationType, - routeanalysis.FindPathBasedPassthroughRoutes, - routeanalysis.FindRouteAdmissionFailures, - routeanalysis.FindMissingRouter, - // We disable this feature by default and we don't have a capability detection for this sort of thing. Disable this check for now. 
- // kubeanalysis.FindUnmountableSecrets, - } -} - -func printLines(out io.Writer, indent string, depth int, lines ...string) { - for i, s := range lines { - fmt.Fprintf(out, strings.Repeat(indent, depth)) - if i != 0 { - fmt.Fprint(out, indent) - } - fmt.Fprintln(out, s) - } -} - -func indentLines(indent string, lines ...string) []string { - ret := make([]string, 0, len(lines)) - for _, line := range lines { - ret = append(ret, indent+line) - } - - return ret -} - -type formatter interface { - ResourceName(obj interface{}) string -} - -func namespaceNameWithType(resource, name, namespace, defaultNamespace string, noNamespace bool) string { - if noNamespace || namespace == defaultNamespace || len(namespace) == 0 { - return resource + "/" + name - } - return resource + "/" + name + "[" + namespace + "]" -} - -var namespaced = namespacedFormatter{} - -type namespacedFormatter struct { - hideNamespace bool - currentNamespace string -} - -func (f namespacedFormatter) ResourceName(obj interface{}) string { - switch t := obj.(type) { - - case *kubegraph.PodNode: - return namespaceNameWithType("pod", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) - case *kubegraph.ServiceNode: - return namespaceNameWithType("svc", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) - case *kubegraph.SecretNode: - return namespaceNameWithType("secret", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) - case *kubegraph.ServiceAccountNode: - return namespaceNameWithType("sa", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) - case *kubegraph.ReplicationControllerNode: - return namespaceNameWithType("rc", t.ReplicationController.Name, t.ReplicationController.Namespace, f.currentNamespace, f.hideNamespace) - case *kubegraph.HorizontalPodAutoscalerNode: - return namespaceNameWithType("hpa", t.HorizontalPodAutoscaler.Name, t.HorizontalPodAutoscaler.Namespace, f.currentNamespace, f.hideNamespace) - case *kubegraph.PetSetNode: - return 
namespaceNameWithType("petset", t.PetSet.Name, t.PetSet.Namespace, f.currentNamespace, f.hideNamespace) - - case *imagegraph.ImageStreamNode: - return namespaceNameWithType("is", t.ImageStream.Name, t.ImageStream.Namespace, f.currentNamespace, f.hideNamespace) - case *imagegraph.ImageStreamTagNode: - return namespaceNameWithType("istag", t.ImageStreamTag.Name, t.ImageStreamTag.Namespace, f.currentNamespace, f.hideNamespace) - case *imagegraph.ImageStreamImageNode: - return namespaceNameWithType("isi", t.ImageStreamImage.Name, t.ImageStreamImage.Namespace, f.currentNamespace, f.hideNamespace) - case *imagegraph.ImageNode: - return namespaceNameWithType("image", t.Image.Name, t.Image.Namespace, f.currentNamespace, f.hideNamespace) - case *buildgraph.BuildConfigNode: - return namespaceNameWithType("bc", t.BuildConfig.Name, t.BuildConfig.Namespace, f.currentNamespace, f.hideNamespace) - case *buildgraph.BuildNode: - return namespaceNameWithType("build", t.Build.Name, t.Build.Namespace, f.currentNamespace, f.hideNamespace) - - case *deploygraph.DeploymentConfigNode: - return namespaceNameWithType("dc", t.DeploymentConfig.Name, t.DeploymentConfig.Namespace, f.currentNamespace, f.hideNamespace) - - case *routegraph.RouteNode: - return namespaceNameWithType("route", t.Route.Name, t.Route.Namespace, f.currentNamespace, f.hideNamespace) - - default: - return fmt.Sprintf("", obj) - } -} - -func describeProjectAndServer(f formatter, project *projectapi.Project, server string) string { - if len(server) == 0 { - return fmt.Sprintf("In project %s on server %s\n", projectapi.DisplayNameAndNameForProject(project), server) - } - return fmt.Sprintf("In project %s on server %s\n", projectapi.DisplayNameAndNameForProject(project), server) - -} - -func describeAllProjectsOnServer(f formatter, server string) string { - if len(server) == 0 { - return "Showing all projects\n" - } - return fmt.Sprintf("Showing all projects on server %s\n", server) -} - -func 
describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentConfigPipeline, restartFn func(*kubegraph.ReplicationControllerNode) int32) []string { - local := namespacedFormatter{currentNamespace: deploy.Deployment.DeploymentConfig.Namespace} - - includeLastPass := deploy.ActiveDeployment == nil - if len(deploy.Images) == 1 { - format := "%s deploys %s %s" - if deploy.Deployment.DeploymentConfig.Spec.Test { - format = "%s test deploys %s %s" - } - lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.DeploymentConfig.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} - if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") { - segments := strings.SplitN(lines[0], " <- ", 2) - lines[0] = segments[0] + " <-" - lines = append(lines, segments[1]) - } - lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(deploy.Images[0].Build, deploy.Images[0].LastSuccessfulBuild, deploy.Images[0].LastUnsuccessfulBuild, deploy.Images[0].ActiveBuilds, deploy.Images[0].DestinationResolved, includeLastPass)...)...) - lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) - return lines - } - - format := "%s deploys %s" - if deploy.Deployment.DeploymentConfig.Spec.Test { - format = "%s test deploys %s" - } - lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} - for _, image := range deploy.Images { - lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.DeploymentConfig.Namespace)) - lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...) 
- lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) - } - return lines -} - -func describePetSetInServiceGroup(f formatter, node *kubegraph.PetSetNode) []string { - images := []string{} - for _, container := range node.PetSet.Spec.Template.Spec.Containers { - images = append(images, container.Image) - } - - return []string{fmt.Sprintf("%s manages %s, %s", f.ResourceName(node), strings.Join(images, ", "), describePetSetStatus(node.PetSet))} -} - -func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControllerNode) []string { - if rcNode.ReplicationController.Spec.Template == nil { - return []string{} - } - - images := []string{} - for _, container := range rcNode.ReplicationController.Spec.Template.Spec.Containers { - images = append(images, container.Image) - } - - lines := []string{fmt.Sprintf("%s runs %s", f.ResourceName(rcNode), strings.Join(images, ", "))} - lines = append(lines, describeRCStatus(rcNode.ReplicationController)) - - return lines -} - -func describePodInServiceGroup(f formatter, podNode *kubegraph.PodNode) []string { - images := []string{} - for _, container := range podNode.Pod.Spec.Containers { - images = append(images, container.Image) - } - - lines := []string{fmt.Sprintf("%s runs %s", f.ResourceName(podNode), strings.Join(images, ", "))} - return lines -} - -func describeMonopod(f formatter, podNode *kubegraph.PodNode) []string { - images := []string{} - for _, container := range podNode.Pod.Spec.Containers { - images = append(images, container.Image) - } - - lines := []string{fmt.Sprintf("%s runs %s", f.ResourceName(podNode), strings.Join(images, ", "))} - return lines -} - -// exposedRoutes orders strings by their leading prefix (https:// -> http:// other prefixes), then by -// the shortest distance up to the first space (indicating a break), then alphabetically: -// -// https://test.com -// https://www.test.com 
-// http://t.com -// other string -// -type exposedRoutes []string - -func (e exposedRoutes) Len() int { return len(e) } -func (e exposedRoutes) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e exposedRoutes) Less(i, j int) bool { - a, b := e[i], e[j] - prefixA, prefixB := strings.HasPrefix(a, "https://"), strings.HasPrefix(b, "https://") - switch { - case prefixA && !prefixB: - return true - case !prefixA && prefixB: - return false - case !prefixA && !prefixB: - prefixA, prefixB = strings.HasPrefix(a, "http://"), strings.HasPrefix(b, "http://") - switch { - case prefixA && !prefixB: - return true - case !prefixA && prefixB: - return false - case !prefixA && !prefixB: - return a < b - default: - a, b = a[7:], b[7:] - } - default: - a, b = a[8:], b[8:] - } - lA, lB := strings.Index(a, " "), strings.Index(b, " ") - if lA == -1 { - lA = len(a) - } - if lB == -1 { - lB = len(b) - } - switch { - case lA < lB: - return true - case lA > lB: - return false - default: - return a < b - } -} - -func extractRouteInfo(route *routeapi.Route) (requested bool, other []string, errors []string) { - reasons := sets.NewString() - for _, ingress := range route.Status.Ingress { - exact := route.Spec.Host == ingress.Host - switch status, condition := routeapi.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status { - case kapi.ConditionFalse: - reasons.Insert(condition.Reason) - default: - if exact { - requested = true - } else { - other = append(other, ingress.Host) - } - } - } - return requested, other, reasons.List() -} - -func describeRouteExposed(host string, route *routeapi.Route, errors bool) string { - var trailer string - if errors { - trailer = " (!)" - } - var prefix string - switch { - case route.Spec.TLS == nil: - prefix = fmt.Sprintf("http://%s", host) - case route.Spec.TLS.Termination == routeapi.TLSTerminationPassthrough: - prefix = fmt.Sprintf("https://%s (passthrough)", host) - case route.Spec.TLS.Termination == routeapi.TLSTerminationReencrypt: - prefix = 
fmt.Sprintf("https://%s (reencrypt)", host) - case route.Spec.TLS.Termination != routeapi.TLSTerminationEdge: - // future proof against other types of TLS termination being added - prefix = fmt.Sprintf("https://%s", host) - case route.Spec.TLS.InsecureEdgeTerminationPolicy == routeapi.InsecureEdgeTerminationPolicyRedirect: - prefix = fmt.Sprintf("https://%s (redirects)", host) - case route.Spec.TLS.InsecureEdgeTerminationPolicy == routeapi.InsecureEdgeTerminationPolicyAllow: - prefix = fmt.Sprintf("https://%s (and http)", host) - default: - prefix = fmt.Sprintf("https://%s", host) - } - - if route.Spec.Port != nil && len(route.Spec.Port.TargetPort.String()) > 0 { - return fmt.Sprintf("%s to pod port %s%s", prefix, route.Spec.Port.TargetPort.String(), trailer) - } - return fmt.Sprintf("%s%s", prefix, trailer) -} - -func describeRouteInServiceGroup(f formatter, routeNode *routegraph.RouteNode) []string { - // markers should cover printing information about admission failure - requested, other, errors := extractRouteInfo(routeNode.Route) - var lines []string - if requested { - lines = append(lines, describeRouteExposed(routeNode.Spec.Host, routeNode.Route, len(errors) > 0)) - } - for _, s := range other { - lines = append(lines, describeRouteExposed(s, routeNode.Route, len(errors) > 0)) - } - if len(lines) == 0 { - switch { - case len(errors) >= 1: - // router rejected the output - lines = append(lines, fmt.Sprintf("%s not accepted: %s", f.ResourceName(routeNode), errors[0])) - case len(routeNode.Spec.Host) == 0: - // no errors or output, likely no router running and no default domain - lines = append(lines, fmt.Sprintf("%s has no host set", f.ResourceName(routeNode))) - case len(routeNode.Status.Ingress) == 0: - // host set, but no ingress, an older legacy router - lines = append(lines, describeRouteExposed(routeNode.Spec.Host, routeNode.Route, false)) - default: - // multiple conditions but no host exposed, use the generic legacy output - lines = append(lines, 
fmt.Sprintf("exposed as %s by %s", routeNode.Spec.Host, f.ResourceName(routeNode))) - } - } - return lines -} - -func describeDeploymentConfigTrigger(dc *deployapi.DeploymentConfig) string { - if len(dc.Spec.Triggers) == 0 { - return "(manual)" - } - - return "" -} - -func describeStandaloneBuildGroup(f formatter, pipeline graphview.ImagePipeline, namespace string) []string { - switch { - case pipeline.Build != nil: - lines := []string{describeBuildInPipeline(f, pipeline, namespace)} - if pipeline.Image != nil { - lines = append(lines, fmt.Sprintf("-> %s", describeImageTagInPipeline(f, pipeline.Image, namespace))) - } - return lines - case pipeline.Image != nil: - return []string{describeImageTagInPipeline(f, pipeline.Image, namespace)} - default: - return []string{""} - } -} - -func describeImageInPipeline(f formatter, pipeline graphview.ImagePipeline, namespace string) string { - switch { - case pipeline.Image != nil && pipeline.Build != nil: - return fmt.Sprintf("%s <- %s", describeImageTagInPipeline(f, pipeline.Image, namespace), describeBuildInPipeline(f, pipeline, namespace)) - case pipeline.Image != nil: - return describeImageTagInPipeline(f, pipeline.Image, namespace) - case pipeline.Build != nil: - return describeBuildInPipeline(f, pipeline, namespace) - default: - return "" - } -} - -func describeImageTagInPipeline(f formatter, image graphview.ImageTagLocation, namespace string) string { - switch t := image.(type) { - case *imagegraph.ImageStreamTagNode: - if t.ImageStreamTag.Namespace != namespace { - return image.ImageSpec() - } - return f.ResourceName(t) - default: - return image.ImageSpec() - } -} - -func describeBuildInPipeline(f formatter, pipeline graphview.ImagePipeline, namespace string) string { - bldType := "" - switch { - case pipeline.Build.BuildConfig.Spec.Strategy.DockerStrategy != nil: - bldType = "docker" - case pipeline.Build.BuildConfig.Spec.Strategy.SourceStrategy != nil: - bldType = "source" - case 
pipeline.Build.BuildConfig.Spec.Strategy.CustomStrategy != nil: - bldType = "custom" - case pipeline.Build.BuildConfig.Spec.Strategy.JenkinsPipelineStrategy != nil: - return fmt.Sprintf("bc/%s is a Jenkins Pipeline", pipeline.Build.BuildConfig.Name) - default: - return fmt.Sprintf("bc/%s unrecognized build", pipeline.Build.BuildConfig.Name) - } - - source, ok := describeSourceInPipeline(&pipeline.Build.BuildConfig.Spec.Source) - if !ok { - return fmt.Sprintf("bc/%s unconfigured %s build", pipeline.Build.BuildConfig.Name, bldType) - } - - retStr := fmt.Sprintf("bc/%s %s builds %s", pipeline.Build.BuildConfig.Name, bldType, source) - if pipeline.BaseImage != nil { - retStr = retStr + fmt.Sprintf(" on %s", describeImageTagInPipeline(f, pipeline.BaseImage, namespace)) - } - if pipeline.BaseBuilds != nil && len(pipeline.BaseBuilds) > 0 { - bcList := "bc/" + pipeline.BaseBuilds[0] - for i, bc := range pipeline.BaseBuilds { - if i == 0 { - continue - } - bcList = bcList + ", bc/" + bc - } - retStr = retStr + fmt.Sprintf(" (from %s)", bcList) - } else if pipeline.ScheduledImport { - // technically, an image stream produced by a bc could also have a scheduled import, - // but in the interest of saving space, we'll only note this possibility when there is no input BC - // (giving the input BC precedence) - retStr = retStr + " (import scheduled)" - } - return retStr -} - -func describeAdditionalBuildDetail(build *buildgraph.BuildConfigNode, lastSuccessfulBuild *buildgraph.BuildNode, lastUnsuccessfulBuild *buildgraph.BuildNode, activeBuilds []*buildgraph.BuildNode, pushTargetResolved bool, includeSuccess bool) []string { - if build == nil { - return nil - } - out := []string{} - - passTime := unversioned.Time{} - if lastSuccessfulBuild != nil { - passTime = buildTimestamp(lastSuccessfulBuild.Build) - } - failTime := unversioned.Time{} - if lastUnsuccessfulBuild != nil { - failTime = buildTimestamp(lastUnsuccessfulBuild.Build) - } - - lastTime := failTime - if 
passTime.After(failTime.Time) { - lastTime = passTime - } - - // display the last successful build if specifically requested or we're going to display an active build for context - if lastSuccessfulBuild != nil && (includeSuccess || len(activeBuilds) > 0) { - out = append(out, describeBuildPhase(lastSuccessfulBuild.Build, &passTime, build.BuildConfig.Name, pushTargetResolved)) - } - if passTime.Before(failTime) { - out = append(out, describeBuildPhase(lastUnsuccessfulBuild.Build, &failTime, build.BuildConfig.Name, pushTargetResolved)) - } - - if len(activeBuilds) > 0 { - activeOut := []string{} - for i := range activeBuilds { - activeOut = append(activeOut, describeBuildPhase(activeBuilds[i].Build, nil, build.BuildConfig.Name, pushTargetResolved)) - } - - if buildTimestamp(activeBuilds[0].Build).Before(lastTime) { - out = append(out, activeOut...) - } else { - out = append(activeOut, out...) - } - } - if len(out) == 0 && lastSuccessfulBuild == nil { - out = append(out, "not built yet") - } - return out -} - -func describeBuildPhase(build *buildapi.Build, t *unversioned.Time, parentName string, pushTargetResolved bool) string { - imageStreamFailure := "" - // if we're using an image stream and that image stream is the internal registry and that registry doesn't exist - if (build.Spec.Output.To != nil) && !pushTargetResolved { - imageStreamFailure = " (can't push to image)" - } - - if t == nil { - ts := buildTimestamp(build) - t = &ts - } - var time string - if t.IsZero() { - time = "" - } else { - time = strings.ToLower(formatRelativeTime(t.Time)) - } - buildIdentification := fmt.Sprintf("build/%s", build.Name) - prefix := parentName + "-" - if strings.HasPrefix(build.Name, prefix) { - suffix := build.Name[len(prefix):] - - if buildNumber, err := strconv.Atoi(suffix); err == nil { - buildIdentification = fmt.Sprintf("build #%d", buildNumber) - } - } - - revision := describeSourceRevision(build.Spec.Revision) - if len(revision) != 0 { - revision = fmt.Sprintf(" - 
%s", revision) - } - switch build.Status.Phase { - case buildapi.BuildPhaseComplete: - return fmt.Sprintf("%s succeeded %s ago%s%s", buildIdentification, time, revision, imageStreamFailure) - case buildapi.BuildPhaseError: - return fmt.Sprintf("%s stopped with an error %s ago%s%s", buildIdentification, time, revision, imageStreamFailure) - case buildapi.BuildPhaseFailed: - return fmt.Sprintf("%s failed %s ago%s%s", buildIdentification, time, revision, imageStreamFailure) - default: - status := strings.ToLower(string(build.Status.Phase)) - return fmt.Sprintf("%s %s for %s%s%s", buildIdentification, status, time, revision, imageStreamFailure) - } -} - -func describeSourceRevision(rev *buildapi.SourceRevision) string { - if rev == nil { - return "" - } - switch { - case rev.Git != nil: - author := describeSourceControlUser(rev.Git.Author) - if len(author) == 0 { - author = describeSourceControlUser(rev.Git.Committer) - } - if len(author) != 0 { - author = fmt.Sprintf(" (%s)", author) - } - commit := rev.Git.Commit - if len(commit) > 7 { - commit = commit[:7] - } - return fmt.Sprintf("%s: %s%s", commit, rev.Git.Message, author) - default: - return "" - } -} - -func describeSourceControlUser(user buildapi.SourceControlUser) string { - if len(user.Name) == 0 { - return user.Email - } - if len(user.Email) == 0 { - return user.Name - } - return fmt.Sprintf("%s <%s>", user.Name, user.Email) -} - -func buildTimestamp(build *buildapi.Build) unversioned.Time { - if build == nil { - return unversioned.Time{} - } - if !build.Status.CompletionTimestamp.IsZero() { - return *build.Status.CompletionTimestamp - } - if !build.Status.StartTimestamp.IsZero() { - return *build.Status.StartTimestamp - } - return build.CreationTimestamp -} - -func describeSourceInPipeline(source *buildapi.BuildSource) (string, bool) { - switch { - case source.Git != nil: - if len(source.Git.Ref) == 0 { - return source.Git.URI, true - } - return fmt.Sprintf("%s#%s", source.Git.URI, source.Git.Ref), true - 
case source.Dockerfile != nil: - return "Dockerfile", true - case source.Binary != nil: - return "uploaded code", true - case len(source.Images) > 0: - return "contents in other images", true - } - return "", false -} - -func describeDeployments(f formatter, dcNode *deploygraph.DeploymentConfigNode, activeDeployment *kubegraph.ReplicationControllerNode, inactiveDeployments []*kubegraph.ReplicationControllerNode, restartFn func(*kubegraph.ReplicationControllerNode) int32, count int) []string { - if dcNode == nil { - return nil - } - out := []string{} - deploymentsToPrint := append([]*kubegraph.ReplicationControllerNode{}, inactiveDeployments...) - - if activeDeployment == nil { - on, auto := describeDeploymentConfigTriggers(dcNode.DeploymentConfig) - if dcNode.DeploymentConfig.Status.LatestVersion == 0 { - out = append(out, fmt.Sprintf("deployment #1 waiting %s", on)) - } else if auto { - out = append(out, fmt.Sprintf("deployment #%d pending %s", dcNode.DeploymentConfig.Status.LatestVersion, on)) - } - // TODO: detect new image available? - } else { - deploymentsToPrint = append([]*kubegraph.ReplicationControllerNode{activeDeployment}, inactiveDeployments...) 
- } - - for i, deployment := range deploymentsToPrint { - restartCount := int32(0) - if restartFn != nil { - restartCount = restartFn(deployment) - } - out = append(out, describeDeploymentStatus(deployment.ReplicationController, i == 0, dcNode.DeploymentConfig.Spec.Test, restartCount)) - switch { - case count == -1: - if deployutil.DeploymentStatusFor(deployment.ReplicationController) == deployapi.DeploymentStatusComplete { - return out - } - default: - if i+1 >= count { - return out - } - } - } - return out -} - -func describeDeploymentStatus(deploy *kapi.ReplicationController, first, test bool, restartCount int32) string { - timeAt := strings.ToLower(formatRelativeTime(deploy.CreationTimestamp.Time)) - status := deployutil.DeploymentStatusFor(deploy) - version := deployutil.DeploymentVersionFor(deploy) - maybeCancelling := "" - if deployutil.IsDeploymentCancelled(deploy) && !deployutil.IsTerminatedDeployment(deploy) { - maybeCancelling = " (cancelling)" - } - - switch status { - case deployapi.DeploymentStatusFailed: - reason := deployutil.DeploymentStatusReasonFor(deploy) - if len(reason) > 0 { - reason = fmt.Sprintf(": %s", reason) - } - // TODO: encode fail time in the rc - return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount)) - case deployapi.DeploymentStatusComplete: - // TODO: pod status output - if test { - return fmt.Sprintf("test deployment #%d deployed %s ago", version, timeAt) - } - return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, first, restartCount)) - case deployapi.DeploymentStatusRunning: - format := "deployment #%d running%s for %s%s" - if test { - format = "test deployment #%d running%s for %s%s" - } - return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, 
false, restartCount)) - default: - return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy.Status.Replicas, deploy.Spec.Replicas, false, restartCount)) - } -} - -func describePetSetStatus(p *kapps.PetSet) string { - timeAt := strings.ToLower(formatRelativeTime(p.CreationTimestamp.Time)) - return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(p.Status.Replicas), int32(p.Spec.Replicas), false, 0)) -} - -func describeRCStatus(rc *kapi.ReplicationController) string { - timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time)) - return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.Replicas, rc.Spec.Replicas, false, 0)) -} - -func describePodSummaryInline(actual, requested int32, includeEmpty bool, restartCount int32) string { - s := describePodSummary(actual, requested, includeEmpty, restartCount) - if len(s) == 0 { - return s - } - change := "" - switch { - case requested < actual: - change = fmt.Sprintf(" reducing to %d", requested) - case requested > actual: - change = fmt.Sprintf(" growing to %d", requested) - } - return fmt.Sprintf(" - %s%s", s, change) -} - -func describePodSummary(actual, requested int32, includeEmpty bool, restartCount int32) string { - var restartWarn string - if restartCount > 0 { - restartWarn = fmt.Sprintf(" (warning: %d restarts)", restartCount) - } - if actual == requested { - switch { - case actual == 0: - if !includeEmpty { - return "" - } - return "0 pods" - case actual > 1: - return fmt.Sprintf("%d pods", actual) + restartWarn - default: - return "1 pod" + restartWarn - } - } - return fmt.Sprintf("%d/%d pods", actual, requested) + restartWarn -} - -func describeDeploymentConfigTriggers(config *deployapi.DeploymentConfig) (string, bool) { - hasConfig, hasImage := false, false - for _, t := range config.Spec.Triggers { - switch t.Type { - case 
deployapi.DeploymentTriggerOnConfigChange: - hasConfig = true - case deployapi.DeploymentTriggerOnImageChange: - hasImage = true - } - } - switch { - case hasConfig && hasImage: - return "on image or update", true - case hasConfig: - return "on update", true - case hasImage: - return "on image", true - default: - return "for manual", false - } -} - -func describeServiceInServiceGroup(f formatter, svc graphview.ServiceGroup, exposed ...string) []string { - spec := svc.Service.Spec - ip := spec.ClusterIP - port := describeServicePorts(spec) - switch { - case len(exposed) > 1: - return append([]string{fmt.Sprintf("%s (%s)", exposed[0], f.ResourceName(svc.Service))}, exposed[1:]...) - case len(exposed) == 1: - return []string{fmt.Sprintf("%s (%s)", exposed[0], f.ResourceName(svc.Service))} - case spec.Type == kapi.ServiceTypeNodePort: - return []string{fmt.Sprintf("%s (all nodes)%s", f.ResourceName(svc.Service), port)} - case ip == "None": - return []string{fmt.Sprintf("%s (headless)%s", f.ResourceName(svc.Service), port)} - case len(ip) == 0: - return []string{fmt.Sprintf("%s %s", f.ResourceName(svc.Service), port)} - default: - return []string{fmt.Sprintf("%s - %s%s", f.ResourceName(svc.Service), ip, port)} - } -} - -func portOrNodePort(spec kapi.ServiceSpec, port kapi.ServicePort) string { - switch { - case spec.Type != kapi.ServiceTypeNodePort: - return strconv.Itoa(int(port.Port)) - case port.NodePort == 0: - return "" - default: - return strconv.Itoa(int(port.NodePort)) - } -} - -func describeServicePorts(spec kapi.ServiceSpec) string { - switch len(spec.Ports) { - case 0: - return " no ports" - - case 1: - port := portOrNodePort(spec, spec.Ports[0]) - if spec.Ports[0].TargetPort.String() == "0" || spec.ClusterIP == kapi.ClusterIPNone || port == spec.Ports[0].TargetPort.String() { - return fmt.Sprintf(":%s", port) - } - return fmt.Sprintf(":%s -> %s", port, spec.Ports[0].TargetPort.String()) - - default: - pairs := []string{} - for _, port := range spec.Ports { - 
externalPort := portOrNodePort(spec, port) - if port.TargetPort.String() == "0" || spec.ClusterIP == kapi.ClusterIPNone { - pairs = append(pairs, externalPort) - continue - } - if port.Port == port.TargetPort.IntVal { - pairs = append(pairs, port.TargetPort.String()) - } else { - pairs = append(pairs, fmt.Sprintf("%s->%s", externalPort, port.TargetPort.String())) - } - } - return " ports " + strings.Join(pairs, ", ") - } -} - -func filterBoringPods(pods []graphview.Pod) ([]graphview.Pod, error) { - monopods := []graphview.Pod{} - - for _, pod := range pods { - actualPod, ok := pod.Pod.Object().(*kapi.Pod) - if !ok { - continue - } - meta, err := kapi.ObjectMetaFor(actualPod) - if err != nil { - return nil, err - } - _, isDeployerPod := meta.Labels[deployapi.DeployerPodForDeploymentLabel] - _, isBuilderPod := meta.Annotations[buildapi.BuildAnnotation] - isFinished := actualPod.Status.Phase == kapi.PodSucceeded || actualPod.Status.Phase == kapi.PodFailed - if isDeployerPod || isBuilderPod || isFinished { - continue - } - monopods = append(monopods, pod) - } - - return monopods, nil -} - -// GraphLoader is a stateful interface that provides methods for building the nodes of a graph -type GraphLoader interface { - // Load is responsible for gathering and saving the objects this GraphLoader should AddToGraph - Load() error - // AddToGraph - AddToGraph(g osgraph.Graph) error -} - -type rcLoader struct { - namespace string - lister kclient.ReplicationControllersNamespacer - items []kapi.ReplicationController -} - -func (l *rcLoader) Load() error { - list, err := l.lister.ReplicationControllers(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *rcLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsureReplicationControllerNode(g, &l.items[i]) - } - - return nil -} - -type serviceLoader struct { - namespace string - lister kclient.ServicesNamespacer - items 
[]kapi.Service -} - -func (l *serviceLoader) Load() error { - list, err := l.lister.Services(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *serviceLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsureServiceNode(g, &l.items[i]) - } - - return nil -} - -type podLoader struct { - namespace string - lister kclient.PodsNamespacer - items []kapi.Pod -} - -func (l *podLoader) Load() error { - list, err := l.lister.Pods(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *podLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsurePodNode(g, &l.items[i]) - } - - return nil -} - -type petsetLoader struct { - namespace string - lister kclient.PetSetNamespacer - items []kapps.PetSet -} - -func (l *petsetLoader) Load() error { - list, err := l.lister.PetSets(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *petsetLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsurePetSetNode(g, &l.items[i]) - } - - return nil -} - -type horizontalPodAutoscalerLoader struct { - namespace string - lister kclient.HorizontalPodAutoscalersNamespacer - items []autoscaling.HorizontalPodAutoscaler -} - -func (l *horizontalPodAutoscalerLoader) Load() error { - list, err := l.lister.HorizontalPodAutoscalers(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *horizontalPodAutoscalerLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsureHorizontalPodAutoscalerNode(g, &l.items[i]) - } - - return nil -} - -type serviceAccountLoader struct { - namespace string - lister kclient.ServiceAccountsNamespacer - items []kapi.ServiceAccount -} - -func (l *serviceAccountLoader) Load() 
error { - list, err := l.lister.ServiceAccounts(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *serviceAccountLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsureServiceAccountNode(g, &l.items[i]) - } - - return nil -} - -type secretLoader struct { - namespace string - lister kclient.SecretsNamespacer - items []kapi.Secret -} - -func (l *secretLoader) Load() error { - list, err := l.lister.Secrets(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *secretLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - kubegraph.EnsureSecretNode(g, &l.items[i]) - } - - return nil -} - -type isLoader struct { - namespace string - lister client.ImageStreamsNamespacer - items []imageapi.ImageStream -} - -func (l *isLoader) Load() error { - list, err := l.lister.ImageStreams(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *isLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - imagegraph.EnsureImageStreamNode(g, &l.items[i]) - imagegraph.EnsureAllImageStreamTagNodes(g, &l.items[i]) - } - - return nil -} - -type dcLoader struct { - namespace string - lister client.DeploymentConfigsNamespacer - items []deployapi.DeploymentConfig -} - -func (l *dcLoader) Load() error { - list, err := l.lister.DeploymentConfigs(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *dcLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - deploygraph.EnsureDeploymentConfigNode(g, &l.items[i]) - } - - return nil -} - -type bcLoader struct { - namespace string - lister client.BuildConfigsNamespacer - items []buildapi.BuildConfig -} - -func (l *bcLoader) Load() error { - list, err := 
l.lister.BuildConfigs(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return errors.TolerateNotFoundError(err) - } - - l.items = list.Items - return nil -} - -func (l *bcLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - buildgraph.EnsureBuildConfigNode(g, &l.items[i]) - } - - return nil -} - -type buildLoader struct { - namespace string - lister client.BuildsNamespacer - items []buildapi.Build -} - -func (l *buildLoader) Load() error { - list, err := l.lister.Builds(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return errors.TolerateNotFoundError(err) - } - - l.items = list.Items - return nil -} - -func (l *buildLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - buildgraph.EnsureBuildNode(g, &l.items[i]) - } - - return nil -} - -type routeLoader struct { - namespace string - lister client.RoutesNamespacer - items []routeapi.Route -} - -func (l *routeLoader) Load() error { - list, err := l.lister.Routes(l.namespace).List(kapi.ListOptions{}) - if err != nil { - return err - } - - l.items = list.Items - return nil -} - -func (l *routeLoader) AddToGraph(g osgraph.Graph) error { - for i := range l.items { - routegraph.EnsureRouteNode(g, &l.items[i]) - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/addr.go b/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/addr.go deleted file mode 100644 index b686463c4..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/addr.go +++ /dev/null @@ -1,176 +0,0 @@ -package flagtypes - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -// urlPrefixes is the list of string prefix values that may indicate a URL -// is present. -var urlPrefixes = []string{"http://", "https://", "tcp://"} - -// Addr is a flag type that attempts to load a host, IP, host:port, or -// URL value from a string argument. 
It tracks whether the value was set -// and allows the caller to provide defaults for the scheme and port. -type Addr struct { - // Specified by the caller - DefaultScheme string - DefaultPort int - AllowPrefix bool - - // Provided will be true if Set is invoked - Provided bool - // Value is the exact value provided on the flag - Value string - - // URL represents the user input. The Host field is guaranteed - // to be set if Provided is true - URL *url.URL - // Host is the hostname or IP portion of the user input - Host string - // IPv6Host is true if the hostname appears to be an IPv6 input - IPv6Host bool - // Port is the port portion of the user input. Will be 0 if no port was found - // and no default port could be established. - Port int -} - -// Default creates a new Address with the value set -func (a Addr) Default() Addr { - if err := a.Set(a.Value); err != nil { - panic(err) - } - a.Provided = false - return a -} - -// String returns the string representation of the Addr -func (a *Addr) String() string { - if a.URL == nil { - return a.Value - } - return a.URL.String() -} - -// Set attempts to set a string value to an address -func (a *Addr) Set(value string) error { - scheme := a.DefaultScheme - if len(scheme) == 0 { - scheme = "tcp" - } - addr := &url.URL{ - Scheme: scheme, - } - - switch { - case a.isURL(value): - parsed, err := url.Parse(value) - if err != nil { - return fmt.Errorf("not a valid URL: %v", err) - } - if !a.AllowPrefix { - parsed.Path = "" - } - parsed.RawQuery = "" - parsed.Fragment = "" - - if strings.Contains(parsed.Host, ":") { - host, port, err := net.SplitHostPort(parsed.Host) - if err != nil { - return fmt.Errorf("not a valid host:port: %v", err) - } - portNum, err := strconv.ParseUint(port, 10, 64) - if err != nil { - return fmt.Errorf("not a valid port: %v", err) - } - a.Host = host - a.Port = int(portNum) - - } else { - port := 0 - switch parsed.Scheme { - case "http": - port = 80 - case "https": - port = 443 - default: - return 
fmt.Errorf("no port specified") - } - a.Host = parsed.Host - a.Port = port - } - addr = parsed - - case isIPv6Host(value): - a.Host = value - a.Port = a.DefaultPort - - case strings.Contains(value, ":"): - host, port, err := net.SplitHostPort(value) - if err != nil { - return fmt.Errorf("not a valid host:port: %v", err) - } - portNum, err := strconv.ParseUint(port, 10, 64) - if err != nil { - return fmt.Errorf("not a valid port: %v", err) - } - a.Host = host - a.Port = int(portNum) - - default: - port := a.DefaultPort - if port == 0 { - switch a.DefaultScheme { - case "http": - port = 80 - case "https": - port = 443 - default: - return fmt.Errorf("no port specified") - } - } - a.Host = value - a.Port = port - } - addr.Host = net.JoinHostPort(a.Host, strconv.FormatInt(int64(a.Port), 10)) - - if value != a.Value { - a.Provided = true - } - a.URL = addr - a.IPv6Host = isIPv6Host(a.Host) - a.Value = value - - return nil -} - -// Type returns a string representation of what kind of value this is -func (a *Addr) Type() string { - return "string" -} - -// isURL returns true if the provided value appears to be a valid URL. -func (a *Addr) isURL(value string) bool { - prefixes := urlPrefixes - if a.DefaultScheme != "" { - prefixes = append(prefixes, fmt.Sprintf("%s://", a.DefaultScheme)) - } - for _, p := range prefixes { - if strings.HasPrefix(value, p) { - return true - } - } - return false -} - -// isIPv6Host returns true if the value appears to be an IPv6 host string (that does -// not include a port). 
-func isIPv6Host(value string) bool { - if strings.HasPrefix(value, "[") { - return false - } - return strings.Contains(value, "%") || strings.Count(value, ":") > 1 -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/doc.go b/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/doc.go deleted file mode 100644 index caca72f1b..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package flagtypes provides types that implement the pflags.Value interface for -// converting command line flags to objects. -package flagtypes diff --git a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/glog.go b/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/glog.go deleted file mode 100644 index 6d2655455..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/glog.go +++ /dev/null @@ -1,30 +0,0 @@ -package flagtypes - -import ( - "flag" - - "github.com/golang/glog" - "github.com/spf13/pflag" -) - -// GLog binds the log flags from the default Google "flag" package into a pflag.FlagSet. 
-func GLog(flags *pflag.FlagSet) { - from := flag.CommandLine - if flag := from.Lookup("v"); flag != nil { - level := flag.Value.(*glog.Level) - levelPtr := (*int32)(level) - flags.Int32Var(levelPtr, "loglevel", 0, "Set the level of log output (0-10)") - } - if flag := from.Lookup("vmodule"); flag != nil { - value := flag.Value - flags.Var(pflagValue{value}, "logspec", "Set per module logging with file|pattern=LEVEL,...") - } -} - -type pflagValue struct { - flag.Value -} - -func (pflagValue) Type() string { - return "string" -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/net.go b/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/net.go deleted file mode 100644 index f939111f9..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/flagtypes/net.go +++ /dev/null @@ -1,59 +0,0 @@ -package flagtypes - -import ( - "fmt" - "net" - "strings" -) - -// lifted from kubernetes/pkg/util/net.go. same flags vs pflags problem as we had with StringList - -// IP adapts net.IP for use as a flag. -type IP net.IP - -func (ip IP) String() string { - return net.IP(ip).String() -} - -func (ip *IP) Set(value string) error { - *ip = IP(net.ParseIP(strings.TrimSpace(value))) - if *ip == nil { - return fmt.Errorf("invalid IP address: '%s'", value) - } - return nil -} - -// Type returns a string representation of what kind of argument this is -func (ip *IP) Type() string { - return "cmd.flagtypes.IP" -} - -// IPNet adapts net.IPNet for use as a flag. 
-type IPNet net.IPNet - -func DefaultIPNet(value string) IPNet { - ret := IPNet{} - if err := ret.Set(value); err != nil { - panic(err) - } - return ret -} - -func (ipnet IPNet) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *IPNet) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = IPNet(*n) - return nil -} - -// Type returns a string representation of what kind of argument this is -func (ipnet *IPNet) Type() string { - return "cmd.flagtypes.IPNet" -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/cached_discovery.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/cached_discovery.go deleted file mode 100644 index 731acbd7b..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/cached_discovery.go +++ /dev/null @@ -1,136 +0,0 @@ -package clientcmd - -import ( - "errors" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/golang/glog" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/runtime" -) - -// CachedDiscoveryClient implements the functions that discovery server-supported API groups, -// versions and resources. -type CachedDiscoveryClient struct { - discovery.DiscoveryInterface - - // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. - cacheDirectory string - - // ttl is how long the cache should be considered valid - ttl time.Duration -} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. 
-func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) { - filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. - if err == nil { - cachedResources := &unversioned.APIResourceList{} - if err := runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(6).Infof("returning cached discovery info from %v", filename) - return cachedResources, nil - } - } - - liveResources, err := d.DiscoveryInterface.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return liveResources, err - } - - if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(3).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveResources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *CachedDiscoveryClient) ServerResources() (map[string]*unversioned.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - groupVersions := unversioned.ExtractGroupVersions(apiGroups) - result := map[string]*unversioned.APIResourceList{} - for _, groupVersion := range groupVersions { - resources, err := d.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return nil, err - } - result[groupVersion] = resources - } - return result, nil -} - -func (d *CachedDiscoveryClient) ServerGroups() (*unversioned.APIGroupList, error) { - filename := filepath.Join(d.cacheDirectory, "servergroups.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
- if err == nil { - cachedGroups := &unversioned.APIGroupList{} - if err := runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(6).Infof("returning cached discovery info from %v", filename) - return cachedGroups, nil - } - } - - liveGroups, err := d.DiscoveryInterface.ServerGroups() - if err != nil { - return liveGroups, err - } - - if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(3).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveGroups, nil -} - -func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { - file, err := os.Open(filename) - if err != nil { - return nil, err - } - - fileInfo, err := file.Stat() - if err != nil { - return nil, err - } - if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { - return nil, errors.New("cache expired") - } - - // the cache is present and its valid. Try to read and use it. - cachedBytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - return cachedBytes, nil -} - -func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { - return err - } - - bytes, err := runtime.Encode(kapi.Codecs.LegacyCodec(), obj) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, bytes, 0755) -} - -// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. 
-func NewCachedDiscoveryClient(delegate discovery.DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { - return &CachedDiscoveryClient{DiscoveryInterface: delegate, cacheDirectory: cacheDirectory, ttl: ttl} -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientcmd.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientcmd.go deleted file mode 100644 index 4e1e3ab2f..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientcmd.go +++ /dev/null @@ -1,247 +0,0 @@ -package clientcmd - -import ( - "fmt" - "io/ioutil" - "strings" - - "github.com/golang/glog" - "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - - osclient "github.com/openshift/origin/pkg/client" - "github.com/openshift/origin/pkg/cmd/flagtypes" - "github.com/openshift/origin/pkg/cmd/util" -) - -const ConfigSyntax = " --master=" - -// Config contains all the necessary bits for client configuration -type Config struct { - // MasterAddr is the address the master can be reached on (host, host:port, or URL). - MasterAddr flagtypes.Addr - // KubernetesAddr is the address of the Kubernetes server (host, host:port, or URL). - // If omitted defaults to the master. 
- KubernetesAddr flagtypes.Addr - // CommonConfig is the shared base config for both the OpenShift config and Kubernetes config - CommonConfig restclient.Config - // Namespace is the namespace to act in - Namespace string - - // If set, allow kubeconfig file loading - FromFile bool - // If true, no environment is loaded (for testing, primarily) - SkipEnv bool - clientConfig clientcmd.ClientConfig -} - -// NewConfig returns a new configuration -func NewConfig() *Config { - return &Config{ - MasterAddr: flagtypes.Addr{Value: "localhost:8080", DefaultScheme: "http", DefaultPort: 8080, AllowPrefix: true}.Default(), - KubernetesAddr: flagtypes.Addr{Value: "localhost:8080", DefaultScheme: "http", DefaultPort: 8080}.Default(), - CommonConfig: restclient.Config{}, - } -} - -// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed -func AnonymousClientConfig(config *restclient.Config) restclient.Config { - // copy only known safe fields - // TODO: expose a copy method on the config that is "auth free" - return restclient.Config{ - Host: config.Host, - APIPath: config.APIPath, - Prefix: config.Prefix, - ContentConfig: config.ContentConfig, - TLSClientConfig: restclient.TLSClientConfig{ - CAFile: config.TLSClientConfig.CAFile, - CAData: config.TLSClientConfig.CAData, - }, - RateLimiter: config.RateLimiter, - Insecure: config.Insecure, - UserAgent: config.UserAgent, - Transport: config.Transport, - WrapTransport: config.WrapTransport, - QPS: config.QPS, - Burst: config.Burst, - } -} - -// BindClientConfigSecurityFlags adds flags for the supplied client config -func BindClientConfigSecurityFlags(config *restclient.Config, flags *pflag.FlagSet) { - flags.BoolVar(&config.Insecure, "insecure-skip-tls-verify", config.Insecure, "If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure.") - flags.StringVar(&config.CertFile, "client-certificate", config.CertFile, "Path to a client certificate file for TLS.") - flags.StringVar(&config.KeyFile, "client-key", config.KeyFile, "Path to a client key file for TLS.") - flags.StringVar(&config.CAFile, "certificate-authority", config.CAFile, "Path to a cert. file for the certificate authority") - flags.StringVar(&config.BearerToken, "token", config.BearerToken, "If present, the bearer token for this request.") -} - -// Bind binds configuration values to the passed flagset -func (cfg *Config) Bind(flags *pflag.FlagSet) { - flags.Var(&cfg.MasterAddr, "master", "The address the master can be reached on (host, host:port, or URL).") - flags.Var(&cfg.KubernetesAddr, "kubernetes", "The address of the Kubernetes server (host, host:port, or URL). If omitted defaults to the master.") - - if cfg.FromFile { - cfg.clientConfig = DefaultClientConfig(flags) - } else { - BindClientConfigSecurityFlags(&cfg.CommonConfig, flags) - } -} - -// BindToFile is used when this config will not be bound to flags, but should load the config file -// from disk if available. 
-func (cfg *Config) BindToFile() *Config { - cfg.clientConfig = DefaultClientConfig(pflag.NewFlagSet("empty", pflag.ContinueOnError)) - return cfg -} - -func EnvVars(host string, caData []byte, insecure bool, bearerTokenFile string) []api.EnvVar { - envvars := []api.EnvVar{ - {Name: "KUBERNETES_MASTER", Value: host}, - {Name: "OPENSHIFT_MASTER", Value: host}, - } - - if len(bearerTokenFile) > 0 { - envvars = append(envvars, api.EnvVar{Name: "BEARER_TOKEN_FILE", Value: bearerTokenFile}) - } - - if len(caData) > 0 { - envvars = append(envvars, api.EnvVar{Name: "OPENSHIFT_CA_DATA", Value: string(caData)}) - } else if insecure { - envvars = append(envvars, api.EnvVar{Name: "OPENSHIFT_INSECURE", Value: "true"}) - } - - return envvars -} - -func (cfg *Config) bindEnv() error { - // bypass loading from env - if cfg.SkipEnv { - return nil - } - var err error - - // callers may not use the config file if they have specified a master directly, for backwards - // compatibility with components that used to use env, switch to service account token, and have - // config defined in env. - _, masterSet := util.GetEnv("OPENSHIFT_MASTER") - specifiedMaster := masterSet || cfg.MasterAddr.Provided - - if cfg.clientConfig != nil && !specifiedMaster { - clientConfig, err := cfg.clientConfig.ClientConfig() - if err != nil { - return err - } - cfg.CommonConfig = *clientConfig - cfg.Namespace, _, err = cfg.clientConfig.Namespace() - if err != nil { - return err - } - - if !cfg.MasterAddr.Provided { - cfg.MasterAddr.Set(cfg.CommonConfig.Host) - } - if !cfg.KubernetesAddr.Provided { - cfg.KubernetesAddr.Set(cfg.CommonConfig.Host) - } - return nil - } - - // Legacy path - preserve env vars set on pods that previously were honored. 
- if value, ok := util.GetEnv("KUBERNETES_MASTER"); ok && !cfg.KubernetesAddr.Provided { - cfg.KubernetesAddr.Set(value) - } - if value, ok := util.GetEnv("OPENSHIFT_MASTER"); ok && !cfg.MasterAddr.Provided { - cfg.MasterAddr.Set(value) - } - if value, ok := util.GetEnv("BEARER_TOKEN"); ok && len(cfg.CommonConfig.BearerToken) == 0 { - cfg.CommonConfig.BearerToken = value - } - if value, ok := util.GetEnv("BEARER_TOKEN_FILE"); ok && len(cfg.CommonConfig.BearerToken) == 0 { - if tokenData, tokenErr := ioutil.ReadFile(value); tokenErr == nil { - cfg.CommonConfig.BearerToken = strings.TrimSpace(string(tokenData)) - if len(cfg.CommonConfig.BearerToken) == 0 { - err = fmt.Errorf("BEARER_TOKEN_FILE %q was empty", value) - } - } else { - err = fmt.Errorf("Error reading BEARER_TOKEN_FILE %q: %v", value, tokenErr) - } - } - - if value, ok := util.GetEnv("OPENSHIFT_CA_FILE"); ok && len(cfg.CommonConfig.CAFile) == 0 { - cfg.CommonConfig.CAFile = value - } else if value, ok := util.GetEnv("OPENSHIFT_CA_DATA"); ok && len(cfg.CommonConfig.CAData) == 0 { - cfg.CommonConfig.CAData = []byte(value) - } - - if value, ok := util.GetEnv("OPENSHIFT_CERT_FILE"); ok && len(cfg.CommonConfig.CertFile) == 0 { - cfg.CommonConfig.CertFile = value - } else if value, ok := util.GetEnv("OPENSHIFT_CERT_DATA"); ok && len(cfg.CommonConfig.CertData) == 0 { - cfg.CommonConfig.CertData = []byte(value) - } - - if value, ok := util.GetEnv("OPENSHIFT_KEY_FILE"); ok && len(cfg.CommonConfig.KeyFile) == 0 { - cfg.CommonConfig.KeyFile = value - } else if value, ok := util.GetEnv("OPENSHIFT_KEY_DATA"); ok && len(cfg.CommonConfig.KeyData) == 0 { - cfg.CommonConfig.KeyData = []byte(value) - } - - if value, ok := util.GetEnv("OPENSHIFT_INSECURE"); ok && len(value) != 0 { - cfg.CommonConfig.Insecure = value == "true" - } - - return err -} - -// KubeConfig returns the Kubernetes configuration -func (cfg *Config) KubeConfig() *restclient.Config { - err := cfg.bindEnv() - if err != nil { - glog.Error(err) - } - - 
kaddr := cfg.KubernetesAddr - if !kaddr.Provided { - kaddr = cfg.MasterAddr - } - - kConfig := cfg.CommonConfig - kConfig.Host = kaddr.URL.String() - - return &kConfig -} - -// OpenShiftConfig returns the OpenShift configuration -func (cfg *Config) OpenShiftConfig() *restclient.Config { - err := cfg.bindEnv() - if err != nil { - glog.Error(err) - } - - osConfig := cfg.CommonConfig - if len(osConfig.Host) == 0 || cfg.MasterAddr.Provided { - osConfig.Host = cfg.MasterAddr.String() - } - - return &osConfig -} - -// Clients returns an OpenShift and a Kubernetes client from a given configuration -func (cfg *Config) Clients() (osclient.Interface, kclient.Interface, error) { - cfg.bindEnv() - - kubeClient, err := kclient.New(cfg.KubeConfig()) - if err != nil { - return nil, nil, fmt.Errorf("Unable to configure Kubernetes client: %v", err) - } - - osClient, err := osclient.New(cfg.OpenShiftConfig()) - if err != nil { - return nil, nil, fmt.Errorf("Unable to configure Origin client: %v", err) - } - - return osClient, kubeClient, nil -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientconfig.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientconfig.go deleted file mode 100644 index c152100f3..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/clientconfig.go +++ /dev/null @@ -1,29 +0,0 @@ -package clientcmd - -import ( - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - "github.com/openshift/origin/pkg/cmd/cli/config" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" -) - -func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { - loadingRules := config.NewOpenShiftClientConfigLoadingRules() - flags.StringVar(&loadingRules.ExplicitPath, config.OpenShiftConfigFlagName, "", "Path to the config file to use for CLI requests.") - cobra.MarkFlagFilename(flags, config.OpenShiftConfigFlagName) - - overrides := &clientcmd.ConfigOverrides{} - overrideFlags := 
clientcmd.RecommendedConfigOverrideFlags("") - overrideFlags.ContextOverrideFlags.Namespace.ShortName = "n" - overrideFlags.AuthOverrideFlags.Username.LongName = "" - overrideFlags.AuthOverrideFlags.Password.LongName = "" - clientcmd.BindOverrideFlags(overrides, flags, overrideFlags) - cobra.MarkFlagFilename(flags, overrideFlags.AuthOverrideFlags.ClientCertificate.LongName) - cobra.MarkFlagFilename(flags, overrideFlags.AuthOverrideFlags.ClientKey.LongName) - cobra.MarkFlagFilename(flags, overrideFlags.ClusterOverrideFlags.CertificateAuthority.LongName) - - clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) - - return clientConfig -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/errors.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/errors.go deleted file mode 100644 index f10006fd7..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/errors.go +++ /dev/null @@ -1,108 +0,0 @@ -package clientcmd - -import ( - "errors" - "fmt" - "strings" - - kerrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" -) - -const ( - unknownReason = iota - noServerFoundReason - certificateAuthorityUnknownReason - configurationInvalidReason - tlsOversizedRecordReason - - certificateAuthorityUnknownMsg = "The server uses a certificate signed by unknown authority. You may need to use the --certificate-authority flag to provide the path to a certificate file for the certificate authority, or --insecure-skip-tls-verify to bypass the certificate check and use insecure connections." - notConfiguredMsg = `The client is not configured. You need to run the login command in order to create a default config for your server and credentials: - oc login -You can also run this command again providing the path to a config file directly, either through the --config flag of the KUBECONFIG environment variable. 
-` - tlsOversizedRecordMsg = `Unable to connect to %[2]s using TLS: %[1]s. -Ensure the specified server supports HTTPS.` -) - -// GetPrettyMessageFor prettifys the message of the provided error -func GetPrettyMessageFor(err error) string { - return GetPrettyMessageForServer(err, "") -} - -// GetPrettyMessageForServer prettifys the message of the provided error -func GetPrettyMessageForServer(err error, serverName string) string { - if err == nil { - return "" - } - - reason := detectReason(err) - - switch reason { - case noServerFoundReason: - return notConfiguredMsg - - case certificateAuthorityUnknownReason: - return certificateAuthorityUnknownMsg - - case tlsOversizedRecordReason: - if len(serverName) == 0 { - serverName = "server" - } - return fmt.Sprintf(tlsOversizedRecordMsg, err, serverName) - } - - return err.Error() -} - -// GetPrettyErrorFor prettifys the message of the provided error -func GetPrettyErrorFor(err error) error { - return GetPrettyErrorForServer(err, "") -} - -// GetPrettyErrorForServer prettifys the message of the provided error -func GetPrettyErrorForServer(err error, serverName string) error { - return errors.New(GetPrettyMessageForServer(err, serverName)) -} - -// IsNoServerFound checks whether the provided error is a 'no server found' error or not -func IsNoServerFound(err error) bool { - return detectReason(err) == noServerFoundReason -} - -// IsConfigurationInvalid checks whether the provided error is a 'invalid configuration' error or not -func IsConfigurationInvalid(err error) bool { - return detectReason(err) == configurationInvalidReason -} - -// IsCertificateAuthorityUnknown checks whether the provided error is a 'certificate authority unknown' error or not -func IsCertificateAuthorityUnknown(err error) bool { - return detectReason(err) == certificateAuthorityUnknownReason -} - -// IsForbidden checks whether the provided error is a 'forbidden' error or not -func IsForbidden(err error) bool { - return kerrors.IsForbidden(err) -} - 
-// IsTLSOversizedRecord checks whether the provided error is a url.Error -// with "tls: oversized record received", which usually means TLS not supported. -func IsTLSOversizedRecord(err error) bool { - return detectReason(err) == tlsOversizedRecordReason -} - -func detectReason(err error) int { - if err != nil { - switch { - case strings.Contains(err.Error(), "certificate signed by unknown authority"): - return certificateAuthorityUnknownReason - case strings.Contains(err.Error(), "no server defined"): - return noServerFoundReason - case clientcmd.IsConfigurationInvalid(err): - return configurationInvalidReason - case strings.Contains(err.Error(), "tls: oversized record received"): - return tlsOversizedRecordReason - } - } - return unknownReason -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/factory.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/factory.go deleted file mode 100644 index 6aed46886..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/factory.go +++ /dev/null @@ -1,1071 +0,0 @@ -package clientcmd - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/blang/semver" - "github.com/emicklei/go-restful/swagger" - "github.com/golang/glog" - "github.com/spf13/pflag" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/client/typed/dynamic" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - kclientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/controller" - 
"k8s.io/kubernetes/pkg/kubectl" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/homedir" - - "github.com/openshift/origin/pkg/api/latest" - "github.com/openshift/origin/pkg/api/restmapper" - authorizationapi "github.com/openshift/origin/pkg/authorization/api" - authorizationreaper "github.com/openshift/origin/pkg/authorization/reaper" - buildapi "github.com/openshift/origin/pkg/build/api" - buildcmd "github.com/openshift/origin/pkg/build/cmd" - buildutil "github.com/openshift/origin/pkg/build/util" - "github.com/openshift/origin/pkg/client" - "github.com/openshift/origin/pkg/cmd/cli/describe" - "github.com/openshift/origin/pkg/cmd/util" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deploycmd "github.com/openshift/origin/pkg/deploy/cmd" - deployutil "github.com/openshift/origin/pkg/deploy/util" - imageapi "github.com/openshift/origin/pkg/image/api" - routegen "github.com/openshift/origin/pkg/route/generator" - userapi "github.com/openshift/origin/pkg/user/api" - authenticationreaper "github.com/openshift/origin/pkg/user/reaper" -) - -// New creates a default Factory for commands that should share identical server -// connection behavior. Most commands should use this method to get a factory. 
-func New(flags *pflag.FlagSet) *Factory { - // TODO refactor this upstream: - // DefaultCluster should not be a global - // A call to ClientConfig() should always return the best clientCfg possible - // even if an error was returned, and let the caller decide what to do - kclientcmd.DefaultCluster.Server = "" - - // TODO: there should be two client configs, one for OpenShift, and one for Kubernetes - clientConfig := DefaultClientConfig(flags) - clientConfig = defaultingClientConfig{clientConfig} - f := NewFactory(clientConfig) - f.BindFlags(flags) - - return f -} - -// defaultingClientConfig detects whether the provided config is the default, and if -// so returns an error that indicates the user should set up their config. -type defaultingClientConfig struct { - nested kclientcmd.ClientConfig -} - -// RawConfig calls the nested method -func (c defaultingClientConfig) RawConfig() (kclientcmdapi.Config, error) { - return c.nested.RawConfig() -} - -// Namespace calls the nested method, and if an empty config error is returned -// it checks for the same default as kubectl - the value of POD_NAMESPACE or -// "default". -func (c defaultingClientConfig) Namespace() (string, bool, error) { - namespace, ok, err := c.nested.Namespace() - if err == nil { - return namespace, ok, nil - } - if !kclientcmd.IsEmptyConfig(err) { - return "", false, err - } - - // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. 
- // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up - if ns := os.Getenv("POD_NAMESPACE"); ns != "" { - return ns, true, nil - } - - // Fall back to the namespace associated with the service account token, if available - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, true, nil - } - } - - return api.NamespaceDefault, false, nil -} - -// ConfigAccess implements ClientConfig -func (c defaultingClientConfig) ConfigAccess() kclientcmd.ConfigAccess { - return c.nested.ConfigAccess() -} - -type errConfigurationMissing struct { - err error -} - -func (e errConfigurationMissing) Error() string { - return fmt.Sprintf("%v", e.err) -} - -func IsConfigurationMissing(err error) bool { - switch err.(type) { - case errConfigurationMissing: - return true - } - return kclientcmd.IsContextNotFound(err) -} - -// ClientConfig returns a complete client config -func (c defaultingClientConfig) ClientConfig() (*restclient.Config, error) { - cfg, err := c.nested.ClientConfig() - if err == nil { - return cfg, nil - } - - if !kclientcmd.IsEmptyConfig(err) { - return nil, err - } - - // TODO: need to expose inClusterConfig upstream and use that - if icc, err := restclient.InClusterConfig(); err == nil { - glog.V(4).Infof("Using in-cluster configuration") - return icc, nil - } - - return nil, errConfigurationMissing{fmt.Errorf(`Missing or incomplete configuration info. Please login or point to an existing, complete config file: - - 1. Via the command-line flag --config - 2. Via the KUBECONFIG environment variable - 3. 
In your home directory as ~/.kube/config - -To view or setup config directly use the 'config' command.`)} -} - -// Factory provides common options for OpenShift commands -type Factory struct { - *cmdutil.Factory - OpenShiftClientConfig kclientcmd.ClientConfig - clients *clientCache -} - -func DefaultGenerators(cmdName string) map[string]kubectl.Generator { - generators := map[string]map[string]kubectl.Generator{} - generators["run"] = map[string]kubectl.Generator{ - "deploymentconfig/v1": deploycmd.BasicDeploymentConfigController{}, - "run-controller/v1": kubectl.BasicReplicationController{}, // legacy alias for run/v1 - } - generators["expose"] = map[string]kubectl.Generator{ - "route/v1": routegen.RouteGenerator{}, - } - - return generators[cmdName] -} - -// NewFactory creates an object that holds common methods across all OpenShift commands -func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory { - restMapper := registered.RESTMapper() - - clients := &clientCache{ - clients: make(map[string]*client.Client), - configs: make(map[string]*restclient.Config), - loader: clientConfig, - } - - w := &Factory{ - Factory: cmdutil.NewFactory(clientConfig), - OpenShiftClientConfig: clientConfig, - clients: clients, - } - - w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) { - defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}} - defaultTyper := api.Scheme - - // Output using whatever version was negotiated in the client cache. The - // version we decode with may not be the same as what the server requires. 
- cfg, err := clients.ClientConfigForVersion(nil) - if err != nil { - return defaultMapper, defaultTyper - } - - cmdApiVersion := unversioned.GroupVersion{} - if cfg.GroupVersion != nil { - cmdApiVersion = *cfg.GroupVersion - } - - // at this point we've negotiated and can get the client - oclient, err := clients.ClientForVersion(nil) - if err != nil { - return defaultMapper, defaultTyper - } - - cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host) - cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute)) - - // if we can't find the server version or its too old to have Kind information in the discovery doc, skip the discovery RESTMapper - // and use our hardcoded levels - mapper := registered.RESTMapper() - if serverVersion, err := cachedDiscoverClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) { - mapper = restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient) - } - mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}) - return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme - } - - w.UnstructuredObject = func() (meta.RESTMapper, runtime.ObjectTyper, error) { - // load a discovery client from the default config - cfg, err := clients.ClientConfigForVersion(nil) - if err != nil { - return nil, nil, err - } - dc, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, nil, err - } - cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host) - cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(dc.RESTClient), cacheDir, time.Duration(10*time.Minute)) - - // enumerate all group resources - groupResources, err := discovery.GetAPIGroupResources(cachedDiscoverClient) - if err != nil { - return nil, nil, err - } - - // Register 
unknown APIs as third party for now to make - // validation happy. TODO perhaps make a dynamic schema - // validator to avoid this. - for _, group := range groupResources { - for _, version := range group.Group.Versions { - gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version} - if !registered.IsRegisteredVersion(gv) { - registered.AddThirdPartyAPIGroupVersions(gv) - } - } - } - - // construct unstructured mapper and typer - mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured) - typer := discovery.NewUnstructuredObjectTyper(groupResources) - return NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}), typer, nil - } - - kClientForMapping := w.Factory.ClientForMapping - w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { - if latest.OriginKind(mapping.GroupVersionKind) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - client, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return client.RESTClient, nil - } - return kClientForMapping(mapping) - } - - kUnstructuredClientForMapping := w.Factory.UnstructuredClientForMapping - w.UnstructuredClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { - if latest.OriginKind(mapping.GroupVersionKind) { - cfg, err := clientConfig.ClientConfig() - if err != nil { - return nil, err - } - if err := client.SetOpenShiftDefaults(cfg); err != nil { - return nil, err - } - cfg.APIPath = "/apis" - if mapping.GroupVersionKind.Group == api.GroupName { - cfg.APIPath = "/oapi" - } - gv := mapping.GroupVersionKind.GroupVersion() - cfg.ContentConfig = dynamic.ContentConfig() - cfg.GroupVersion = &gv - return restclient.RESTClientFor(cfg) - } - return kUnstructuredClientForMapping(mapping) - } - - // Save original Describer function - kDescriberFunc := w.Factory.Describer - w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, 
error) { - if latest.OriginKind(mapping.GroupVersionKind) { - oClient, kClient, err := w.Clients() - if err != nil { - return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err) - } - - mappingVersion := mapping.GroupVersionKind.GroupVersion() - cfg, err := clients.ClientConfigForVersion(&mappingVersion) - if err != nil { - return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err) - } - - describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host) - if !ok { - return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) - } - return describer, nil - } - return kDescriberFunc(mapping) - } - kScalerFunc := w.Factory.Scaler - w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") { - oc, kc, err := w.Clients() - if err != nil { - return nil, err - } - return deploycmd.NewDeploymentConfigScaler(oc, kc), nil - } - return kScalerFunc(mapping) - } - kReaperFunc := w.Factory.Reaper - w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - switch mapping.GroupVersionKind.GroupKind() { - case deployapi.Kind("DeploymentConfig"): - oc, kc, err := w.Clients() - if err != nil { - return nil, err - } - return deploycmd.NewDeploymentConfigReaper(oc, kc), nil - case authorizationapi.Kind("Role"): - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - return authorizationreaper.NewRoleReaper(oc, oc), nil - case authorizationapi.Kind("ClusterRole"): - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil - case userapi.Kind("User"): - oc, kc, err := w.Clients() - if err != nil { - return nil, err - } - return authenticationreaper.NewUserReaper( - client.UsersInterface(oc), - client.GroupsInterface(oc), - 
client.ClusterRoleBindingsInterface(oc), - client.RoleBindingsNamespacer(oc), - kclient.SecurityContextConstraintsInterface(kc), - ), nil - case userapi.Kind("Group"): - oc, kc, err := w.Clients() - if err != nil { - return nil, err - } - return authenticationreaper.NewGroupReaper( - client.GroupsInterface(oc), - client.ClusterRoleBindingsInterface(oc), - client.RoleBindingsNamespacer(oc), - kclient.SecurityContextConstraintsInterface(kc), - ), nil - case buildapi.Kind("BuildConfig"): - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - return buildcmd.NewBuildConfigReaper(oc), nil - } - return kReaperFunc(mapping) - } - kGenerators := w.Factory.Generators - w.Generators = func(cmdName string) map[string]kubectl.Generator { - originGenerators := DefaultGenerators(cmdName) - kubeGenerators := kGenerators(cmdName) - - ret := map[string]kubectl.Generator{} - for k, v := range kubeGenerators { - ret[k] = v - } - for k, v := range originGenerators { - ret[k] = v - } - return ret - } - kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject - w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - return kubectl.MakeLabels(t.Spec.Selector), nil - default: - return kMapBasedSelectorForObjectFunc(object) - } - } - kPortsForObjectFunc := w.Factory.PortsForObject - w.PortsForObject = func(object runtime.Object) ([]string, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - return getPorts(t.Spec.Template.Spec), nil - default: - return kPortsForObjectFunc(object) - } - } - kLogsForObjectFunc := w.Factory.LogsForObject - w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - dopts, ok := options.(*deployapi.DeploymentLogOptions) - if !ok { - return nil, errors.New("provided options object is not a DeploymentLogOptions") - } - oc, _, err 
:= w.Clients() - if err != nil { - return nil, err - } - return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil - case *buildapi.Build: - bopts, ok := options.(*buildapi.BuildLogOptions) - if !ok { - return nil, errors.New("provided options object is not a BuildLogOptions") - } - if bopts.Version != nil { - return nil, errors.New("cannot specify a version and a build") - } - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil - case *buildapi.BuildConfig: - bopts, ok := options.(*buildapi.BuildLogOptions) - if !ok { - return nil, errors.New("provided options object is not a BuildLogOptions") - } - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - builds, err := oc.Builds(t.Namespace).List(api.ListOptions{}) - if err != nil { - return nil, err - } - builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name)) - if len(builds.Items) == 0 { - return nil, fmt.Errorf("no builds found for %q", t.Name) - } - if bopts.Version != nil { - // If a version has been specified, try to get the logs from that build. - desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version)) - return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil - } - sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items))) - return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil - default: - return kLogsForObjectFunc(object, options) - } - } - // Saves current resource name (or alias if any) in PrintOptions. 
Once saved, it will not be overwritten by the - // kubernetes resource alias look-up, as it will notice a non-empty value in `options.Kind` - w.Printer = func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) { - if mapping != nil { - options.Kind = mapping.Resource - if alias, ok := resourceShortFormFor(mapping.Resource); ok { - options.Kind = alias - } - } - return describe.NewHumanReadablePrinter(options), nil - } - kCanBeExposed := w.Factory.CanBeExposed - w.CanBeExposed = func(kind unversioned.GroupKind) error { - if kind == deployapi.Kind("DeploymentConfig") { - return nil - } - return kCanBeExposed(kind) - } - kCanBeAutoscaled := w.Factory.CanBeAutoscaled - w.CanBeAutoscaled = func(kind unversioned.GroupKind) error { - if kind == deployapi.Kind("DeploymentConfig") { - return nil - } - return kCanBeAutoscaled(kind) - } - kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject - w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - _, kc, err := w.Clients() - if err != nil { - return nil, err - } - selector := labels.SelectorFromSet(t.Spec.Selector) - f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f) - return pod, err - default: - return kAttachablePodForObjectFunc(object) - } - } - kUpdatePodSpecForObject := w.Factory.UpdatePodSpecForObject - w.UpdatePodSpecForObject = func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { - switch t := obj.(type) { - case *deployapi.DeploymentConfig: - template := t.Spec.Template - if template == nil { - t.Spec.Template = template - template = &api.PodTemplateSpec{} - } - return true, fn(&template.Spec) - default: - return kUpdatePodSpecForObject(obj, fn) - } - } - kProtocolsForObject := w.Factory.ProtocolsForObject - w.ProtocolsForObject = 
func(object runtime.Object) (map[string]string, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - return getProtocols(t.Spec.Template.Spec), nil - default: - return kProtocolsForObject(object) - } - } - - kSwaggerSchemaFunc := w.Factory.SwaggerSchema - w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { - if !latest.OriginKind(gvk) { - return kSwaggerSchemaFunc(gvk) - } - // TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift - // group from the scheme. - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion()) - } - - w.EditorEnvs = func() []string { - return []string{"OC_EDITOR", "EDITOR"} - } - w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {} - kPauseObjectFunc := w.Factory.PauseObject - w.Factory.PauseObject = func(object runtime.Object) (bool, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - if t.Spec.Paused { - return true, nil - } - t.Spec.Paused = true - oc, _, err := w.Clients() - if err != nil { - return false, err - } - _, err = oc.DeploymentConfigs(t.Namespace).Update(t) - // TODO: Pause the deployer containers. - return false, err - default: - return kPauseObjectFunc(object) - } - } - kResumeObjectFunc := w.Factory.ResumeObject - w.Factory.ResumeObject = func(object runtime.Object) (bool, error) { - switch t := object.(type) { - case *deployapi.DeploymentConfig: - if !t.Spec.Paused { - return true, nil - } - t.Spec.Paused = false - oc, _, err := w.Clients() - if err != nil { - return false, err - } - _, err = oc.DeploymentConfigs(t.Namespace).Update(t) - // TODO: Resume the deployer containers. 
- return false, err - default: - return kResumeObjectFunc(object) - } - } - kHistoryViewerFunc := w.Factory.HistoryViewer - w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { - switch mapping.GroupVersionKind.GroupKind() { - case deployapi.Kind("DeploymentConfig"): - oc, kc, err := w.Clients() - if err != nil { - return nil, err - } - return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil - } - return kHistoryViewerFunc(mapping) - } - kRollbackerFunc := w.Factory.Rollbacker - w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { - switch mapping.GroupVersionKind.GroupKind() { - case deployapi.Kind("DeploymentConfig"): - oc, _, err := w.Clients() - if err != nil { - return nil, err - } - return deploycmd.NewDeploymentConfigRollbacker(oc), nil - } - return kRollbackerFunc(mapping) - } - - return w -} - -// useDiscoveryRESTMapper checks the server version to see if its recent enough to have -// enough discovery information avaiable to reliably build a RESTMapper. If not, use the -// hardcoded mapper in this client (legacy behavior) -func useDiscoveryRESTMapper(serverVersion string) bool { - serverSemVer, err := semver.Parse(serverVersion[1:]) - if err != nil { - return false - } - if serverSemVer.LT(semver.MustParse("1.3.0")) { - return false - } - return true -} - -// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. Windows is really restrictive, so this is really restrictive -var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/\.)]`) - -// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name. -func computeDiscoverCacheDir(parentDir, host string) string { - // strip the optional scheme from host if its there: - schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1) - // now do a simple collapse of non-AZ09 characters. 
Collisions are possible but unlikely. Even if we do collide the problem is short lived - safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_") - - return filepath.Join(parentDir, safeHost) -} - -func getPorts(spec api.PodSpec) []string { - result := []string{} - for _, container := range spec.Containers { - for _, port := range container.Ports { - result = append(result, strconv.Itoa(int(port.ContainerPort))) - } - } - return result -} - -func getProtocols(spec api.PodSpec) map[string]string { - result := make(map[string]string) - for _, container := range spec.Containers { - for _, port := range container.Ports { - result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) - } - } - return result -} - -func ResourceMapper(f *Factory) *resource.Mapper { - mapper, typer := f.Object(false) - return &resource.Mapper{ - RESTMapper: mapper, - ObjectTyper: typer, - ClientMapper: resource.ClientMapperFunc(f.ClientForMapping), - } -} - -// UpdateObjectEnvironment update the environment variables in object specification. -func (f *Factory) UpdateObjectEnvironment(obj runtime.Object, fn func(*[]api.EnvVar) error) (bool, error) { - switch t := obj.(type) { - case *buildapi.BuildConfig: - if t.Spec.Strategy.CustomStrategy != nil { - return true, fn(&t.Spec.Strategy.CustomStrategy.Env) - } - if t.Spec.Strategy.SourceStrategy != nil { - return true, fn(&t.Spec.Strategy.SourceStrategy.Env) - } - if t.Spec.Strategy.DockerStrategy != nil { - return true, fn(&t.Spec.Strategy.DockerStrategy.Env) - } - } - return false, fmt.Errorf("object does not contain any environment variables") -} - -// ExtractFileContents returns a map of keys to contents, false if the object cannot support such an -// operation, or an error. 
-func (f *Factory) ExtractFileContents(obj runtime.Object) (map[string][]byte, bool, error) { - switch t := obj.(type) { - case *api.Secret: - return t.Data, true, nil - case *api.ConfigMap: - out := make(map[string][]byte) - for k, v := range t.Data { - out[k] = []byte(v) - } - return out, true, nil - default: - return nil, false, nil - } -} - -// ApproximatePodTemplateForObject returns a pod template object for the provided source. -// It may return both an error and a object. It attempt to return the best possible template -// available at the current time. -func (w *Factory) ApproximatePodTemplateForObject(object runtime.Object) (*api.PodTemplateSpec, error) { - switch t := object.(type) { - case *imageapi.ImageStreamTag: - // create a minimal pod spec that uses the image referenced by the istag without any introspection - // it possible that we could someday do a better job introspecting it - return &api.PodTemplateSpec{ - Spec: api.PodSpec{ - RestartPolicy: api.RestartPolicyNever, - Containers: []api.Container{ - {Name: "container-00", Image: t.Image.DockerImageReference}, - }, - }, - }, nil - case *imageapi.ImageStreamImage: - // create a minimal pod spec that uses the image referenced by the istag without any introspection - // it possible that we could someday do a better job introspecting it - return &api.PodTemplateSpec{ - Spec: api.PodSpec{ - RestartPolicy: api.RestartPolicyNever, - Containers: []api.Container{ - {Name: "container-00", Image: t.Image.DockerImageReference}, - }, - }, - }, nil - case *deployapi.DeploymentConfig: - fallback := t.Spec.Template - - _, kc, err := w.Clients() - if err != nil { - return fallback, err - } - - latestDeploymentName := deployutil.LatestDeploymentNameForConfig(t) - deployment, err := kc.ReplicationControllers(t.Namespace).Get(latestDeploymentName) - if err != nil { - return fallback, err - } - - fallback = deployment.Spec.Template - - pods, err := kc.Pods(deployment.Namespace).List(api.ListOptions{LabelSelector: 
labels.SelectorFromSet(deployment.Spec.Selector)}) - if err != nil { - return fallback, err - } - - for i := range pods.Items { - pod := &pods.Items[i] - if fallback == nil || pod.CreationTimestamp.Before(fallback.CreationTimestamp) { - fallback = &api.PodTemplateSpec{ - ObjectMeta: pod.ObjectMeta, - Spec: pod.Spec, - } - } - } - return fallback, nil - - default: - pod, err := w.AttachablePodForObject(object) - if pod != nil { - return &api.PodTemplateSpec{ - ObjectMeta: pod.ObjectMeta, - Spec: pod.Spec, - }, err - } - switch t := object.(type) { - case *api.ReplicationController: - return t.Spec.Template, err - case *extensions.ReplicaSet: - return &t.Spec.Template, err - case *extensions.DaemonSet: - return &t.Spec.Template, err - case *batch.Job: - return &t.Spec.Template, err - } - return nil, err - } -} - -func (f *Factory) PodForResource(resource string, timeout time.Duration) (string, error) { - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - namespace, _, err := f.DefaultNamespace() - if err != nil { - return "", err - } - mapper, _ := f.Object(false) - resourceType, name, err := util.ResolveResource(api.Resource("pods"), resource, mapper) - if err != nil { - return "", err - } - - switch resourceType { - case api.Resource("pods"): - return name, nil - case api.Resource("replicationcontrollers"): - kc, err := f.Client() - if err != nil { - return "", err - } - rc, err := kc.ReplicationControllers(namespace).Get(name) - if err != nil { - return "", err - } - selector := labels.SelectorFromSet(rc.Spec.Selector) - pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy) - if err != nil { - return "", err - } - return pod.Name, nil - case deployapi.Resource("deploymentconfigs"): - oc, kc, err := f.Clients() - if err != nil { - return "", err - } - dc, err := oc.DeploymentConfigs(namespace).Get(name) - if err != nil { - return "", err - } - selector := 
labels.SelectorFromSet(dc.Spec.Selector) - pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy) - if err != nil { - return "", err - } - return pod.Name, nil - case extensions.Resource("daemonsets"): - kc, err := f.Client() - if err != nil { - return "", err - } - ds, err := kc.Extensions().DaemonSets(namespace).Get(name) - if err != nil { - return "", err - } - selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - return "", err - } - pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy) - if err != nil { - return "", err - } - return pod.Name, nil - case extensions.Resource("jobs"): - kc, err := f.Client() - if err != nil { - return "", err - } - job, err := kc.Extensions().Jobs(namespace).Get(name) - if err != nil { - return "", err - } - return podNameForJob(job, kc, timeout, sortBy) - case batch.Resource("jobs"): - kc, err := f.Client() - if err != nil { - return "", err - } - job, err := kc.Batch().Jobs(namespace).Get(name) - if err != nil { - return "", err - } - return podNameForJob(job, kc, timeout, sortBy) - default: - return "", fmt.Errorf("remote shell for %s is not supported", resourceType) - } -} - -func podNameForJob(job *batch.Job, kc *kclient.Client, timeout time.Duration, sortBy func(pods []*api.Pod) sort.Interface) (string, error) { - selector, err := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - if err != nil { - return "", err - } - pod, _, err := cmdutil.GetFirstPod(kc, job.Namespace, selector, timeout, sortBy) - if err != nil { - return "", err - } - return pod.Name, nil -} - -// Clients returns an OpenShift and Kubernetes client. 
-func (f *Factory) Clients() (*client.Client, *kclient.Client, error) { - kClient, err := f.Client() - if err != nil { - return nil, nil, err - } - osClient, err := f.clients.ClientForVersion(nil) - if err != nil { - return nil, nil, err - } - return osClient, kClient, nil -} - -// OriginSwaggerSchema returns a swagger API doc for an Origin schema under the /oapi prefix. -func (f *Factory) OriginSwaggerSchema(client *restclient.RESTClient, version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { - if version.Empty() { - return nil, fmt.Errorf("groupVersion cannot be empty") - } - body, err := client.Get().AbsPath("/").Suffix("swaggerapi", "oapi", version.Version).Do().Raw() - if err != nil { - return nil, err - } - var schema swagger.ApiDeclaration - err = json.Unmarshal(body, &schema) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &schema, nil -} - -// clientCache caches previously loaded clients for reuse. This is largely -// copied from upstream (because of typing) but reuses the negotiation logic. -// TODO: Consolidate this entire concept with upstream's ClientCache. -type clientCache struct { - loader kclientcmd.ClientConfig - clients map[string]*client.Client - configs map[string]*restclient.Config - defaultConfig *restclient.Config - // negotiatingClient is used only for negotiating versions with the server. 
- negotiatingClient *kclient.Client -} - -// ClientConfigForVersion returns the correct config for a server -func (c *clientCache) ClientConfigForVersion(version *unversioned.GroupVersion) (*restclient.Config, error) { - if c.defaultConfig == nil { - config, err := c.loader.ClientConfig() - if err != nil { - return nil, err - } - c.defaultConfig = config - } - // TODO: have a better config copy method - cacheKey := "" - if version != nil { - cacheKey = version.String() - } - if config, ok := c.configs[cacheKey]; ok { - return config, nil - } - if c.negotiatingClient == nil { - // TODO: We want to reuse the upstream negotiation logic, which is coupled - // to a concrete kube Client. The negotiation will ultimately try and - // build an unversioned URL using the config prefix to ask for supported - // server versions. If we use the default kube client config, the prefix - // will be /api, while we need to use the OpenShift prefix to ask for the - // OpenShift server versions. For now, set OpenShift defaults on the - // config to ensure the right prefix gets used. The client cache and - // negotiation logic should be refactored upstream to support downstream - // reuse so that we don't need to do any of this cache or negotiation - // duplication. - negotiatingConfig := *c.defaultConfig - client.SetOpenShiftDefaults(&negotiatingConfig) - negotiatingClient, err := kclient.New(&negotiatingConfig) - if err != nil { - return nil, err - } - c.negotiatingClient = negotiatingClient - } - config := *c.defaultConfig - negotiatedVersion, err := negotiateVersion(c.negotiatingClient, &config, version, latest.Versions) - if err != nil { - return nil, err - } - config.GroupVersion = negotiatedVersion - client.SetOpenShiftDefaults(&config) - c.configs[cacheKey] = &config - - // `version` does not necessarily equal `config.Version`. However, we know that we call this method again with - // `config.Version`, we should get the the config we've just built. 
- configCopy := config - c.configs[config.GroupVersion.String()] = &configCopy - - return &config, nil -} - -// ClientForVersion initializes or reuses a client for the specified version, or returns an -// error if that is not possible -func (c *clientCache) ClientForVersion(version *unversioned.GroupVersion) (*client.Client, error) { - cacheKey := "" - if version != nil { - cacheKey = version.String() - } - if client, ok := c.clients[cacheKey]; ok { - return client, nil - } - config, err := c.ClientConfigForVersion(version) - if err != nil { - return nil, err - } - client, err := client.New(config) - if err != nil { - return nil, err - } - - c.clients[config.GroupVersion.String()] = client - return client, nil -} - -// FindAllCanonicalResources returns all resource names that map directly to their kind (Kind -> Resource -> Kind) -// and are not subresources. This is the closest mapping possible from the client side to resources that can be -// listed and updated. Note that this may return some virtual resources (like imagestreamtags) that can be otherwise -// represented. -// TODO: add a field to APIResources for "virtual" (or that points to the canonical resource). -// TODO: fallback to the scheme when discovery is not possible. -func FindAllCanonicalResources(d discovery.DiscoveryInterface, m meta.RESTMapper) ([]unversioned.GroupResource, error) { - set := make(map[unversioned.GroupResource]struct{}) - all, err := d.ServerResources() - if err != nil { - return nil, err - } - for apiVersion, v := range all { - gv, err := unversioned.ParseGroupVersion(apiVersion) - if err != nil { - continue - } - for _, r := range v.APIResources { - // ignore subresources - if strings.Contains(r.Name, "/") { - continue - } - // because discovery info doesn't tell us whether the object is virtual or not, perform a lookup - // by the kind for resource (which should be the canonical resource) and then verify that the reverse - // lookup (KindsFor) does not error. 
- if mapping, err := m.RESTMapping(unversioned.GroupKind{Group: gv.Group, Kind: r.Kind}, gv.Version); err == nil { - if _, err := m.KindsFor(mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource)); err == nil { - set[unversioned.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}] = struct{}{} - } - } - } - } - var groupResources []unversioned.GroupResource - for k := range set { - groupResources = append(groupResources, k) - } - sort.Sort(groupResourcesByName(groupResources)) - return groupResources, nil -} - -type groupResourcesByName []unversioned.GroupResource - -func (g groupResourcesByName) Len() int { return len(g) } -func (g groupResourcesByName) Less(i, j int) bool { - if g[i].Resource < g[j].Resource { - return true - } - if g[i].Resource > g[j].Resource { - return false - } - return g[i].Group < g[j].Group -} -func (g groupResourcesByName) Swap(i, j int) { g[i], g[j] = g[j], g[i] } diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/negotiate.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/negotiate.go deleted file mode 100644 index d93f403eb..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/negotiate.go +++ /dev/null @@ -1,116 +0,0 @@ -package clientcmd - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" - kclient "k8s.io/kubernetes/pkg/client/unversioned" -) - -// negotiateVersion queries the server's supported api versions to find a version that both client and server support. -// - If no version is provided, try registered client versions in order of preference. -// - If version is provided, but not default config (explicitly requested via -// commandline flag), and is unsupported by the server, print a warning to -// stderr and try client's registered versions in order of preference. 
-// - If version is config default, and the server does not support it, return an error. -func negotiateVersion(client *kclient.Client, config *restclient.Config, requestedGV *unversioned.GroupVersion, clientGVs []unversioned.GroupVersion) (*unversioned.GroupVersion, error) { - // Ensure we have a client - var err error - if client == nil { - client, err = kclient.New(config) - if err != nil { - return nil, err - } - } - - // Determine our preferred version - preferredGV := copyGroupVersion(requestedGV) - if preferredGV == nil { - preferredGV = copyGroupVersion(config.GroupVersion) - } - - // Get server versions - serverGVs, err := serverAPIVersions(client, "/oapi") - if err != nil { - if errors.IsNotFound(err) { - glog.V(4).Infof("Server path /oapi was not found, returning the requested group version %v", preferredGV) - return preferredGV, nil - } - return nil, err - } - - // Find a version we can all agree on - matchedGV, err := matchAPIVersion(preferredGV, clientGVs, serverGVs) - if err != nil { - return nil, err - } - - // Enforce a match if the preferredGV is the config default - if config.GroupVersion != nil && (*preferredGV == *config.GroupVersion) && (*matchedGV != *config.GroupVersion) { - return nil, fmt.Errorf("server does not support API version %q", config.GroupVersion.String()) - } - - return matchedGV, err -} - -// serverAPIVersions fetches the server versions available from the groupless API at the given prefix -func serverAPIVersions(c *kclient.Client, grouplessPrefix string) ([]unversioned.GroupVersion, error) { - // Get versions doc - var v unversioned.APIVersions - if err := c.Get().AbsPath(grouplessPrefix).Do().Into(&v); err != nil { - return []unversioned.GroupVersion{}, err - } - - // Convert to GroupVersion structs - serverAPIVersions := []unversioned.GroupVersion{} - for _, version := range v.Versions { - gv, err := unversioned.ParseGroupVersion(version) - if err != nil { - return []unversioned.GroupVersion{}, err - } - serverAPIVersions = 
append(serverAPIVersions, gv) - } - return serverAPIVersions, nil -} - -func matchAPIVersion(preferredGV *unversioned.GroupVersion, clientGVs []unversioned.GroupVersion, serverGVs []unversioned.GroupVersion) (*unversioned.GroupVersion, error) { - // If version explicitly requested verify that both client and server support it. - // If server does not support warn, but try to negotiate a lower version. - if preferredGV != nil { - if !containsGroupVersion(clientGVs, *preferredGV) { - return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", preferredGV, clientGVs) - } - if containsGroupVersion(serverGVs, *preferredGV) { - return preferredGV, nil - } - } - - for _, clientGV := range clientGVs { - if containsGroupVersion(serverGVs, clientGV) { - t := clientGV - return &t, nil - } - } - return nil, fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v", serverGVs, clientGVs) -} - -func copyGroupVersion(version *unversioned.GroupVersion) *unversioned.GroupVersion { - if version == nil { - return nil - } - versionCopy := *version - return &versionCopy -} - -func containsGroupVersion(versions []unversioned.GroupVersion, version unversioned.GroupVersion) bool { - for _, v := range versions { - if v == version { - return true - } - } - return false -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/shortcut_restmapper.go b/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/shortcut_restmapper.go deleted file mode 100644 index 6d99796c5..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/clientcmd/shortcut_restmapper.go +++ /dev/null @@ -1,141 +0,0 @@ -package clientcmd - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/typed/discovery" -) - -// ShortcutExpander is a RESTMapper that can be used for OpenShift resources. 
It expands the resource first, then invokes the wrapped -type ShortcutExpander struct { - RESTMapper meta.RESTMapper - - All []string -} - -var _ meta.RESTMapper = &ShortcutExpander{} - -func NewShortcutExpander(discoveryClient discovery.DiscoveryInterface, delegate meta.RESTMapper) ShortcutExpander { - defaultMapper := ShortcutExpander{RESTMapper: delegate} - - // this assumes that legacy kube versions and legacy origin versions are the same, probably fair - apiResources, err := discoveryClient.ServerResources() - if err != nil { - return defaultMapper - } - - availableResources := []unversioned.GroupVersionResource{} - for groupVersionString, resourceList := range apiResources { - currVersion, err := unversioned.ParseGroupVersion(groupVersionString) - if err != nil { - return defaultMapper - } - - for _, resource := range resourceList.APIResources { - availableResources = append(availableResources, currVersion.WithResource(resource.Name)) - } - } - - availableAll := []string{} - for _, requestedResource := range userResources { - for _, availableResource := range availableResources { - if requestedResource == availableResource.Resource { - availableAll = append(availableAll, requestedResource) - break - } - } - } - - return ShortcutExpander{All: availableAll, RESTMapper: delegate} -} - -func (e ShortcutExpander) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - return e.RESTMapper.KindFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - return e.RESTMapper.KindsFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - return e.RESTMapper.ResourcesFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourceFor(resource unversioned.GroupVersionResource) 
(unversioned.GroupVersionResource, error) { - return e.RESTMapper.ResourceFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourceSingularizer(resource string) (string, error) { - return e.RESTMapper.ResourceSingularizer(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) -} - -func (e ShortcutExpander) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - return e.RESTMapper.RESTMapping(gk, versions...) -} - -func (e ShortcutExpander) RESTMappings(gk unversioned.GroupKind) ([]*meta.RESTMapping, error) { - return e.RESTMapper.RESTMappings(gk) -} - -// userResources are the resource names that apply to the primary, user facing resources used by -// client tools. They are in deletion-first order - dependent resources should be last. -var userResources = []string{ - "buildconfigs", "builds", - "imagestreams", - "deploymentconfigs", "replicationcontrollers", - "routes", "services", - "pods", -} - -// AliasesForResource returns whether a resource has an alias or not -func (e ShortcutExpander) AliasesForResource(resource string) ([]string, bool) { - aliases := map[string][]string{ - "all": userResources, - } - if len(e.All) != 0 { - aliases["all"] = e.All - } - - if res, ok := aliases[resource]; ok { - return res, true - } - return e.RESTMapper.AliasesForResource(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) -} - -// shortForms is the list of short names to their expanded names -var shortForms = map[string]string{ - "dc": "deploymentconfigs", - "bc": "buildconfigs", - "is": "imagestreams", - "istag": "imagestreamtags", - "isimage": "imagestreamimages", - "sa": "serviceaccounts", - "pv": "persistentvolumes", - "pvc": "persistentvolumeclaims", - "clusterquota": "clusterresourcequota", -} - -// expandResourceShortcut will return the expanded version of resource -// (something that a pkg/api/meta.RESTMapper can understand), if it is -// indeed 
a shortcut. Otherwise, will return resource unmodified. -func expandResourceShortcut(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { - if expanded, ok := shortForms[resource.Resource]; ok { - resource.Resource = expanded - return resource - } - return resource -} - -// resourceShortFormFor looks up for a short form of resource names. -func resourceShortFormFor(resource string) (string, bool) { - var alias string - exists := false - for k, val := range shortForms { - if val == resource { - alias = k - exists = true - break - } - } - return alias, exists -} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/cmd.go b/vendor/github.com/openshift/origin/pkg/cmd/util/cmd.go index bce62fbdf..b0ac50a55 100644 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/cmd.go +++ b/vendor/github.com/openshift/origin/pkg/cmd/util/cmd.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "path/filepath" + "regexp" "strings" "github.com/spf13/cobra" @@ -20,6 +21,8 @@ import ( // ErrExit is a marker interface for cli commands indicating that the response has been processed var ErrExit = fmt.Errorf("exit directly") +var commaSepVarsPattern = regexp.MustCompile(".*=.*,.*=.*") + // ReplaceCommandName recursively processes the examples in a given command to change a hardcoded // command name (like 'kubectl' to the appropriate target name). It returns c. func ReplaceCommandName(from, to string, c *cobra.Command) *cobra.Command { @@ -30,21 +33,6 @@ func ReplaceCommandName(from, to string, c *cobra.Command) *cobra.Command { return c } -// RequireNoArguments exits with a usage error if extra arguments are provided. 
-func RequireNoArguments(c *cobra.Command, args []string) { - if len(args) > 0 { - kcmdutil.CheckErr(kcmdutil.UsageError(c, fmt.Sprintf(`unknown command "%s"`, strings.Join(args, " ")))) - } -} - -func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { - return func(c *cobra.Command, args []string) { - c.SetOutput(out) - RequireNoArguments(c, args) - c.Help() - } -} - // GetDisplayFilename returns the absolute path of the filename as long as there was no error, otherwise it returns the filename as-is func GetDisplayFilename(filename string) string { if absName, err := filepath.Abs(filename); err == nil { @@ -163,3 +151,11 @@ func VersionedPrintObject(fn func(*cobra.Command, meta.RESTMapper, runtime.Objec return fn(c, mapper, obj, out) } } + +func WarnAboutCommaSeparation(errout io.Writer, values []string, flag string) { + for _, value := range values { + if commaSepVarsPattern.MatchString(value) { + fmt.Fprintf(errout, "warning: %s no longer accepts comma-separated lists of values. %q will be treated as a single key-value pair.\n", flag, value) + } + } +} diff --git a/vendor/github.com/openshift/origin/pkg/cmd/util/terminal.go b/vendor/github.com/openshift/origin/pkg/cmd/util/terminal.go deleted file mode 100644 index a69464ad1..000000000 --- a/vendor/github.com/openshift/origin/pkg/cmd/util/terminal.go +++ /dev/null @@ -1,122 +0,0 @@ -package util - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" - - "github.com/docker/docker/pkg/term" - "github.com/golang/glog" - - kterm "k8s.io/kubernetes/pkg/util/term" -) - -// PromptForString takes an io.Reader and prompts for user input if it's a terminal, returning the result. -func PromptForString(r io.Reader, w io.Writer, format string, a ...interface{}) string { - if w == nil { - w = os.Stdout - } - - fmt.Fprintf(w, format, a...) - return readInput(r) -} - -// PromptForPasswordString prompts for user input by disabling echo in terminal, useful for password prompt. 
-func PromptForPasswordString(r io.Reader, w io.Writer, format string, a ...interface{}) string { - if w == nil { - w = os.Stdout - } - - if file, ok := r.(*os.File); ok { - inFd := file.Fd() - - if term.IsTerminal(inFd) { - oldState, err := term.SaveState(inFd) - if err != nil { - glog.V(3).Infof("Unable to save terminal state") - return PromptForString(r, w, format, a...) - } - - fmt.Fprintf(w, format, a...) - - term.DisableEcho(inFd, oldState) - - input := readInput(r) - - defer term.RestoreTerminal(inFd, oldState) - - fmt.Fprintf(w, "\n") - - return input - } - glog.V(3).Infof("Stdin is not a terminal") - return PromptForString(r, w, format, a...) - } - return PromptForString(r, w, format, a...) -} - -// PromptForBool prompts for user input of a boolean value. The accepted values are: -// yes, y, true, t, 1 (not case sensitive) -// no, n, false, f, 0 (not case sensitive) -// A valid answer is mandatory so it will keep asking until an answer is provided. -func PromptForBool(r io.Reader, w io.Writer, format string, a ...interface{}) bool { - if w == nil { - w = os.Stdout - } - - str := PromptForString(r, w, format, a...) - switch strings.ToLower(str) { - case "1", "t", "true", "y", "yes": - return true - case "0", "f", "false", "n", "no": - return false - } - fmt.Println("Please enter 'yes' or 'no'.") - return PromptForBool(r, w, format, a...) -} - -// PromptForStringWithDefault prompts for user input but take a default in case nothing is provided. -func PromptForStringWithDefault(r io.Reader, w io.Writer, def string, format string, a ...interface{}) string { - if w == nil { - w = os.Stdout - } - - s := PromptForString(r, w, format, a...) 
- if len(s) == 0 { - return def - } - return s -} - -func readInput(r io.Reader) string { - if kterm.IsTerminal(r) { - return readInputFromTerminal(r) - } - return readInputFromReader(r) -} - -func readInputFromTerminal(r io.Reader) string { - reader := bufio.NewReader(r) - result, _ := reader.ReadString('\n') - return strings.TrimRight(result, "\r\n") -} - -func readInputFromReader(r io.Reader) string { - var result string - fmt.Fscan(r, &result) - return result -} - -// IsTerminalReader returns whether the passed io.Reader is a terminal or not -func IsTerminalReader(r io.Reader) bool { - file, ok := r.(*os.File) - return ok && term.IsTerminal(file.Fd()) -} - -// IsTerminalWriter returns whether the passed io.Writer is a terminal or not -func IsTerminalWriter(w io.Writer) bool { - file, ok := w.(*os.File) - return ok && term.IsTerminal(file.Fd()) -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/helpers.go b/vendor/github.com/openshift/origin/pkg/deploy/api/helpers.go index 4b222b197..2efd01120 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/helpers.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/helpers.go @@ -49,6 +49,13 @@ func ScaleFromConfig(dc *DeploymentConfig) *extensions.Scale { } } +// RequestForConfig builds a new deployment request for a deployment config. 
+func RequestForConfig(dc *DeploymentConfig) *DeploymentRequest { + return &DeploymentRequest{ + Name: dc.Name, + } +} + // TemplateImage is a structure for helping a caller iterate over a PodSpec type TemplateImage struct { Image string diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/register.go b/vendor/github.com/openshift/origin/pkg/deploy/api/register.go index 2e0bb6849..d0f1d1df1 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/register.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/register.go @@ -32,6 +32,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentConfig{}, &DeploymentConfigList{}, &DeploymentConfigRollback{}, + &DeploymentRequest{}, &DeploymentLog{}, &DeploymentLogOptions{}, ) @@ -41,5 +42,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { func (obj *DeploymentConfig) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentConfigList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentConfigRollback) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *DeploymentRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentLog) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/types.go b/vendor/github.com/openshift/origin/pkg/deploy/api/types.go index ad5e43917..bc67246d9 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/types.go @@ -6,181 +6,7 @@ import ( "k8s.io/kubernetes/pkg/util/intstr" ) -// DeploymentStatus describes the possible states a deployment can be in. -type DeploymentStatus string - -const ( - // DeploymentStatusNew means the deployment has been accepted but not yet acted upon. 
- DeploymentStatusNew DeploymentStatus = "New" - // DeploymentStatusPending means the deployment been handed over to a deployment strategy, - // but the strategy has not yet declared the deployment to be running. - DeploymentStatusPending DeploymentStatus = "Pending" - // DeploymentStatusRunning means the deployment strategy has reported the deployment as - // being in-progress. - DeploymentStatusRunning DeploymentStatus = "Running" - // DeploymentStatusComplete means the deployment finished without an error. - DeploymentStatusComplete DeploymentStatus = "Complete" - // DeploymentStatusFailed means the deployment finished with an error. - DeploymentStatusFailed DeploymentStatus = "Failed" -) - -// DeploymentStrategy describes how to perform a deployment. -type DeploymentStrategy struct { - // Type is the name of a deployment strategy. - Type DeploymentStrategyType - - // RecreateParams are the input to the Recreate deployment strategy. - RecreateParams *RecreateDeploymentStrategyParams - // RollingParams are the input to the Rolling deployment strategy. - RollingParams *RollingDeploymentStrategyParams - - // CustomParams are the input to the Custom deployment strategy, and may also - // be specified for the Recreate and Rolling strategies to customize the execution - // process that runs the deployment. - CustomParams *CustomDeploymentStrategyParams - - // Resources contains resource requirements to execute the deployment - Resources kapi.ResourceRequirements - // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. - Labels map[string]string - // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. - Annotations map[string]string -} - -// DeploymentStrategyType refers to a specific DeploymentStrategy implementation. -type DeploymentStrategyType string - -const ( - // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default. 
- DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate" - // DeploymentStrategyTypeCustom is a user defined strategy. It is optional to set. - DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom" - // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater. - DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling" -) - -// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. -type CustomDeploymentStrategyParams struct { - // Image specifies a Docker image which can carry out a deployment. - Image string - // Environment holds the environment which will be given to the container for Image. - Environment []kapi.EnvVar - // Command is optional and overrides CMD in the container Image. - Command []string -} - -// RecreateDeploymentStrategyParams are the input to the Recreate deployment -// strategy. -type RecreateDeploymentStrategyParams struct { - // TimeoutSeconds is the time to wait for updates before giving up. If the - // value is nil, a default will be used. - TimeoutSeconds *int64 - // Pre is a lifecycle hook which is executed before the strategy manipulates - // the deployment. All LifecycleHookFailurePolicy values are supported. - Pre *LifecycleHook - // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new - // pod is created. All LifecycleHookFailurePolicy values are supported. - Mid *LifecycleHook - // Post is a lifecycle hook which is executed after the strategy has - // finished all deployment logic. - Post *LifecycleHook -} - -// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. -type LifecycleHook struct { - // FailurePolicy specifies what action to take if the hook fails. - FailurePolicy LifecycleHookFailurePolicy - - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. 
- ExecNewPod *ExecNewPodHook - - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag if the deployment succeeds. - TagImages []TagImageHook -} - -// LifecycleHookFailurePolicy describes possibles actions to take if a hook fails. -type LifecycleHookFailurePolicy string - -const ( - // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds. - LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry" - // LifecycleHookFailurePolicyAbort means abort the deployment (if possible). - LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort" - // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment. - LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore" -) - -// ExecNewPodHook is a hook implementation which runs a command in a new pod -// based on the specified container which is assumed to be part of the -// deployment template. -type ExecNewPodHook struct { - // Command is the action command and its arguments. - Command []string - // Env is a set of environment variables to supply to the hook pod's container. - Env []kapi.EnvVar - // ContainerName is the name of a container in the deployment pod template - // whose Docker image will be used for the hook pod's container. - ContainerName string - // Volumes is a list of named volumes from the pod template which should be - // copied to the hook pod. - Volumes []string -} - -// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. -type TagImageHook struct { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag - ContainerName string - // To is the target ImageStreamTag to set the image of - To kapi.ObjectReference -} - -// RollingDeploymentStrategyParams are the input to the Rolling deployment -// strategy. 
-type RollingDeploymentStrategyParams struct { - // UpdatePeriodSeconds is the time to wait between individual pod updates. - // If the value is nil, a default will be used. - UpdatePeriodSeconds *int64 - // IntervalSeconds is the time to wait between polling deployment status - // after update. If the value is nil, a default will be used. - IntervalSeconds *int64 - // TimeoutSeconds is the time to wait for updates before giving up. If the - // value is nil, a default will be used. - TimeoutSeconds *int64 - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down by 30% - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that at least 70% of original number of pods are available at all times - // during the update. - MaxUnavailable intstr.IntOrString - // The maximum number of pods that can be scheduled above the original number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of total pods at - // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up by 30% - // immediately when the rolling update starts. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of original pods. 
- MaxSurge intstr.IntOrString - // UpdatePercent is the percentage of replicas to scale up or down each - // interval. If nil, one replica will be scaled up and down each interval. - // If negative, the scale order will be down/up instead of up/down. - // DEPRECATED: Use MaxUnavailable/MaxSurge instead. - UpdatePercent *int32 - // Pre is a lifecycle hook which is executed before the deployment process - // begins. All LifecycleHookFailurePolicy values are supported. - Pre *LifecycleHook - // Post is a lifecycle hook which is executed after the strategy has - // finished all deployment logic. - Post *LifecycleHook -} - +// These constants represent defaults used in the deployment process. const ( // DefaultRollingTimeoutSeconds is the default TimeoutSeconds for RollingDeploymentStrategyParams. DefaultRollingTimeoutSeconds int64 = 10 * 60 @@ -188,6 +14,10 @@ const ( DefaultRollingIntervalSeconds int64 = 1 // DefaultRollingUpdatePeriodSeconds is the default PeriodSeconds for RollingDeploymentStrategyParams. DefaultRollingUpdatePeriodSeconds int64 = 1 + // MaxDeploymentDurationSeconds represents the maximum duration that a deployment is allowed to run. + // This is set as the default value for ActiveDeadlineSeconds for the deployer pod. + // Currently set to 6 hours. + MaxDeploymentDurationSeconds int64 = 21600 ) // These constants represent keys used for correlating objects related to deployments. @@ -253,6 +83,13 @@ const ( PostHookPodSuffix = "hook-post" ) +// These constants represent values used in deployment annotations. 
+const ( + // DeploymentCancelledAnnotationValue represents the value for the DeploymentCancelledAnnotation + // annotation that signifies that the deployment should be cancelled + DeploymentCancelledAnnotationValue = "true" +) + // These constants represent the various reasons for cancelling a deployment // or for a deployment being placed in a failed state const ( @@ -262,18 +99,23 @@ const ( DeploymentFailedDeployerPodNoLongerExists = "deployer pod no longer exists" ) -// MaxDeploymentDurationSeconds represents the maximum duration that a deployment is allowed to run -// This is set as the default value for ActiveDeadlineSeconds for the deployer pod -// Currently set to 6 hours -const MaxDeploymentDurationSeconds int64 = 21600 - -// DeploymentCancelledAnnotationValue represents the value for the DeploymentCancelledAnnotation -// annotation that signifies that the deployment should be cancelled -const DeploymentCancelledAnnotationValue = "true" +// DeploymentStatus describes the possible states a deployment can be in. +type DeploymentStatus string -// DeploymentInstantiatedAnnotationValue represents the value for the DeploymentInstantiatedAnnotation -// annotation that signifies that the deployment should be instantiated. -const DeploymentInstantiatedAnnotationValue = "true" +const ( + // DeploymentStatusNew means the deployment has been accepted but not yet acted upon. + DeploymentStatusNew DeploymentStatus = "New" + // DeploymentStatusPending means the deployment been handed over to a deployment strategy, + // but the strategy has not yet declared the deployment to be running. + DeploymentStatusPending DeploymentStatus = "Pending" + // DeploymentStatusRunning means the deployment strategy has reported the deployment as + // being in-progress. + DeploymentStatusRunning DeploymentStatus = "Running" + // DeploymentStatusComplete means the deployment finished without an error. 
+ DeploymentStatusComplete DeploymentStatus = "Complete" + // DeploymentStatusFailed means the deployment finished with an error. + DeploymentStatusFailed DeploymentStatus = "Failed" +) // +genclient=true @@ -331,25 +173,162 @@ type DeploymentConfigSpec struct { Template *kapi.PodTemplateSpec } -// DeploymentConfigStatus represents the current deployment state. -type DeploymentConfigStatus struct { - // LatestVersion is used to determine whether the current deployment associated with a deployment - // config is out of sync. - LatestVersion int64 - // ObservedGeneration is the most recent generation observed by the deployment config controller. - ObservedGeneration int64 - // Replicas is the total number of pods targeted by this deployment config. - Replicas int32 - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config - // that have the desired template spec. - UpdatedReplicas int32 - // AvailableReplicas is the total number of available pods targeted by this deployment config. - AvailableReplicas int32 - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. - UnavailableReplicas int32 - // Details are the reasons for the update to this deployment config. - // This could be based on a change made by the user or caused by an automatic trigger - Details *DeploymentDetails +// DeploymentStrategy describes how to perform a deployment. +type DeploymentStrategy struct { + // Type is the name of a deployment strategy. + Type DeploymentStrategyType + + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + CustomParams *CustomDeploymentStrategyParams + // RecreateParams are the input to the Recreate deployment strategy. + RecreateParams *RecreateDeploymentStrategyParams + // RollingParams are the input to the Rolling deployment strategy. 
+ RollingParams *RollingDeploymentStrategyParams + + // Resources contains resource requirements to execute the deployment and any hooks. + Resources kapi.ResourceRequirements + // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Labels map[string]string + // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Annotations map[string]string +} + +// DeploymentStrategyType refers to a specific DeploymentStrategy implementation. +type DeploymentStrategyType string + +const ( + // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default. + DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate" + // DeploymentStrategyTypeCustom is a user defined strategy. + DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom" + // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater. + DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling" +) + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +type CustomDeploymentStrategyParams struct { + // Image specifies a Docker image which can carry out a deployment. + Image string + // Environment holds the environment which will be given to the container for Image. + Environment []kapi.EnvVar + // Command is optional and overrides CMD in the container Image. + Command []string +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +type RecreateDeploymentStrategyParams struct { + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 + // Pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. 
+ Pre *LifecycleHook + // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + Mid *LifecycleHook + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + Post *LifecycleHook +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +type RollingDeploymentStrategyParams struct { + // UpdatePeriodSeconds is the time to wait between individual pod updates. + // If the value is nil, a default will be used. + UpdatePeriodSeconds *int64 + // IntervalSeconds is the time to wait between polling deployment status + // after update. If the value is nil, a default will be used. + IntervalSeconds *int64 + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 + // MaxUnavailable is the maximum number of pods that can be unavailable + // during the update. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxSurge is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old + // RC can be scaled down further, followed by scaling up the new RC, + // ensuring that at least 70% of original number of pods are available at + // all times during the update. + MaxUnavailable intstr.IntOrString + // MaxSurge is the maximum number of pods that can be scheduled above the + // original number of pods. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of the update (ex: 10%). 
Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been + // killed, new RC can be scaled up further, ensuring that total number of + // pods running at any time during the update is atmost 130% of original + // pods. + MaxSurge intstr.IntOrString + // Pre is a lifecycle hook which is executed before the deployment process + // begins. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + Post *LifecycleHook +} + +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +type LifecycleHook struct { + // FailurePolicy specifies what action to take if the hook fails. + FailurePolicy LifecycleHookFailurePolicy + + // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + ExecNewPod *ExecNewPodHook + + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + TagImages []TagImageHook +} + +// LifecycleHookFailurePolicy describes possibles actions to take if a hook fails. +type LifecycleHookFailurePolicy string + +const ( + // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds. + LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry" + // LifecycleHookFailurePolicyAbort means abort the deployment. + LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort" + // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment. 
+ LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore" +) + +// ExecNewPodHook is a hook implementation which runs a command in a new pod +// based on the specified container which is assumed to be part of the +// deployment template. +type ExecNewPodHook struct { + // Command is the action command and its arguments. + Command []string + // Env is a set of environment variables to supply to the hook pod's container. + Env []kapi.EnvVar + // ContainerName is the name of a container in the deployment pod template + // whose Docker image will be used for the hook pod's container. + ContainerName string + // Volumes is a list of named volumes from the pod template which should be + // copied to the hook pod. Volumes names not found in pod spec are ignored. + // An empty list means no volumes will be copied. + Volumes []string +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +type TagImageHook struct { + // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + ContainerName string + // To is the target ImageStreamTag to set the container's image onto. + To kapi.ObjectReference } // DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. @@ -377,9 +356,7 @@ const ( // DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. type DeploymentTriggerImageChangeParams struct { // Automatic means that the detection of a new tag value should result in an image update - // inside the pod template. Deployment configs that haven't been deployed yet will always - // have their images updated. Deployment configs that have been deployed at least once, will - // have their images updated only if this is set to true. + // inside the pod template. 
Automatic bool // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. ContainerNames []string @@ -391,6 +368,29 @@ type DeploymentTriggerImageChangeParams struct { LastTriggeredImage string } +// DeploymentConfigStatus represents the current deployment state. +type DeploymentConfigStatus struct { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + LatestVersion int64 + // ObservedGeneration is the most recent generation observed by the deployment config controller. + ObservedGeneration int64 + // Replicas is the total number of pods targeted by this deployment config. + Replicas int32 + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + UpdatedReplicas int32 + // AvailableReplicas is the total number of available pods targeted by this deployment config. + AvailableReplicas int32 + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + UnavailableReplicas int32 + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + Details *DeploymentDetails + // Conditions represents the latest available observations of a deployment config's current state. + Conditions []DeploymentCondition +} + // DeploymentDetails captures information about the causes of a deployment. type DeploymentDetails struct { // Message is the user specified change message, if this deployment was triggered manually by the user @@ -414,6 +414,37 @@ type DeploymentCauseImageTrigger struct { From kapi.ObjectReference } +type DeploymentConditionType string + +// These are valid conditions of a deployment config. +const ( + // DeploymentAvailable means the deployment config is available, ie. 
at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // DeploymentProgressing means the deployment config is progressing. Progress for a deployment + // config is considered when a new replica set is created or adopted, and when new pods scale up or + // old pods scale down. Progress is not estimated for paused deployment configs, when the deployment + // config needs to rollback, or when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // DeploymentReplicaFailure is added in a deployment config when one of its pods + // fails to be created or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment config at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType + // Status of the condition, one of True, False, Unknown. + Status kapi.ConditionStatus + // The last time the condition transitioned from one status to another. + LastTransitionTime unversioned.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + // DeploymentConfigList is a collection of deployment configs. type DeploymentConfigList struct { unversioned.TypeMeta @@ -450,6 +481,18 @@ type DeploymentConfigRollbackSpec struct { IncludeStrategy bool } +// DeploymentRequest is a request to a deployment config for a new deployment. +type DeploymentRequest struct { + unversioned.TypeMeta + // Name of the deployment config for requesting a new deployment. + Name string + // Latest will update the deployment config with the latest state from all triggers. + Latest bool + // Force will try to force a new deployment to run. 
If the deployment config is paused, + // then setting this to true will return an Invalid error. + Force bool +} + // DeploymentLog represents the logs for a deployment type DeploymentLog struct { unversioned.TypeMeta diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/conversion.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/conversion.go index 4d4a18570..42804873c 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/conversion.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/conversion.go @@ -1,8 +1,6 @@ package v1 import ( - "fmt" - "math" "reflect" "strings" @@ -60,7 +58,6 @@ func Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategy out.UpdatePeriodSeconds = in.UpdatePeriodSeconds out.IntervalSeconds = in.IntervalSeconds out.TimeoutSeconds = in.TimeoutSeconds - out.UpdatePercent = in.UpdatePercent if in.Pre != nil { if err := s.Convert(&in.Pre, &out.Pre, 0); err != nil { @@ -72,18 +69,12 @@ func Convert_v1_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategy return err } } - - if in.UpdatePercent != nil { - pct := intstr.FromString(fmt.Sprintf("%d%%", int(math.Abs(float64(*in.UpdatePercent))))) - if *in.UpdatePercent > 0 { - out.MaxSurge = pct - } else { - out.MaxUnavailable = pct - } - } else { + if in.MaxUnavailable != nil { if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { return err } + } + if in.MaxSurge != nil { if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { return err } @@ -95,7 +86,6 @@ func Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategy out.UpdatePeriodSeconds = in.UpdatePeriodSeconds out.IntervalSeconds = in.IntervalSeconds out.TimeoutSeconds = in.TimeoutSeconds - out.UpdatePercent = in.UpdatePercent if in.Pre != nil { if err := s.Convert(&in.Pre, &out.Pre, 0); err != nil { @@ -114,20 +104,11 @@ func Convert_api_RollingDeploymentStrategyParams_To_v1_RollingDeploymentStrategy if 
out.MaxSurge == nil { out.MaxSurge = &intstr.IntOrString{} } - if in.UpdatePercent != nil { - pct := intstr.FromString(fmt.Sprintf("%d%%", int(math.Abs(float64(*in.UpdatePercent))))) - if *in.UpdatePercent > 0 { - out.MaxSurge = &pct - } else { - out.MaxUnavailable = &pct - } - } else { - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err } return nil } diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/defaults.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/defaults.go index 8986ed4e6..77511d3df 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/defaults.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/defaults.go @@ -71,6 +71,7 @@ func SetDefaults_RecreateDeploymentStrategyParams(obj *RecreateDeploymentStrateg obj.TimeoutSeconds = mkintp(deployapi.DefaultRollingTimeoutSeconds) } } + func SetDefaults_RollingDeploymentStrategyParams(obj *RollingDeploymentStrategyParams) { if obj.IntervalSeconds == nil { obj.IntervalSeconds = mkintp(deployapi.DefaultRollingIntervalSeconds) @@ -84,16 +85,24 @@ func SetDefaults_RollingDeploymentStrategyParams(obj *RollingDeploymentStrategyP obj.TimeoutSeconds = mkintp(deployapi.DefaultRollingTimeoutSeconds) } - if obj.UpdatePercent == nil { - // Apply defaults. 
- if obj.MaxUnavailable == nil { - maxUnavailable := intstr.FromString("25%") - obj.MaxUnavailable = &maxUnavailable - } - if obj.MaxSurge == nil { - maxSurge := intstr.FromString("25%") - obj.MaxSurge = &maxSurge - } + if obj.MaxUnavailable == nil && obj.MaxSurge == nil { + maxUnavailable := intstr.FromString("25%") + obj.MaxUnavailable = &maxUnavailable + + maxSurge := intstr.FromString("25%") + obj.MaxSurge = &maxSurge + } + + if obj.MaxUnavailable == nil && obj.MaxSurge != nil && + (*obj.MaxSurge == intstr.FromInt(0) || *obj.MaxSurge == intstr.FromString("0%")) { + maxUnavailable := intstr.FromString("25%") + obj.MaxUnavailable = &maxUnavailable + } + + if obj.MaxSurge == nil && obj.MaxUnavailable != nil && + (*obj.MaxUnavailable == intstr.FromInt(0) || *obj.MaxUnavailable == intstr.FromString("0%")) { + maxSurge := intstr.FromString("25%") + obj.MaxSurge = &maxSurge } } diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.pb.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.pb.go index 79c3fab2c..9833fe256 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.pb.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.pb.go @@ -12,6 +12,7 @@ CustomDeploymentStrategyParams DeploymentCause DeploymentCauseImageTrigger + DeploymentCondition DeploymentConfig DeploymentConfigList DeploymentConfigRollback @@ -21,6 +22,7 @@ DeploymentDetails DeploymentLog DeploymentLogOptions + DeploymentRequest DeploymentStrategy DeploymentTriggerImageChangeParams DeploymentTriggerPolicies @@ -73,96 +75,105 @@ func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} 
} func (*DeploymentConfig) ProtoMessage() {} -func (*DeploymentConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DeploymentConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } func (*DeploymentConfigList) ProtoMessage() {} -func (*DeploymentConfigList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } func (*DeploymentConfigRollback) ProtoMessage() {} func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{6} } func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } func (*DeploymentConfigRollbackSpec) ProtoMessage() {} func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{7} } func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } func (*DeploymentConfigSpec) ProtoMessage() {} -func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } func (*DeploymentConfigStatus) ProtoMessage() {} -func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } func (*DeploymentDetails) ProtoMessage() {} -func (*DeploymentDetails) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{9} } +func (*DeploymentDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } func (*DeploymentLog) ProtoMessage() {} -func (*DeploymentLog) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DeploymentLog) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } func (*DeploymentLogOptions) ProtoMessage() {} -func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } +func (*DeploymentRequest) ProtoMessage() {} +func (*DeploymentRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} } func (*DeploymentTriggerImageChangeParams) ProtoMessage() {} func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptorGenerated, []int{15} } func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} } func (*DeploymentTriggerPolicies) ProtoMessage() {} func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptorGenerated, []int{16} } func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} } func 
(*DeploymentTriggerPolicy) ProtoMessage() {} func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptorGenerated, []int{17} } func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } func (*ExecNewPodHook) ProtoMessage() {} -func (*ExecNewPodHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ExecNewPodHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } func (*LifecycleHook) ProtoMessage() {} -func (*LifecycleHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*LifecycleHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} } func (*RecreateDeploymentStrategyParams) ProtoMessage() {} func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{18} + return fileDescriptorGenerated, []int{20} } func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} } func (*RollingDeploymentStrategyParams) ProtoMessage() {} func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{21} } func (m *TagImageHook) Reset() { *m = TagImageHook{} } func (*TagImageHook) ProtoMessage() {} -func (*TagImageHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*TagImageHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func init() { proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.CustomDeploymentStrategyParams") proto.RegisterType((*DeploymentCause)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentCause") 
proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentCauseImageTrigger") + proto.RegisterType((*DeploymentCondition)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentCondition") proto.RegisterType((*DeploymentConfig)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentConfig") proto.RegisterType((*DeploymentConfigList)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentConfigList") proto.RegisterType((*DeploymentConfigRollback)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentConfigRollback") @@ -172,6 +183,7 @@ func init() { proto.RegisterType((*DeploymentDetails)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentDetails") proto.RegisterType((*DeploymentLog)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentLog") proto.RegisterType((*DeploymentLogOptions)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentLogOptions") + proto.RegisterType((*DeploymentRequest)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentRequest") proto.RegisterType((*DeploymentStrategy)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentStrategy") proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentTriggerImageChangeParams") proto.RegisterType((*DeploymentTriggerPolicies)(nil), "jackfan.us.kg.openshift.origin.pkg.deploy.api.v1.DeploymentTriggerPolicies") @@ -289,6 +301,48 @@ func (m *DeploymentCauseImageTrigger) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *DeploymentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n3, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + func (m *DeploymentConfig) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -307,27 +361,27 @@ func (m *DeploymentConfig) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(data[i:]) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n3 + i += n4 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(data[i:]) + n5, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n4 + i += n5 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(data[i:]) + n6, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n5 + i += n6 return i, nil } @@ -349,11 +403,11 @@ func (m *DeploymentConfigList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + n7, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -408,11 +462,11 @@ func (m *DeploymentConfigRollback) MarshalTo(data []byte) (int, error) { data[i] = 0x1a 
i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(data[i:]) + n8, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n7 + i += n8 return i, nil } @@ -434,11 +488,11 @@ func (m *DeploymentConfigRollbackSpec) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n8, err := m.From.MarshalTo(data[i:]) + n9, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n8 + i += n9 data[i] = 0x10 i++ i = encodeVarintGenerated(data, i, uint64(m.Revision)) @@ -495,20 +549,20 @@ func (m *DeploymentConfigSpec) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) - n9, err := m.Strategy.MarshalTo(data[i:]) + n10, err := m.Strategy.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if m.Triggers != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Triggers.Size())) - n10, err := m.Triggers.MarshalTo(data[i:]) + n11, err := m.Triggers.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } data[i] = 0x18 i++ @@ -555,11 +609,11 @@ func (m *DeploymentConfigSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n11, err := m.Template.MarshalTo(data[i:]) + n12, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } data[i] = 0x48 i++ @@ -604,11 +658,23 @@ func (m *DeploymentConfigStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.Details.Size())) - n12, err := m.Details.MarshalTo(data[i:]) + n13, err := m.Details.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n12 + i += n13 + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -709,11 +775,11 @@ func (m *DeploymentLogOptions) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) - n13, err := m.SinceTime.MarshalTo(data[i:]) + n14, err := m.SinceTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } data[i] = 0x30 i++ @@ -749,6 +815,44 @@ func (m *DeploymentLogOptions) MarshalTo(data []byte) (int, error) { return i, nil } +func (m *DeploymentRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.Latest { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Force { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + func (m *DeploymentStrategy) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -772,40 +876,40 @@ func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.CustomParams.Size())) - n14, err := m.CustomParams.MarshalTo(data[i:]) + n15, err := m.CustomParams.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n14 + i += n15 } if m.RecreateParams != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.RecreateParams.Size())) - n15, err := m.RecreateParams.MarshalTo(data[i:]) + n16, err := m.RecreateParams.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n15 + i += n16 } if m.RollingParams != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.RollingParams.Size())) - n16, err 
:= m.RollingParams.MarshalTo(data[i:]) + n17, err := m.RollingParams.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n16 + i += n17 } data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n17, err := m.Resources.MarshalTo(data[i:]) + n18, err := m.Resources.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n17 + i += n18 if len(m.Labels) > 0 { for k := range m.Labels { data[i] = 0x32 @@ -884,11 +988,11 @@ func (m *DeploymentTriggerImageChangeParams) MarshalTo(data []byte) (int, error) data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.From.Size())) - n18, err := m.From.MarshalTo(data[i:]) + n19, err := m.From.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n18 + i += n19 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(len(m.LastTriggeredImage))) @@ -949,11 +1053,11 @@ func (m *DeploymentTriggerPolicy) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.ImageChangeParams.Size())) - n19, err := m.ImageChangeParams.MarshalTo(data[i:]) + n20, err := m.ImageChangeParams.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n19 + i += n20 } return i, nil } @@ -1045,11 +1149,11 @@ func (m *LifecycleHook) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.ExecNewPod.Size())) - n20, err := m.ExecNewPod.MarshalTo(data[i:]) + n21, err := m.ExecNewPod.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n20 + i += n21 } if len(m.TagImages) > 0 { for _, msg := range m.TagImages { @@ -1090,31 +1194,31 @@ func (m *RecreateDeploymentStrategyParams) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Pre.Size())) - n21, err := m.Pre.MarshalTo(data[i:]) + n22, err := m.Pre.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.Mid != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Mid.Size())) - 
n22, err := m.Mid.MarshalTo(data[i:]) + n23, err := m.Mid.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } if m.Post != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.Post.Size())) - n23, err := m.Post.MarshalTo(data[i:]) + n24, err := m.Post.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n23 + i += n24 } return i, nil } @@ -1153,46 +1257,41 @@ func (m *RollingDeploymentStrategyParams) MarshalTo(data []byte) (int, error) { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) - n24, err := m.MaxUnavailable.MarshalTo(data[i:]) + n25, err := m.MaxUnavailable.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n24 + i += n25 } if m.MaxSurge != nil { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) - n25, err := m.MaxSurge.MarshalTo(data[i:]) + n26, err := m.MaxSurge.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n25 - } - if m.UpdatePercent != nil { - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.UpdatePercent)) + i += n26 } if m.Pre != nil { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.Pre.Size())) - n26, err := m.Pre.MarshalTo(data[i:]) + n27, err := m.Pre.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n26 + i += n27 } if m.Post != nil { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.Post.Size())) - n27, err := m.Post.MarshalTo(data[i:]) + n28, err := m.Post.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } return i, nil } @@ -1219,11 +1318,11 @@ func (m *TagImageHook) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.To.Size())) - n28, err := m.To.MarshalTo(data[i:]) + n29, err := m.To.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n28 + i += n29 return i, nil } @@ -1294,6 +1393,22 @@ func (m *DeploymentCauseImageTrigger) Size() (n int) { return n } +func (m 
*DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DeploymentConfig) Size() (n int) { var l int _ = l @@ -1395,6 +1510,12 @@ func (m *DeploymentConfigStatus) Size() (n int) { l = m.Details.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1446,6 +1567,16 @@ func (m *DeploymentLogOptions) Size() (n int) { return n } +func (m *DeploymentRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + return n +} + func (m *DeploymentStrategy) Size() (n int) { var l int _ = l @@ -1610,9 +1741,6 @@ func (m *RollingDeploymentStrategyParams) Size() (n int) { l = m.MaxSurge.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.UpdatePercent != nil { - n += 1 + sovGenerated(uint64(*m.UpdatePercent)) - } if m.Pre != nil { l = m.Pre.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -1680,6 +1808,20 @@ func (this *DeploymentCauseImageTrigger) String() string { }, "") return s } +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DeploymentConfig) String() 
string { if this == nil { return "nil" @@ -1780,6 +1922,7 @@ func (this *DeploymentConfigStatus) String() string { `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1823,6 +1966,18 @@ func (this *DeploymentLogOptions) String() string { }, "") return s } +func (this *DeploymentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Latest:` + fmt.Sprintf("%v", this.Latest) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} func (this *DeploymentStrategy) String() string { if this == nil { return "nil" @@ -1931,7 +2086,6 @@ func (this *RollingDeploymentStrategyParams) String() string { `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1) + `,`, `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_kubernetes_pkg_util_intstr.IntOrString", 1) + `,`, - `UpdatePercent:` + valueToStringGenerated(this.UpdatePercent) + `,`, `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, `}`, @@ -2127,7 +2281,228 @@ func (m *DeploymentCause) Unmarshal(data []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageTrigger == nil { + m.ImageTrigger = &DeploymentCauseImageTrigger{} + } + if err := m.ImageTrigger.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCauseImageTrigger) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2152,11 +2527,11 @@ func (m *DeploymentCause) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentTriggerType(data[iNdEx:postIndex]) + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2180,68 +2555,44 @@ func (m *DeploymentCause) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ImageTrigger == nil { - m.ImageTrigger = &DeploymentCauseImageTrigger{} - } - if err := m.ImageTrigger.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Reason", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCauseImageTrigger) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2251,21 +2602,20 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 
{ return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.From.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(data[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3469,6 +3819,37 @@ func (m *DeploymentConfigStatus) Unmarshal(data []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -3922,6 +4303,125 @@ func (m *DeploymentLogOptions) Unmarshal(data []byte) error { } return nil } +func (m *DeploymentRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Latest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Latest = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DeploymentStrategy) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -5338,26 +5838,6 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(data []byte) error { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field UpdatePercent", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.UpdatePercent = &v case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) @@ -5660,149 +6140,157 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 2291 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x59, 0xcd, 0x6f, 0x24, 0x57, - 0x11, 0x77, 0x7b, 0xc6, 0xf6, 0xcc, 0x1b, 0x7f, 0xbe, 0xfd, 0x9a, 0x4c, 0x90, 0x6d, 0x35, 0x49, - 0xb4, 0x28, 0xd9, 0x1e, 0xad, 0x01, 0x91, 0x6c, 0xd8, 0x80, 0xc7, 0xf1, 0x66, 0x1d, 0xc6, 0x6b, - 0xe7, 0xd9, 0xbb, 0x81, 0x20, 0x84, 0xda, 0x33, 0xcf, 0xb3, 0x1d, 0x4f, 0xf7, 0x1b, 0xfa, 0x63, - 0x76, 0xe7, 0xc4, 0x4a, 0x1c, 0x10, 0x42, 0x48, 0x08, 0x24, 0x84, 0x94, 0x4b, 0x4e, 0x5c, 0x90, - 0xf8, 0x13, 0x38, 0x70, 0x61, 0x39, 0x11, 0x6e, 0x48, 0xa0, 0x55, 0x08, 0x57, 0xf8, 0x07, 0x38, - 0x51, 0xef, 0xab, 0x3f, 0xa6, 0x67, 0x9c, 0x1d, 0xdb, 0x39, 0x8c, 0xe4, 0x7e, 0x55, 0xf5, 0xab, - 0x7a, 0x55, 0xf5, 0xea, 0x55, 0x3d, 0xa3, 0xdb, 0x1d, 0x27, 0x7c, 0x18, 0x1d, 0x59, 0x2d, 0xe6, - 0xd6, 0x59, 0x8f, 0x7a, 0xc1, 0x43, 0xe7, 0x38, 0xac, 0x33, 0xdf, 0xe9, 0x38, 0x5e, 0xbd, 0x77, - 0xd2, 0xa9, 0xb7, 0x69, 0xaf, 0xcb, 0x06, 0x75, 0xbb, 0xe7, 0xd4, 0xfb, 0x37, 0xeb, 0x1d, 0xea, - 0x51, 0xdf, 0x0e, 0x69, 0xdb, 0xea, 0xf9, 0x2c, 0x64, 0xf8, 0x46, 0x22, 0x6e, 0xc5, 0xe2, 0x96, - 0x14, 0xb7, 0x40, 0xdc, 0x92, 0xe2, 0x16, 0x88, 0x5b, 0xfd, 0x9b, 0xb5, 0x14, 0x7b, 0xbd, 0xc3, - 0x3a, 0xac, 0x2e, 0x50, 0x8e, 0xa2, 0x63, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x12, 0xbd, 0xf6, 0xf5, - 0x93, 0xd7, 0x03, 0xcb, 0x61, 0xf5, 0x93, 0xe8, 0x88, 0xfa, 0x1e, 0x0d, 0x69, 0x20, 0x4c, 0xe2, - 0xb6, 0x44, 0x5e, 0x9f, 0xfa, 0x81, 0xc3, 0x3c, 
0xda, 0x1e, 0x36, 0xaa, 0xf6, 0xda, 0x78, 0xb1, - 0xfc, 0x16, 0x6a, 0xb7, 0xc7, 0x72, 0x07, 0x75, 0xfa, 0x38, 0x84, 0x3d, 0x81, 0x96, 0x00, 0x24, - 0x8f, 0x68, 0x68, 0xe7, 0xc5, 0x6f, 0x8c, 0x16, 0xf7, 0x23, 0x2f, 0x74, 0x5c, 0x9a, 0x63, 0xbf, - 0x39, 0x9a, 0x3d, 0x0a, 0x9d, 0x6e, 0xdd, 0xf1, 0xc2, 0x20, 0xf4, 0x87, 0x45, 0xcc, 0xbf, 0x18, - 0x68, 0x75, 0x2b, 0x0a, 0x42, 0xe6, 0xbe, 0x2d, 0x9c, 0xe9, 0x52, 0x2f, 0x3c, 0x08, 0x39, 0x47, - 0x67, 0xb0, 0x6f, 0xfb, 0xb6, 0x1b, 0xe0, 0x2f, 0xa3, 0x19, 0xc7, 0xb5, 0x3b, 0xb4, 0x6a, 0xac, - 0x1b, 0xd7, 0xcb, 0x8d, 0x85, 0xa7, 0xcf, 0xd6, 0xa6, 0x3e, 0x7b, 0xb6, 0x36, 0xb3, 0xc3, 0x17, - 0x89, 0xa4, 0xe1, 0xef, 0xa3, 0x0a, 0xf5, 0xfa, 0x8e, 0xcf, 0x3c, 0x8e, 0x50, 0x9d, 0x5e, 0x2f, - 0x5c, 0xaf, 0x6c, 0xbc, 0x64, 0x49, 0x83, 0xac, 0xc4, 0x20, 0x11, 0x37, 0x19, 0x30, 0x6b, 0xdb, - 0xeb, 0x3f, 0xb0, 0xfd, 0xc6, 0x25, 0x05, 0x58, 0xd9, 0x4e, 0x00, 0x48, 0x1a, 0x0d, 0xbf, 0x8c, - 0xe6, 0x20, 0xa8, 0xae, 0xed, 0xb5, 0xab, 0x05, 0x00, 0x2e, 0x37, 0x2a, 0xc0, 0x3e, 0xb7, 0x25, - 0x97, 0x88, 0xa6, 0x99, 0x7f, 0x35, 0xd0, 0x52, 0xb2, 0x8b, 0x2d, 0x3b, 0x0a, 0x28, 0x7e, 0x03, - 0x15, 0xc3, 0x41, 0x4f, 0xdb, 0xfe, 0xb2, 0x52, 0x55, 0x3c, 0x84, 0xb5, 0xff, 0x3d, 0x5b, 0xbb, - 0x92, 0xb0, 0x1f, 0x42, 0x5a, 0x75, 0xa8, 0xcf, 0x09, 0x44, 0x88, 0xe0, 0x27, 0x06, 0x9a, 0x17, - 0x9b, 0x53, 0x24, 0xd8, 0x94, 0x01, 0x9b, 0x7a, 0xd7, 0x9a, 0x28, 0x2d, 0xad, 0x21, 0x8b, 0x76, - 0x52, 0x88, 0x8d, 0x65, 0xb0, 0x65, 0x3e, 0xbd, 0x42, 0x32, 0x1a, 0x4d, 0x0f, 0xbd, 0x78, 0x8a, - 0x38, 0xde, 0x43, 0xc5, 0x63, 0x9f, 0xb9, 0x62, 0x73, 0x95, 0x8d, 0x1b, 0xa7, 0x7b, 0x7b, 0xef, - 0xe8, 0x43, 0xda, 0x0a, 0x09, 0x3d, 0xa6, 0x3e, 0xf5, 0x5a, 0xb4, 0x31, 0xaf, 0x7d, 0x71, 0x07, - 0x20, 0x88, 0x00, 0x32, 0xff, 0x34, 0x8d, 0x96, 0x53, 0x0a, 0x99, 0x77, 0xec, 0x74, 0xf0, 0x77, - 0x51, 0xc9, 0x85, 0xec, 0x6c, 0xdb, 0xa1, 0xad, 0x34, 0x5d, 0x7f, 0x1e, 0x4d, 0xbb, 0x20, 0xd3, - 0xc0, 0x4a, 0x09, 0x4a, 0xd6, 0x48, 0x8c, 0x86, 0x29, 0x2a, 0x06, 0x3d, 0xda, 0x52, 
0x8e, 0xdd, - 0x3a, 0xbb, 0x63, 0x85, 0xa1, 0x07, 0x00, 0x95, 0xec, 0x8a, 0x7f, 0x11, 0x01, 0x8f, 0x5d, 0x34, - 0x1b, 0x84, 0x76, 0x18, 0x05, 0x90, 0x3d, 0x5c, 0xd1, 0xf6, 0x79, 0x15, 0x09, 0xb0, 0xc6, 0xa2, - 0x52, 0x35, 0x2b, 0xbf, 0x89, 0x52, 0x62, 0xfe, 0xc3, 0x40, 0x97, 0x87, 0x45, 0x9a, 0x4e, 0x10, - 0xe2, 0x1f, 0xe4, 0x1c, 0x59, 0x3f, 0xc5, 0x91, 0xa9, 0x22, 0x64, 0x71, 0x71, 0xe1, 0xcf, 0x65, - 0xa5, 0xb3, 0xa4, 0x57, 0x52, 0xde, 0x6c, 0xc3, 0x39, 0x0d, 0xa9, 0x1b, 0xa8, 0xc3, 0xf7, 0xad, - 0x73, 0xee, 0x32, 0x75, 0xd0, 0x39, 0x2a, 0x91, 0xe0, 0xe6, 0xc7, 0x05, 0x54, 0x1d, 0x66, 0x25, - 0xac, 0xdb, 0x3d, 0xb2, 0x5b, 0x27, 0x78, 0x1d, 0x15, 0x3d, 0xdb, 0xd5, 0xa7, 0x2d, 0x8e, 0xc5, - 0x3d, 0x58, 0x23, 0x82, 0x82, 0x7f, 0x6f, 0x20, 0x1c, 0xf5, 0xda, 0xbc, 0x02, 0x6d, 0x7a, 0x1e, - 0x03, 0x8f, 0xf1, 0x02, 0xa8, 0x4c, 0xfe, 0xe1, 0x39, 0x4d, 0xd6, 0x76, 0x58, 0xf7, 0x73, 0x1a, - 0xb6, 0xbd, 0xd0, 0x1f, 0x34, 0x6a, 0xca, 0x22, 0x9c, 0x67, 0x20, 0x23, 0xcc, 0x82, 0xcc, 0x91, - 0x09, 0x2a, 0xf3, 0xe6, 0x3b, 0x17, 0x64, 0xde, 0xb8, 0x44, 0xad, 0x6d, 0xa3, 0x6b, 0x63, 0x2c, - 0xc7, 0xcb, 0xa8, 0x70, 0x42, 0x07, 0xd2, 0xb1, 0x84, 0xff, 0x89, 0x2f, 0xa3, 0x99, 0xbe, 0xdd, - 0x8d, 0xa8, 0x38, 0x3d, 0x65, 0x22, 0x3f, 0x6e, 0x4d, 0xbf, 0x6e, 0x98, 0x7f, 0x2c, 0xa0, 0x2f, - 0x9d, 0xa6, 0xfb, 0xc2, 0xeb, 0x06, 0x7e, 0x0d, 0x95, 0x7c, 0xda, 0x77, 0x78, 0xb6, 0x0a, 0x73, - 0x0a, 0x49, 0xa2, 0x12, 0xb5, 0x4e, 0x62, 0x0e, 0xbc, 0x89, 0x96, 0x1c, 0xaf, 0xd5, 0x8d, 0xda, - 0xba, 0x90, 0xc9, 0x83, 0x59, 0x6a, 0x5c, 0x53, 0x42, 0x4b, 0x3b, 0x59, 0x32, 0x19, 0xe6, 0x4f, - 0x43, 0x50, 0xb7, 0xd7, 0x05, 0x97, 0x55, 0x8b, 0xa3, 0x21, 0x14, 0x99, 0x0c, 0xf3, 0xe3, 0x07, - 0xe8, 0xaa, 0x5a, 0x22, 0xe0, 0x2b, 0xa7, 0x25, 0xbc, 0xcd, 0x8f, 0x54, 0x75, 0x46, 0x20, 0xad, - 0x2a, 0xa4, 0xab, 0x3b, 0x23, 0xb9, 0xc8, 0x18, 0xe9, 0x94, 0x69, 0xfa, 0x1e, 0xad, 0xce, 0x8e, - 0x34, 0x4d, 0x93, 0xc9, 0x30, 0xbf, 0xf9, 0xd1, 0x6c, 0xbe, 0x82, 0x88, 0xc0, 0x31, 0x54, 0x0a, - 0x34, 0xa8, 0x0c, 0xde, 
0xe6, 0x99, 0x73, 0x52, 0x6b, 0x4b, 0x42, 0x15, 0x1b, 0x14, 0x2b, 0xc1, - 0x3e, 0x2a, 0x85, 0x3a, 0x46, 0xb2, 0x4a, 0xdf, 0x3d, 0xb3, 0x42, 0x15, 0xbc, 0x7d, 0x06, 0xee, - 0x72, 0x68, 0xd0, 0x98, 0xe7, 0x3a, 0xe3, 0x10, 0xc7, 0x7a, 0x64, 0x32, 0x09, 0x9f, 0xca, 0xbc, - 0x98, 0x49, 0x27, 0x93, 0x5c, 0x27, 0x31, 0x07, 0x6e, 0xa2, 0xcb, 0x3a, 0xb1, 0xee, 0x42, 0x4d, - 0x64, 0xfe, 0xa0, 0xe9, 0xb8, 0x4e, 0x28, 0xd2, 0x61, 0xa6, 0x51, 0x05, 0xa9, 0xcb, 0x64, 0x04, - 0x9d, 0x8c, 0x94, 0xe2, 0x05, 0x0c, 0xd2, 0x3f, 0x54, 0x29, 0x10, 0xa7, 0xfa, 0x21, 0xac, 0x11, - 0x41, 0xc1, 0xaf, 0xa0, 0xd9, 0x1e, 0xbf, 0x88, 0xdb, 0x2a, 0xaa, 0xf1, 0x2d, 0xb0, 0x2f, 0x56, - 0x89, 0xa2, 0xe2, 0x1f, 0x43, 0xa8, 0x68, 0x17, 0x4e, 0x0e, 0xf3, 0xab, 0x73, 0xa2, 0xba, 0xbd, - 0x77, 0x01, 0xf7, 0x9b, 0x75, 0xa0, 0x30, 0x65, 0x3d, 0x4b, 0x42, 0xa7, 0x96, 0x49, 0xac, 0x14, - 0xbf, 0x0f, 0xa1, 0xd3, 0x67, 0xa3, 0xf4, 0x3c, 0x07, 0x7d, 0x9f, 0xb5, 0xf5, 0xe1, 0x90, 0x15, - 0x4a, 0xc4, 0x47, 0x9f, 0x9f, 0x18, 0x8c, 0x27, 0xb8, 0xeb, 0x78, 0x84, 0xda, 0xed, 0xc1, 0x01, - 0x6d, 0x31, 0xaf, 0x1d, 0x54, 0xcb, 0xc2, 0xd9, 0x71, 0x82, 0xef, 0x66, 0xc9, 0x64, 0x98, 0xbf, - 0xf6, 0x26, 0x5a, 0xc8, 0x6c, 0x64, 0xa2, 0xf2, 0xf6, 0xeb, 0x22, 0xba, 0x3a, 0xfa, 0x4a, 0xc6, - 0x80, 0xcb, 0x4d, 0x0c, 0xc2, 0x07, 0xf2, 0xea, 0x14, 0x80, 0x85, 0xc6, 0x15, 0x65, 0xd8, 0x42, - 0x33, 0x4d, 0x24, 0x59, 0x5e, 0xfc, 0x2e, 0xc2, 0xec, 0x28, 0xa0, 0x7e, 0x9f, 0xb6, 0xdf, 0x91, - 0x5d, 0x72, 0x52, 0xce, 0xe2, 0x8b, 0x63, 0x2f, 0xc7, 0x41, 0x46, 0x48, 0x4d, 0x98, 0xc3, 0xe0, - 0x51, 0x75, 0xf9, 0x68, 0xa2, 0x4a, 0xdf, 0xd8, 0xa3, 0xf7, 0xb3, 0x64, 0x32, 0xcc, 0x8f, 0xdf, - 0x41, 0x2b, 0x76, 0xdf, 0x76, 0xba, 0xf6, 0x51, 0x97, 0xc6, 0x20, 0x33, 0x02, 0xe4, 0x05, 0x05, - 0xb2, 0xb2, 0x39, 0xcc, 0x40, 0xf2, 0x32, 0x78, 0x17, 0x5d, 0x8a, 0xbc, 0x3c, 0xd4, 0xac, 0x80, - 0x7a, 0x51, 0x41, 0x5d, 0xba, 0x9f, 0x67, 0x21, 0xa3, 0xe4, 0x70, 0x07, 0xcd, 0xb5, 0xa1, 0x2a, - 0x3a, 0xdd, 0x00, 0x4e, 0x01, 0x4f, 0xc2, 0x6f, 0x9f, 0xf9, 
0x14, 0xbc, 0x2d, 0x71, 0x64, 0xf3, - 0xaf, 0x3e, 0x88, 0x46, 0x37, 0x7f, 0x67, 0xa0, 0x95, 0x1c, 0x2f, 0xfe, 0x0a, 0x9a, 0x73, 0x69, - 0x10, 0x24, 0xd3, 0xcb, 0x92, 0xda, 0xc1, 0xdc, 0xae, 0x5c, 0x26, 0x9a, 0x8e, 0x8f, 0xd1, 0x6c, - 0x8b, 0x1f, 0x5d, 0xdd, 0x8c, 0xbc, 0x75, 0xbe, 0x3e, 0x3f, 0x29, 0x0c, 0xe2, 0x13, 0xda, 0x43, - 0x89, 0x6e, 0x2e, 0xa1, 0x85, 0x84, 0xb5, 0xc9, 0x3a, 0xe6, 0x2f, 0x8a, 0xe9, 0x6a, 0x0f, 0x2b, - 0x7b, 0x3d, 0xd9, 0x7d, 0xd4, 0x51, 0x19, 0x8e, 0x0b, 0x6c, 0x04, 0xd2, 0x4a, 0x99, 0xbf, 0xa2, - 0x40, 0xcb, 0x5b, 0x9a, 0x40, 0x12, 0x1e, 0x5e, 0x9b, 0x8e, 0xe1, 0x9e, 0x67, 0x8f, 0x44, 0xd6, - 0xa6, 0x6a, 0xd3, 0x1d, 0xb1, 0x4a, 0x14, 0x95, 0x67, 0x67, 0x8f, 0x97, 0x3f, 0x16, 0xe9, 0x9b, - 0x37, 0xce, 0xce, 0x7d, 0xb5, 0x4e, 0x62, 0x0e, 0xfc, 0x35, 0x34, 0x1f, 0xc0, 0x0d, 0x45, 0xf5, - 0x61, 0x2f, 0xca, 0x0b, 0x9e, 0x8f, 0x2e, 0x07, 0xa9, 0x75, 0x92, 0xe1, 0x82, 0xa9, 0xa1, 0x2c, - 0xbe, 0x0f, 0x61, 0x50, 0x15, 0x89, 0x58, 0xd9, 0x78, 0xf5, 0x39, 0xbb, 0x5d, 0x2e, 0xd2, 0x58, - 0xe0, 0xbb, 0x3c, 0xd0, 0x08, 0x24, 0x01, 0xc3, 0x1b, 0x08, 0xf1, 0xe9, 0x17, 0xba, 0x6d, 0xb7, - 0x17, 0xa8, 0x2a, 0x1c, 0xcf, 0x19, 0x87, 0x31, 0x85, 0xa4, 0xb8, 0xf0, 0xab, 0xa8, 0xcc, 0x13, - 0xa2, 0x09, 0x6e, 0x92, 0x89, 0x58, 0x90, 0x0a, 0x0e, 0xf5, 0x22, 0x49, 0xe8, 0xd8, 0x42, 0xa8, - 0xcb, 0x6f, 0x83, 0xc6, 0x00, 0x2c, 0x14, 0xb5, 0xb3, 0xd0, 0x58, 0xe4, 0xe0, 0xcd, 0x78, 0x95, - 0xa4, 0x38, 0xb8, 0xdb, 0x3d, 0xf6, 0xc8, 0x86, 0x4b, 0xa7, 0x9c, 0x75, 0xfb, 0x3d, 0xf6, 0x3e, - 0xac, 0x12, 0x45, 0xe5, 0x63, 0xac, 0xda, 0x64, 0x15, 0x09, 0x50, 0x91, 0xc9, 0xba, 0x1a, 0x69, - 0x9a, 0xf9, 0xb7, 0x39, 0x84, 0xf3, 0xd7, 0x34, 0xbe, 0x95, 0x99, 0x64, 0x5f, 0x19, 0x9a, 0x64, - 0xaf, 0xe6, 0x25, 0x52, 0xa3, 0xec, 0x4f, 0x60, 0x94, 0x6d, 0x89, 0x29, 0x5f, 0xce, 0xf4, 0xea, - 0x2e, 0xdf, 0x9d, 0x30, 0xc5, 0x4f, 0x7f, 0x28, 0x90, 0x29, 0xb1, 0x95, 0x52, 0x43, 0x32, 0x4a, - 0xf1, 0xcf, 0x0d, 0xb4, 0xe8, 0xd3, 0x96, 0x4f, 0x41, 0x48, 0xd9, 0x21, 0x1b, 0xeb, 0xbd, 0x09, - 
0xed, 0x20, 0x0a, 0x64, 0xac, 0x25, 0x18, 0x2c, 0x59, 0x24, 0x19, 0x55, 0x64, 0x48, 0x35, 0xfe, - 0xa9, 0x81, 0x16, 0x7c, 0x38, 0x0f, 0x8e, 0xd7, 0x51, 0xc6, 0x14, 0x85, 0x31, 0xf7, 0x26, 0x35, - 0x46, 0x62, 0x8c, 0xb5, 0x65, 0x85, 0x5f, 0x3c, 0x24, 0xad, 0x88, 0x64, 0xf5, 0xe2, 0x16, 0x2a, - 0xfb, 0x34, 0x60, 0x91, 0xdf, 0xa2, 0x81, 0x3a, 0x2a, 0x1b, 0xa7, 0x5f, 0xd5, 0x44, 0xb1, 0x13, - 0xfa, 0xa3, 0xc8, 0xf1, 0x29, 0xd7, 0x1a, 0x24, 0xb5, 0x41, 0x53, 0x21, 0xa9, 0x63, 0x5c, 0x1c, - 0xa1, 0x59, 0xa8, 0xcc, 0xb4, 0xcb, 0x4f, 0x4c, 0xe1, 0x0c, 0xb1, 0xcf, 0xef, 0xcf, 0x6a, 0x0a, - 0x3c, 0xd9, 0x89, 0xc4, 0x39, 0x2f, 0x17, 0x89, 0x52, 0x86, 0x7f, 0x66, 0xa0, 0x8a, 0x9d, 0x1a, - 0xf4, 0x64, 0x2b, 0x44, 0xce, 0xaf, 0x3c, 0x37, 0xdb, 0xc5, 0xcf, 0x48, 0xe9, 0xa1, 0x2e, 0xad, - 0xbb, 0xf6, 0x06, 0xaa, 0xa4, 0x4c, 0x9e, 0xa4, 0xe7, 0xa8, 0xbd, 0x85, 0x96, 0xcf, 0x35, 0x92, - 0xfd, 0x61, 0x1a, 0x99, 0xb9, 0x4e, 0x58, 0xbc, 0xe5, 0x6c, 0x3d, 0xb4, 0xbd, 0x8e, 0xce, 0x49, - 0xa8, 0xf8, 0x76, 0x04, 0x07, 0x06, 0xd4, 0xb4, 0x04, 0x70, 0x29, 0x89, 0xea, 0xa6, 0x26, 0x90, - 0x84, 0x07, 0x8a, 0xc2, 0x62, 0x5c, 0xfe, 0xf9, 0x94, 0x2d, 0x2f, 0xaf, 0xb2, 0x3c, 0x00, 0x5b, - 0x19, 0x0a, 0x19, 0xe2, 0x8c, 0xa7, 0xc0, 0xc2, 0x45, 0x4d, 0x81, 0xd0, 0x40, 0x75, 0xed, 0x40, - 0xef, 0x8e, 0xb6, 0xc5, 0xfe, 0xc4, 0xa9, 0x2a, 0x27, 0x0d, 0x54, 0x33, 0xc7, 0x41, 0x46, 0x48, - 0x99, 0xbf, 0x32, 0xd0, 0x0b, 0x63, 0x47, 0x07, 0x7c, 0xa2, 0x9f, 0x3a, 0x0c, 0x91, 0x4e, 0x77, - 0x2e, 0x64, 0x26, 0x19, 0x8c, 0x7e, 0xf1, 0xb8, 0x55, 0xfa, 0xed, 0xc7, 0x6b, 0x53, 0x4f, 0xfe, - 0xb9, 0x3e, 0x65, 0xfe, 0xd7, 0x40, 0xd7, 0xc6, 0xc8, 0x9e, 0xe7, 0xa1, 0xf1, 0x37, 0xd0, 0xba, - 0x38, 0xc3, 0xb9, 0xa0, 0x4a, 0xf4, 0x7b, 0xe7, 0xdd, 0x5a, 0x2e, 0xc9, 0x1a, 0x57, 0x78, 0x2f, - 0x98, 0x5b, 0x26, 0x79, 0x13, 0xcc, 0x4f, 0xa1, 0x60, 0x6f, 0x3f, 0xa6, 0xad, 0x7b, 0xf4, 0x11, - 0x0c, 0x07, 0x77, 0x19, 0x3b, 0x49, 0x3f, 0xc5, 0x1a, 0xe3, 0x9f, 0x62, 0xf1, 0x16, 0x2a, 0x50, - 0xaf, 0x3f, 0xd1, 0x33, 0x70, 0x45, 
0xb9, 0xac, 0x00, 0xdf, 0x84, 0x4b, 0xf3, 0x6e, 0x3e, 0x93, - 0xb2, 0x22, 0x53, 0xcb, 0x49, 0x37, 0x9f, 0xc9, 0x6f, 0x92, 0xe5, 0x15, 0x97, 0x2d, 0xeb, 0x46, - 0xfc, 0x48, 0x14, 0x13, 0x43, 0x1f, 0xc8, 0x25, 0xa2, 0x69, 0xe6, 0x9f, 0xa7, 0xd1, 0x42, 0xd3, - 0x39, 0xa6, 0xad, 0x41, 0xab, 0x4b, 0xc5, 0x0e, 0xbf, 0x87, 0x16, 0x8e, 0xa1, 0x15, 0x88, 0x7c, - 0x2a, 0x23, 0xab, 0x22, 0xfa, 0x55, 0xad, 0xf5, 0x4e, 0x9a, 0x08, 0xa1, 0xad, 0x65, 0xc4, 0x33, - 0x54, 0x92, 0x45, 0xc2, 0x2e, 0x42, 0x34, 0x76, 0xa7, 0x0a, 0xf0, 0xed, 0x09, 0x03, 0x9c, 0x8d, - 0x87, 0xec, 0x4b, 0x92, 0x35, 0x92, 0x52, 0x80, 0xbb, 0xbc, 0xe9, 0xe9, 0x88, 0x48, 0x07, 0xe2, - 0xe1, 0xbc, 0xb2, 0xf1, 0xe6, 0x84, 0xda, 0x0e, 0x95, 0xbc, 0xd0, 0x15, 0x97, 0x22, 0xbd, 0x2a, - 0xba, 0x26, 0xf5, 0xa7, 0xf9, 0x9f, 0x69, 0xb4, 0xfe, 0x79, 0x17, 0x33, 0xaf, 0x57, 0xbc, 0x2b, - 0x63, 0x51, 0xa8, 0xbb, 0x49, 0x39, 0xa1, 0x89, 0x7a, 0x75, 0x98, 0xa1, 0x90, 0x21, 0x4e, 0x18, - 0x68, 0x0b, 0xd0, 0x93, 0x2a, 0xb7, 0x7d, 0x73, 0xc2, 0x8d, 0x64, 0x82, 0xd4, 0x98, 0xe3, 0x79, - 0x06, 0xad, 0x2e, 0xe1, 0x88, 0x1c, 0xd8, 0x75, 0xda, 0xaa, 0x0e, 0x5e, 0x00, 0xf0, 0xae, 0xd3, - 0x26, 0x1c, 0x11, 0x7f, 0x80, 0x8a, 0x3d, 0x16, 0x84, 0xaa, 0xb1, 0x38, 0x1f, 0x72, 0x89, 0x57, - 0x93, 0x7d, 0xc6, 0xdf, 0x21, 0x38, 0xa6, 0xf9, 0xd1, 0x0c, 0x5a, 0xfb, 0x9c, 0xd6, 0x03, 0xef, - 0xc0, 0x2c, 0x27, 0xe6, 0xc4, 0x7d, 0xea, 0x3b, 0xac, 0x9d, 0x75, 0xf9, 0x35, 0x31, 0xc7, 0xe5, - 0xc9, 0x64, 0x94, 0x0c, 0xbe, 0xcd, 0x5f, 0xb5, 0x42, 0x18, 0x73, 0xed, 0xae, 0x86, 0x91, 0x93, - 0xf1, 0x25, 0xf9, 0xa2, 0x95, 0x21, 0x91, 0x61, 0xde, 0x11, 0x71, 0x2f, 0x3c, 0x77, 0xdc, 0x3f, - 0x44, 0x8b, 0xae, 0xfd, 0x38, 0x35, 0x71, 0x2a, 0x7f, 0x5a, 0x63, 0xca, 0x0a, 0xff, 0x77, 0x97, - 0x25, 0xff, 0xdd, 0x65, 0x81, 0x61, 0x7b, 0x3e, 0x78, 0x05, 0xbc, 0x24, 0x75, 0xed, 0x66, 0x90, - 0xc8, 0x10, 0xb2, 0xf8, 0x5f, 0x87, 0xfd, 0xf8, 0x20, 0xf2, 0x3b, 0x7a, 0x68, 0x99, 0x54, 0x8b, - 0x78, 0x35, 0xd9, 0x55, 0x18, 0x24, 0x46, 0xc3, 0xdf, 0x40, 0x0b, 0xb1, 
0x5f, 0x5b, 0xfc, 0x5f, - 0x64, 0x72, 0xa2, 0x16, 0xdd, 0xe1, 0xfd, 0x34, 0x81, 0x64, 0xf9, 0x74, 0xda, 0xcf, 0x5d, 0x78, - 0xda, 0xeb, 0xec, 0x2c, 0x7d, 0x01, 0xd9, 0x09, 0x57, 0xda, 0x7c, 0xba, 0x76, 0xe4, 0x6b, 0xb9, - 0x31, 0x41, 0x2d, 0xdf, 0x41, 0xd3, 0x21, 0x53, 0x07, 0x7f, 0xc2, 0x3e, 0x05, 0x29, 0x05, 0xd3, - 0x87, 0x8c, 0x00, 0x48, 0xe3, 0xa5, 0xa7, 0xff, 0x5a, 0x9d, 0xfa, 0x04, 0x7e, 0x7f, 0x87, 0xdf, - 0x93, 0xcf, 0x56, 0x8d, 0xa7, 0xf0, 0xfb, 0x04, 0x7e, 0x9f, 0xc2, 0xef, 0x97, 0xff, 0x5e, 0x9d, - 0xfa, 0x60, 0xba, 0x7f, 0xf3, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x09, 0xcb, 0xb2, 0xb9, - 0x1e, 0x00, 0x00, + // 2426 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x19, 0x4b, 0x6f, 0x5b, 0x59, + 0xb9, 0xd7, 0x76, 0x12, 0xfb, 0xe4, 0xd1, 0xf6, 0xb4, 0xd3, 0x78, 0x3c, 0x28, 0x89, 0x2e, 0x33, + 0x43, 0xd1, 0x4c, 0xaf, 0xd5, 0x00, 0xd2, 0x4c, 0x4b, 0x0b, 0x71, 0x26, 0x99, 0xa6, 0x38, 0x4d, + 0xe6, 0x24, 0xed, 0xc0, 0x20, 0x84, 0x6e, 0xec, 0x63, 0xf7, 0x4e, 0x7c, 0xef, 0x35, 0xf7, 0xe1, + 0xd6, 0x2b, 0x2a, 0x58, 0x20, 0x34, 0x42, 0x42, 0x2c, 0x10, 0x12, 0x9b, 0x59, 0xb1, 0x41, 0xe2, + 0x27, 0xb0, 0x60, 0x43, 0x59, 0x31, 0xec, 0x90, 0x40, 0xd5, 0x50, 0xb6, 0xb0, 0x65, 0xc1, 0x8a, + 0xef, 0xbc, 0xee, 0xd3, 0x4e, 0xeb, 0x38, 0x2c, 0x22, 0xe5, 0x7e, 0xef, 0xf3, 0xbd, 0xce, 0xf7, + 0x1d, 0xa3, 0x5b, 0x5d, 0x2b, 0x78, 0x18, 0x1e, 0x19, 0x2d, 0xd7, 0xae, 0xbb, 0x7d, 0xea, 0xf8, + 0x0f, 0xad, 0x4e, 0x50, 0x77, 0x3d, 0xab, 0x6b, 0x39, 0xf5, 0xfe, 0x71, 0xb7, 0xde, 0xa6, 0xfd, + 0x9e, 0x3b, 0xac, 0x9b, 0x7d, 0xab, 0x3e, 0xb8, 0x5e, 0xef, 0x52, 0x87, 0x7a, 0x66, 0x40, 0xdb, + 0x46, 0xdf, 0x73, 0x03, 0x17, 0x5f, 0x8b, 0xd9, 0x8d, 0x88, 0xdd, 0x10, 0xec, 0x06, 0xb0, 0x1b, + 0x82, 0xdd, 0x00, 0x76, 0x63, 0x70, 0xbd, 0x96, 0x20, 0xaf, 0x77, 0xdd, 0xae, 0x5b, 0xe7, 0x52, + 0x8e, 0xc2, 0x0e, 0xff, 0xe2, 0x1f, 0xfc, 0x3f, 0x21, 0xbd, 0xf6, 0xb5, 0xe3, 0x77, 0x7c, 0xc3, + 0x72, 0xeb, 0xc7, 0xe1, 0x11, 0xf5, 0x1c, 
0x1a, 0x50, 0x9f, 0x9b, 0xc4, 0x6c, 0x09, 0x9d, 0x01, + 0xf5, 0x7c, 0xcb, 0x75, 0x68, 0x3b, 0x6b, 0x54, 0xed, 0xed, 0xf1, 0x6c, 0xf9, 0x23, 0xd4, 0x6e, + 0x8d, 0xa5, 0xf6, 0xeb, 0xf4, 0x71, 0x00, 0x67, 0x02, 0x2d, 0x3e, 0x70, 0x1e, 0xd1, 0xc0, 0xcc, + 0xb3, 0x5f, 0x1b, 0xcd, 0xee, 0x85, 0x4e, 0x60, 0xd9, 0x34, 0x47, 0x7e, 0x7d, 0x34, 0x79, 0x18, + 0x58, 0xbd, 0xba, 0xe5, 0x04, 0x7e, 0xe0, 0x65, 0x59, 0xf4, 0x3f, 0x69, 0x68, 0x65, 0x33, 0xf4, + 0x03, 0xd7, 0x7e, 0x8f, 0x3b, 0xd3, 0xa6, 0x4e, 0x70, 0x10, 0x30, 0x8a, 0xee, 0x70, 0xdf, 0xf4, + 0x4c, 0xdb, 0xc7, 0x5f, 0x44, 0x33, 0x96, 0x6d, 0x76, 0x69, 0x55, 0x5b, 0xd3, 0xae, 0x56, 0x1a, + 0x8b, 0x4f, 0x9f, 0xad, 0x9e, 0x7b, 0xfe, 0x6c, 0x75, 0x66, 0x87, 0x01, 0x89, 0xc0, 0xe1, 0xef, + 0xa2, 0x79, 0xea, 0x0c, 0x2c, 0xcf, 0x75, 0x98, 0x84, 0x6a, 0x61, 0xad, 0x78, 0x75, 0x7e, 0xfd, + 0x75, 0x43, 0x18, 0x64, 0xc4, 0x06, 0xf1, 0xb8, 0x89, 0x80, 0x19, 0x5b, 0xce, 0xe0, 0x81, 0xe9, + 0x35, 0x2e, 0x49, 0x81, 0xf3, 0x5b, 0xb1, 0x00, 0x92, 0x94, 0x86, 0xdf, 0x40, 0x73, 0x10, 0x54, + 0xdb, 0x74, 0xda, 0xd5, 0x22, 0x08, 0xae, 0x34, 0xe6, 0x81, 0x7c, 0x6e, 0x53, 0x80, 0x88, 0xc2, + 0xe9, 0x7f, 0xd6, 0xd0, 0xf9, 0xf8, 0x14, 0x9b, 0x66, 0xe8, 0x53, 0xfc, 0x2e, 0x2a, 0x05, 0xc3, + 0xbe, 0xb2, 0xfd, 0x0d, 0xa9, 0xaa, 0x74, 0x08, 0xb0, 0xff, 0x3e, 0x5b, 0x7d, 0x25, 0x26, 0x3f, + 0x84, 0xb4, 0xea, 0x52, 0x8f, 0x21, 0x08, 0x67, 0xc1, 0x4f, 0x34, 0xb4, 0xc0, 0x0f, 0x27, 0x51, + 0x70, 0x28, 0x0d, 0x0e, 0x75, 0xd7, 0x98, 0x28, 0x2d, 0x8d, 0x8c, 0x45, 0x3b, 0x09, 0x89, 0x8d, + 0x0b, 0x60, 0xcb, 0x42, 0x12, 0x42, 0x52, 0x1a, 0x75, 0x07, 0xbd, 0x76, 0x02, 0x3b, 0xde, 0x43, + 0xa5, 0x8e, 0xe7, 0xda, 0xfc, 0x70, 0xf3, 0xeb, 0xd7, 0x4e, 0xf6, 0xf6, 0xde, 0xd1, 0xc7, 0xb4, + 0x15, 0x10, 0xda, 0xa1, 0x1e, 0x75, 0x5a, 0xb4, 0xb1, 0xa0, 0x7c, 0xb1, 0x0d, 0x22, 0x08, 0x17, + 0xa4, 0xff, 0xa7, 0x80, 0x2e, 0x25, 0x14, 0xba, 0x4e, 0xdb, 0x0a, 0x20, 0x41, 0xf1, 0xcd, 0x94, + 0x17, 0xbf, 0x94, 0xf1, 0xe2, 0xf2, 0x08, 0x96, 0x84, 0x1f, 0x1f, 0xa0, 0x59, 
0x3f, 0x30, 0x83, + 0xd0, 0xe7, 0x0e, 0xac, 0x34, 0x6e, 0x4b, 0xf6, 0xd9, 0x03, 0x0e, 0x05, 0x01, 0x27, 0xd6, 0x94, + 0x11, 0xc9, 0x14, 0xf4, 0x44, 0x4a, 0xc3, 0x8f, 0x10, 0xee, 0x99, 0x3e, 0x04, 0xce, 0x84, 0x2a, + 0xe2, 0x3a, 0xa1, 0x26, 0x20, 0x41, 0x98, 0x2f, 0xde, 0x3a, 0xc1, 0x17, 0x89, 0xea, 0x36, 0x18, + 0x4b, 0xa3, 0x26, 0x0d, 0xc2, 0xcd, 0x9c, 0x38, 0x32, 0x42, 0x05, 0x7e, 0x13, 0xcd, 0x7a, 0xd4, + 0xf4, 0x5d, 0xa7, 0x5a, 0xe2, 0x07, 0x5a, 0x52, 0x07, 0x22, 0x1c, 0x4a, 0x24, 0x16, 0x7f, 0x19, + 0xcd, 0xd9, 0xd4, 0xf7, 0x59, 0xe9, 0xcc, 0x70, 0xc2, 0xf3, 0x92, 0x70, 0x6e, 0x57, 0x80, 0x89, + 0xc2, 0xeb, 0x7f, 0x28, 0xa0, 0x0b, 0x29, 0x2f, 0x76, 0xac, 0x2e, 0xfe, 0x36, 0x2a, 0xdb, 0xd0, + 0x16, 0xda, 0x66, 0x60, 0xca, 0x10, 0x5f, 0x7d, 0x99, 0x10, 0xef, 0x02, 0x4f, 0x03, 0x4b, 0x55, + 0x28, 0x86, 0x91, 0x48, 0x1a, 0xa6, 0xa8, 0xe4, 0xf7, 0x69, 0x4b, 0x66, 0xf4, 0xe6, 0xe9, 0x33, + 0x9a, 0x1b, 0x7a, 0x00, 0xa2, 0xe2, 0x74, 0x62, 0x5f, 0x84, 0x8b, 0xc7, 0x76, 0x14, 0x79, 0x11, + 0x95, 0xad, 0x69, 0x15, 0x71, 0x61, 0xb1, 0xbf, 0xd3, 0x09, 0xa1, 0xff, 0x4d, 0x43, 0x97, 0xb3, + 0x2c, 0x4d, 0xcb, 0x0f, 0xf0, 0xf7, 0x72, 0x8e, 0xac, 0xbf, 0x64, 0x7e, 0x30, 0x76, 0xee, 0xcf, + 0x0b, 0x52, 0x67, 0x59, 0x41, 0x12, 0xde, 0x6c, 0x43, 0x83, 0x0c, 0xa8, 0xed, 0xcb, 0xae, 0xf7, + 0x8d, 0x29, 0x4f, 0x99, 0xe8, 0xb0, 0x4c, 0x2a, 0x11, 0xc2, 0xf5, 0x4f, 0x8b, 0xa8, 0x9a, 0x25, + 0x25, 0x6e, 0xaf, 0x77, 0x64, 0xb6, 0x8e, 0xf1, 0x1a, 0x2a, 0x39, 0xa6, 0xad, 0x0a, 0x34, 0x8a, + 0xc5, 0x3d, 0x80, 0x11, 0x8e, 0xc1, 0xbf, 0xd5, 0x10, 0x0e, 0xfb, 0x6d, 0xd6, 0xfa, 0x37, 0x1c, + 0xc7, 0x05, 0x8f, 0xb1, 0x9b, 0x47, 0x9a, 0xfc, 0xfd, 0x29, 0x4d, 0x56, 0x76, 0x18, 0xf7, 0x73, + 0x1a, 0xb6, 0x9c, 0xc0, 0x1b, 0xc6, 0x25, 0x96, 0x27, 0x20, 0x23, 0xcc, 0x82, 0xcc, 0x11, 0x09, + 0x2a, 0xf2, 0xe6, 0x5b, 0x67, 0x64, 0xde, 0xb8, 0x44, 0xad, 0x6d, 0xa1, 0xe5, 0x31, 0x96, 0xe3, + 0x0b, 0xa8, 0x78, 0x4c, 0x87, 0xc2, 0xb1, 0x84, 0xfd, 0x8b, 0x2f, 0xa3, 0x99, 0x81, 0xd9, 0x0b, + 0xa9, 0x68, 0x67, 
0x44, 0x7c, 0xdc, 0x28, 0xbc, 0xa3, 0xe9, 0xbf, 0x2f, 0xa2, 0x2f, 0x9c, 0xa4, + 0xfb, 0xcc, 0x1b, 0x36, 0x7e, 0x1b, 0x95, 0x3d, 0x3a, 0xb0, 0x58, 0xb6, 0x72, 0x73, 0x8a, 0x71, + 0xa2, 0x12, 0x09, 0x27, 0x11, 0x05, 0xde, 0x40, 0xe7, 0x2d, 0xa7, 0xd5, 0x0b, 0xdb, 0xea, 0x06, + 0x11, 0x85, 0x59, 0x6e, 0x2c, 0x4b, 0xa6, 0xf3, 0x3b, 0x69, 0x34, 0xc9, 0xd2, 0x27, 0x45, 0x50, + 0xbb, 0xdf, 0x03, 0x97, 0xf1, 0x26, 0x38, 0x42, 0x84, 0x44, 0x93, 0x2c, 0x3d, 0xdc, 0x07, 0x57, + 0x24, 0x88, 0x80, 0xaf, 0xac, 0x16, 0xf7, 0x36, 0x2b, 0x29, 0xde, 0x25, 0xcb, 0x8d, 0x15, 0x29, + 0xe9, 0xca, 0xce, 0x48, 0x2a, 0x32, 0x86, 0x3b, 0x61, 0x9a, 0x1a, 0x60, 0xaa, 0xb3, 0x23, 0x4d, + 0x53, 0x68, 0x92, 0xa5, 0xd7, 0x7f, 0x3d, 0x9b, 0xef, 0x20, 0x3c, 0x70, 0x2e, 0x2a, 0xfb, 0x4a, + 0xa8, 0x08, 0xde, 0xc6, 0xa9, 0x73, 0x52, 0x69, 0x8b, 0x43, 0x15, 0x19, 0x14, 0x29, 0xc1, 0x1e, + 0x2a, 0x07, 0x2a, 0x46, 0xa2, 0x4b, 0xdf, 0x39, 0xb5, 0x42, 0x19, 0xbc, 0x7d, 0x17, 0xdc, 0x65, + 0x51, 0xbf, 0xb1, 0xc0, 0x74, 0x46, 0x21, 0x8e, 0xf4, 0x88, 0x64, 0xe2, 0x3e, 0x15, 0x79, 0x31, + 0x93, 0x4c, 0x26, 0x01, 0x27, 0x11, 0x05, 0x6e, 0xa2, 0xcb, 0x2a, 0xb1, 0xee, 0x40, 0x4f, 0x74, + 0xbd, 0x61, 0xd3, 0xb2, 0xad, 0x80, 0xa7, 0xc3, 0x4c, 0xa3, 0x0a, 0x5c, 0x97, 0xc9, 0x08, 0x3c, + 0x19, 0xc9, 0xc5, 0x1a, 0x18, 0xa4, 0x7f, 0x20, 0x53, 0x20, 0x4a, 0xf5, 0x43, 0x80, 0x11, 0x8e, + 0x61, 0xb7, 0x6e, 0x9f, 0x4d, 0x40, 0x6d, 0x19, 0xd5, 0xe8, 0x16, 0xd8, 0xe7, 0x50, 0x22, 0xb1, + 0xf8, 0x87, 0x10, 0x2a, 0xda, 0x83, 0xca, 0x71, 0xbd, 0xea, 0x1c, 0xef, 0x6e, 0x1f, 0x9c, 0xc1, + 0xfd, 0x66, 0x1c, 0x48, 0x99, 0xa2, 0x9f, 0xc5, 0xa1, 0x93, 0x60, 0x12, 0x29, 0xc5, 0x1f, 0x42, + 0xe8, 0x54, 0x6d, 0x94, 0x5f, 0xa6, 0xd0, 0xf7, 0xdd, 0xb6, 0x2a, 0x0e, 0xd1, 0xa1, 0x78, 0x7c, + 0x54, 0xfd, 0x44, 0xc2, 0x58, 0x82, 0xdb, 0x96, 0x03, 0x43, 0x46, 0x7b, 0x78, 0x40, 0x5b, 0x30, + 0x15, 0xf9, 0xd5, 0x0a, 0x77, 0x76, 0x94, 0xe0, 0xbb, 0x69, 0x34, 0xc9, 0xd2, 0xd7, 0x6e, 0xa2, + 0xc5, 0xd4, 0x41, 0x26, 0x6a, 0x6f, 0x9f, 0xcc, 0xa0, 
0x2b, 0xa3, 0xaf, 0x64, 0x18, 0x10, 0x17, + 0x99, 0x89, 0x7e, 0xf0, 0x40, 0x5c, 0x9d, 0x5c, 0x60, 0xb1, 0xf1, 0x8a, 0x34, 0x6c, 0xb1, 0x99, + 0x44, 0x92, 0x34, 0x2d, 0xbe, 0x8b, 0xb0, 0x7b, 0xe4, 0x53, 0x6f, 0x40, 0xdb, 0xef, 0x8b, 0xf5, + 0x24, 0x6e, 0x67, 0xd1, 0xc5, 0xb1, 0x97, 0xa3, 0x20, 0x23, 0xb8, 0x26, 0xcc, 0x61, 0xf0, 0xa8, + 0xbc, 0x7c, 0x14, 0x52, 0xa6, 0x6f, 0xe4, 0xd1, 0xfb, 0x69, 0x34, 0xc9, 0xd2, 0xe3, 0xf7, 0xd1, + 0x45, 0x73, 0x60, 0x5a, 0x3d, 0xf3, 0xa8, 0x47, 0x23, 0x21, 0x33, 0x5c, 0xc8, 0xab, 0x52, 0xc8, + 0xc5, 0x8d, 0x2c, 0x01, 0xc9, 0xf3, 0xe0, 0x5d, 0x74, 0x29, 0x74, 0xf2, 0xa2, 0x66, 0xb9, 0xa8, + 0xd7, 0xa4, 0xa8, 0x4b, 0xf7, 0xf3, 0x24, 0x64, 0x14, 0x1f, 0xee, 0xa2, 0xb9, 0x36, 0x74, 0x45, + 0xab, 0xe7, 0x43, 0x15, 0xb0, 0x24, 0xfc, 0xe6, 0xa9, 0xab, 0xe0, 0x3d, 0x21, 0x47, 0x6c, 0x5d, + 0xf2, 0x83, 0x28, 0xe9, 0x78, 0x80, 0x50, 0x4b, 0x4d, 0xe8, 0x3e, 0x24, 0x3c, 0xab, 0xb8, 0xc6, + 0x34, 0x15, 0x27, 0x44, 0xc5, 0x13, 0x6c, 0x04, 0xf2, 0x49, 0x42, 0x93, 0xfe, 0x1b, 0x0d, 0x5d, + 0xcc, 0xd9, 0x98, 0x9c, 0xb9, 0xb5, 0x93, 0x67, 0x6e, 0xdc, 0x41, 0xb3, 0x2d, 0xd6, 0x32, 0xd4, + 0x10, 0x74, 0x7b, 0xba, 0xc5, 0x2e, 0x6e, 0x48, 0xfc, 0x13, 0xc6, 0x52, 0x21, 0x5d, 0x3f, 0x8f, + 0x16, 0x63, 0xd2, 0xa6, 0xdb, 0xd5, 0x7f, 0x56, 0x4a, 0xde, 0x32, 0x00, 0xd9, 0xeb, 0x8b, 0xa9, + 0xa7, 0x8e, 0x2a, 0x70, 0x40, 0x38, 0x08, 0xa4, 0xb3, 0x34, 0xff, 0xa2, 0x14, 0x5a, 0xd9, 0x54, + 0x08, 0x12, 0xd3, 0xb0, 0x9e, 0xd8, 0x81, 0xf9, 0xc2, 0x7d, 0xc4, 0xab, 0x25, 0xd1, 0x13, 0xb7, + 0x39, 0x94, 0x48, 0x2c, 0xab, 0x8a, 0x3e, 0x6b, 0xbb, 0x6e, 0xa8, 0x6e, 0xfc, 0xa8, 0x2a, 0xf6, + 0x25, 0x9c, 0x44, 0x14, 0xf8, 0xab, 0x68, 0xc1, 0x87, 0x9b, 0x91, 0xaa, 0x26, 0x53, 0x12, 0x83, + 0x05, 0xdb, 0x55, 0x0f, 0x12, 0x70, 0x92, 0xa2, 0x82, 0x6d, 0xa5, 0xc2, 0xbf, 0xf9, 0x16, 0x36, + 0x33, 0xf9, 0x16, 0xb6, 0xc8, 0x4e, 0x79, 0xa0, 0x24, 0x90, 0x58, 0x18, 0x5e, 0x47, 0x88, 0x3d, + 0x77, 0xc0, 0x94, 0x6f, 0xf7, 0x7d, 0xd9, 0xfd, 0xa3, 0xec, 0x38, 0x8c, 0x30, 0x24, 0x41, 
0x85, + 0xdf, 0x42, 0x15, 0x96, 0x10, 0x4d, 0x70, 0x93, 0x28, 0x80, 0xa2, 0x50, 0x70, 0xa8, 0x80, 0x24, + 0xc6, 0x63, 0x03, 0xa1, 0x1e, 0xbb, 0x85, 0x1a, 0x43, 0xb0, 0x90, 0xf7, 0xec, 0x62, 0x63, 0x89, + 0x09, 0x6f, 0x46, 0x50, 0x92, 0xa0, 0x60, 0x6e, 0x77, 0xdc, 0x47, 0x26, 0x5c, 0x76, 0x95, 0xb4, + 0xdb, 0xef, 0xb9, 0x1f, 0x02, 0x94, 0x48, 0x2c, 0x7b, 0xb7, 0x90, 0x87, 0xac, 0x22, 0x2e, 0x94, + 0x57, 0x90, 0xea, 0x82, 0x0a, 0xa7, 0xff, 0x28, 0x95, 0xc9, 0x84, 0xfe, 0x20, 0x64, 0xf7, 0xdd, + 0x8b, 0x47, 0x7a, 0x30, 0x43, 0x34, 0xd2, 0x6c, 0xf4, 0x45, 0xb7, 0x25, 0x12, 0xcb, 0x1e, 0x70, + 0x3a, 0xae, 0xd7, 0xa2, 0x32, 0xf4, 0xd1, 0x7a, 0xb1, 0xcd, 0x80, 0x44, 0xe0, 0xf4, 0xbf, 0xcc, + 0x21, 0x9c, 0x9f, 0x51, 0xf0, 0x8d, 0xd4, 0xe6, 0xff, 0x66, 0x66, 0xf3, 0xbf, 0x92, 0xe7, 0x48, + 0x2c, 0xfe, 0x3f, 0xd6, 0xd0, 0x42, 0x8b, 0xbf, 0x2d, 0x89, 0x97, 0x24, 0x39, 0xc8, 0xec, 0x4e, + 0x58, 0x67, 0x27, 0x3f, 0x4f, 0x89, 0xbc, 0xdc, 0x4c, 0xa8, 0x21, 0x29, 0xa5, 0xf8, 0x13, 0x0d, + 0x2d, 0x79, 0xb4, 0x05, 0x3b, 0x79, 0x40, 0xa5, 0x1d, 0x62, 0xab, 0xd8, 0x9b, 0xd0, 0x0e, 0x22, + 0x85, 0x8c, 0xb5, 0x04, 0x83, 0x25, 0x4b, 0x24, 0xa5, 0x8a, 0x64, 0x54, 0xe3, 0x9f, 0x68, 0x68, + 0xd1, 0x83, 0xa2, 0xb4, 0x9c, 0xae, 0x34, 0xa6, 0xc4, 0x8d, 0xb9, 0x37, 0xa9, 0x31, 0x42, 0xc6, + 0x58, 0x5b, 0x2e, 0xb2, 0x5b, 0x97, 0x24, 0x15, 0x91, 0xb4, 0x5e, 0xdc, 0x42, 0x15, 0x8f, 0xfa, + 0x6e, 0x08, 0xc1, 0xf7, 0x65, 0xbd, 0xae, 0x9f, 0x3c, 0xa7, 0x10, 0x49, 0xce, 0x32, 0xd4, 0xf2, + 0x28, 0xd3, 0xea, 0xc7, 0x0d, 0x4a, 0x61, 0xa1, 0xb2, 0x22, 0xb9, 0x38, 0x64, 0x29, 0x7a, 0x44, + 0x7b, 0xac, 0x6c, 0x8b, 0xa7, 0x88, 0x7d, 0xfe, 0x7c, 0x46, 0x93, 0xcb, 0x13, 0x63, 0x58, 0x22, + 0xe3, 0x19, 0x90, 0x48, 0x65, 0xf8, 0xa7, 0x1a, 0x9a, 0x37, 0x13, 0x5b, 0xae, 0x98, 0x03, 0xc9, + 0xf4, 0xca, 0x73, 0x8b, 0x6d, 0xf4, 0x78, 0x99, 0xdc, 0x68, 0x93, 0xba, 0x6b, 0xef, 0xa2, 0xf9, + 0x84, 0xc9, 0x93, 0x0c, 0x5c, 0xb5, 0xdb, 0xe8, 0xc2, 0x54, 0xfb, 0xe8, 0xef, 0x0a, 0x48, 0xcf, + 0xad, 0x01, 0xfc, 0x05, 0x71, 
0xf3, 0xa1, 0xe9, 0x74, 0x55, 0x4e, 0xc2, 0xb5, 0x63, 0x86, 0x50, + 0x30, 0xa0, 0xa6, 0xc5, 0x05, 0x97, 0xe3, 0xa8, 0x6e, 0x28, 0x04, 0x89, 0x69, 0xa0, 0x29, 0x2c, + 0x45, 0x77, 0x10, 0xeb, 0x47, 0xe2, 0x06, 0xad, 0x88, 0x02, 0xd8, 0x4c, 0x61, 0x48, 0x86, 0x32, + 0x5a, 0x81, 0x8b, 0x67, 0xb5, 0x02, 0xdf, 0x55, 0xcf, 0x80, 0xfc, 0x74, 0xb4, 0xcd, 0xcf, 0x27, + 0x5f, 0xe6, 0x32, 0x2f, 0x7b, 0x49, 0x0a, 0x32, 0x82, 0x4b, 0xff, 0x85, 0x86, 0x5e, 0x1d, 0xbb, + 0x37, 0xe1, 0x63, 0xf5, 0xce, 0xa3, 0xf1, 0x74, 0xda, 0x3e, 0x93, 0x85, 0x6c, 0x38, 0xfa, 0xb9, + 0xe7, 0x46, 0xf9, 0x57, 0x9f, 0xae, 0x9e, 0x7b, 0xf2, 0xf7, 0xb5, 0x73, 0xfa, 0xbf, 0x35, 0xb4, + 0x3c, 0x86, 0x77, 0x9a, 0xe7, 0xed, 0x5f, 0xc2, 0xad, 0x63, 0x65, 0x73, 0x41, 0xb6, 0xe8, 0x0f, + 0xa6, 0x3d, 0x5a, 0x2e, 0xc9, 0x1a, 0xaf, 0xb0, 0x41, 0x38, 0x07, 0x26, 0x79, 0x13, 0xf4, 0xcf, + 0xa1, 0x61, 0x6f, 0x3d, 0xa6, 0xad, 0x7b, 0xf4, 0x11, 0x6c, 0x46, 0x77, 0x5c, 0xf7, 0x38, 0xf9, + 0x03, 0x80, 0x36, 0xfe, 0x07, 0x00, 0xbc, 0x89, 0x8a, 0xd4, 0x19, 0x4c, 0xf4, 0xe3, 0xc3, 0xbc, + 0x74, 0x59, 0x11, 0xbe, 0x09, 0xe3, 0x66, 0xab, 0x4c, 0x2a, 0x65, 0x79, 0xa6, 0x56, 0xe2, 0x55, + 0x26, 0x95, 0xdf, 0x24, 0x4d, 0xcb, 0x6f, 0x7c, 0xb7, 0x17, 0xb2, 0x92, 0x28, 0xc5, 0x86, 0x3e, + 0x10, 0x20, 0xa2, 0x70, 0xfa, 0x1f, 0x0b, 0x68, 0xb1, 0x69, 0x75, 0x68, 0x6b, 0xd8, 0xea, 0x51, + 0x7e, 0xc2, 0xef, 0xa0, 0xc5, 0x0e, 0xcc, 0x23, 0xa1, 0x47, 0x45, 0x64, 0x65, 0x44, 0xbf, 0xa2, + 0xb4, 0x6e, 0x27, 0x91, 0x10, 0xda, 0x5a, 0x8a, 0x3d, 0x85, 0x25, 0x69, 0x49, 0xd8, 0x46, 0x88, + 0x46, 0xee, 0x94, 0x01, 0xbe, 0x35, 0x61, 0x80, 0xd3, 0xf1, 0x10, 0xc3, 0x51, 0x0c, 0x23, 0x09, + 0x05, 0xb8, 0xc7, 0x26, 0xaf, 0x2e, 0x8f, 0xb4, 0xcf, 0x7f, 0xae, 0x99, 0x5f, 0xbf, 0x39, 0xa1, + 0xb6, 0x43, 0xc9, 0xcf, 0x75, 0x45, 0xad, 0x48, 0x41, 0xf9, 0xe8, 0x26, 0xff, 0xd5, 0xff, 0x55, + 0x40, 0x6b, 0x2f, 0xba, 0x98, 0x59, 0xbf, 0x62, 0xa3, 0xa1, 0x1b, 0x06, 0x6a, 0xa4, 0x15, 0xeb, + 0x29, 0xef, 0x57, 0x87, 0x29, 0x0c, 0xc9, 0x50, 0xc2, 0x36, 0x5f, 
0x84, 0xc1, 0x58, 0xba, 0xed, + 0xeb, 0x13, 0x1e, 0x24, 0x15, 0xa4, 0xc6, 0x1c, 0xcb, 0x33, 0x98, 0xb7, 0x09, 0x93, 0xc8, 0x04, + 0xdb, 0x56, 0x5b, 0xf6, 0xc1, 0x33, 0x10, 0xbc, 0x6b, 0xb5, 0x09, 0x93, 0x88, 0x3f, 0x42, 0xa5, + 0xbe, 0xeb, 0x07, 0x72, 0xb0, 0x98, 0x4e, 0x72, 0x99, 0x75, 0x93, 0x7d, 0x97, 0x3d, 0xc2, 0x30, + 0x99, 0xfa, 0xf3, 0x12, 0x5a, 0x7d, 0xc1, 0xe8, 0x81, 0x77, 0x60, 0x91, 0xe5, 0x4b, 0xf2, 0x3e, + 0xf5, 0x2c, 0xb7, 0x9d, 0x76, 0xf9, 0x32, 0x5f, 0x62, 0xf3, 0x68, 0x32, 0x8a, 0x07, 0xdf, 0x62, + 0x4f, 0x7a, 0x01, 0xec, 0xf8, 0x66, 0x4f, 0x89, 0x11, 0xcf, 0x02, 0x97, 0xc4, 0x73, 0x5e, 0x0a, + 0x45, 0xb2, 0xb4, 0x23, 0xe2, 0x5e, 0x7c, 0xe9, 0xb8, 0x7f, 0x8c, 0x96, 0x6c, 0xf3, 0x71, 0x62, + 0xdd, 0x96, 0xfe, 0x34, 0xc6, 0xb4, 0x15, 0xf6, 0x23, 0xab, 0x21, 0x7e, 0x64, 0x35, 0xc0, 0xb0, + 0x3d, 0x0f, 0xbc, 0x02, 0x5e, 0x12, 0xba, 0x76, 0x53, 0x92, 0x48, 0x46, 0x32, 0xff, 0xa1, 0xc7, + 0x7c, 0x7c, 0x10, 0x7a, 0x5d, 0xb5, 0x39, 0x4d, 0xaa, 0x85, 0x3f, 0x19, 0xed, 0x4a, 0x19, 0x24, + 0x92, 0xa6, 0xb2, 0x77, 0xee, 0xcc, 0xb3, 0x57, 0x25, 0x59, 0xf9, 0xff, 0x90, 0x64, 0x70, 0x33, + 0x2d, 0x24, 0x5b, 0x40, 0xbe, 0x25, 0x6b, 0x13, 0xb4, 0xe4, 0x1d, 0x54, 0x08, 0x5c, 0x59, 0xbf, + 0x13, 0x8e, 0x1b, 0x48, 0x2a, 0x28, 0x1c, 0xba, 0x04, 0x84, 0x34, 0x5e, 0x7f, 0xfa, 0x8f, 0x95, + 0x73, 0x9f, 0xc1, 0xdf, 0x5f, 0xe1, 0xef, 0xc9, 0xf3, 0x15, 0xed, 0x29, 0xfc, 0x7d, 0x06, 0x7f, + 0x9f, 0xc3, 0xdf, 0xcf, 0xff, 0xb9, 0x72, 0xee, 0xa3, 0xc2, 0xe0, 0xfa, 0xff, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x49, 0x36, 0x34, 0xb4, 0xf6, 0x20, 0x00, 0x00, } diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.proto b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.proto deleted file mode 100644 index 8c753f8b0..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/generated.proto +++ /dev/null @@ -1,410 +0,0 @@ - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package jackfan.us.kg.openshift.origin.pkg.deploy.api.v1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. -message CustomDeploymentStrategyParams { - // Image specifies a Docker image which can carry out a deployment. - optional string image = 1; - - // Environment holds the environment which will be given to the container for Image. - repeated k8s.io.kubernetes.pkg.api.v1.EnvVar environment = 2; - - // Command is optional and overrides CMD in the container Image. - repeated string command = 3; -} - -// DeploymentCause captures information about a particular cause of a deployment. -message DeploymentCause { - // Type of the trigger that resulted in the creation of a new deployment - optional string type = 1; - - // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change - optional DeploymentCauseImageTrigger imageTrigger = 2; -} - -// DeploymentCauseImageTrigger represents details about the cause of a deployment originating -// from an image change trigger -message DeploymentCauseImageTrigger { - // From is a reference to the changed object which triggered a deployment. The field may have - // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; -} - -// DeploymentConfig represents a configuration for a single deployment (represented as a -// ReplicationController). It also contains details about changes which resulted in the current -// state of the DeploymentConfig. 
Each change to the DeploymentConfig which should result in -// a new deployment results in an increment of LatestVersion. -message DeploymentConfig { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec represents a desired deployment state and how to deploy to it. - optional DeploymentConfigSpec spec = 2; - - // Status represents the current deployment state. - optional DeploymentConfigStatus status = 3; -} - -// DeploymentConfigList is a collection of deployment configs. -message DeploymentConfigList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of deployment configs - repeated DeploymentConfig items = 2; -} - -// DeploymentConfigRollback provides the input to rollback generation. -message DeploymentConfigRollback { - // Name of the deployment config that will be rolled back. - optional string name = 1; - - // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. - map updatedAnnotations = 2; - - // Spec defines the options to rollback generation. - optional DeploymentConfigRollbackSpec spec = 3; -} - -// DeploymentConfigRollbackSpec represents the options for rollback generation. -message DeploymentConfigRollbackSpec { - // From points to a ReplicationController which is a deployment. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // Revision to rollback to. If set to 0, rollback to the last revision. - optional int64 revision = 2; - - // IncludeTriggers specifies whether to include config Triggers. - optional bool includeTriggers = 3; - - // IncludeTemplate specifies whether to include the PodTemplateSpec. - optional bool includeTemplate = 4; - - // IncludeReplicationMeta specifies whether to include the replica count and selector. - optional bool includeReplicationMeta = 5; - - // IncludeStrategy specifies whether to include the deployment Strategy. 
- optional bool includeStrategy = 6; -} - -// DeploymentConfigSpec represents the desired state of the deployment. -message DeploymentConfigSpec { - // Strategy describes how a deployment is executed. - optional DeploymentStrategy strategy = 1; - - // MinReadySeconds is the minimum number of seconds for which a newly created pod should - // be ready without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - optional int32 minReadySeconds = 9; - - // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers - // are defined, a new deployment can only occur as a result of an explicit client update to the - // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. - optional DeploymentTriggerPolicies triggers = 2; - - // Replicas is the number of desired replicas. - optional int32 replicas = 3; - - // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. - // This field is a pointer to allow for differentiation between an explicit zero and not specified. - optional int32 revisionHistoryLimit = 4; - - // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the - // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding - // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. - optional bool test = 5; - - // Paused indicates that the deployment config is paused resulting in no new deployments on template - // changes or changes in the template caused by other triggers. - optional bool paused = 6; - - // Selector is a label query over pods that should match the Replicas count. 
- map selector = 7; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 8; -} - -// DeploymentConfigStatus represents the current deployment state. -message DeploymentConfigStatus { - // LatestVersion is used to determine whether the current deployment associated with a deployment - // config is out of sync. - optional int64 latestVersion = 1; - - // ObservedGeneration is the most recent generation observed by the deployment config controller. - optional int64 observedGeneration = 2; - - // Replicas is the total number of pods targeted by this deployment config. - optional int32 replicas = 3; - - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config - // that have the desired template spec. - optional int32 updatedReplicas = 4; - - // AvailableReplicas is the total number of available pods targeted by this deployment config. - optional int32 availableReplicas = 5; - - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. - optional int32 unavailableReplicas = 6; - - // Details are the reasons for the update to this deployment config. - // This could be based on a change made by the user or caused by an automatic trigger - optional DeploymentDetails details = 7; -} - -// DeploymentDetails captures information about the causes of a deployment. 
-message DeploymentDetails { - // Message is the user specified change message, if this deployment was triggered manually by the user - optional string message = 1; - - // Causes are extended data associated with all the causes for creating a new deployment - repeated DeploymentCause causes = 2; -} - -// DeploymentLog represents the logs for a deployment -message DeploymentLog { -} - -// DeploymentLogOptions is the REST options for a deployment log -message DeploymentLogOptions { - // The container for which to stream logs. Defaults to only container if there is one container in the pod. - optional string container = 1; - - // Follow if true indicates that the build log should be streamed until - // the build terminates. - optional bool follow = 2; - - // Return previous deployment logs. Defaults to false. - optional bool previous = 3; - - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional int64 sinceSeconds = 4; - - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; - - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. - optional bool timestamps = 6; - - // If set, the number of lines from the end of the logs to show. 
If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - optional int64 tailLines = 7; - - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - optional int64 limitBytes = 8; - - // NoWait if true causes the call to return immediately even if the deployment - // is not available yet. Otherwise the server will wait until the deployment has started. - // TODO: Fix the tag to 'noWait' in v2 - optional bool nowait = 9; - - // Version of the deployment for which to view logs. - optional int64 version = 10; -} - -// DeploymentStrategy describes how to perform a deployment. -message DeploymentStrategy { - // Type is the name of a deployment strategy. - optional string type = 1; - - // CustomParams are the input to the Custom deployment strategy. - optional CustomDeploymentStrategyParams customParams = 2; - - // RecreateParams are the input to the Recreate deployment strategy. - optional RecreateDeploymentStrategyParams recreateParams = 3; - - // RollingParams are the input to the Rolling deployment strategy. - optional RollingDeploymentStrategyParams rollingParams = 4; - - // Resources contains resource requirements to execute the deployment and any hooks - optional k8s.io.kubernetes.pkg.api.v1.ResourceRequirements resources = 5; - - // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. - map labels = 6; - - // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. - map annotations = 7; -} - -// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. -message DeploymentTriggerImageChangeParams { - // Automatic means that the detection of a new tag value should result in an image update - // inside the pod template. 
Deployment configs that haven't been deployed yet will always - // have their images updated. Deployment configs that have been deployed at least once, will - // have their images updated only if this is set to true. - optional bool automatic = 1; - - // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. - repeated string containerNames = 2; - - // From is a reference to an image stream tag to watch for changes. From.Name is the only - // required subfield - if From.Namespace is blank, the namespace of the current deployment - // trigger will be used. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 3; - - // LastTriggeredImage is the last image to be triggered. - optional string lastTriggeredImage = 4; -} - -// DeploymentTriggerPolicies is a list of policies where nil values and different from empty arrays. -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message DeploymentTriggerPolicies { - // items, if empty, will result in an empty slice - - repeated DeploymentTriggerPolicy items = 1; -} - -// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. -message DeploymentTriggerPolicy { - // Type of the trigger - optional string type = 1; - - // ImageChangeParams represents the parameters for the ImageChange trigger. - optional DeploymentTriggerImageChangeParams imageChangeParams = 2; -} - -// ExecNewPodHook is a hook implementation which runs a command in a new pod -// based on the specified container which is assumed to be part of the -// deployment template. -message ExecNewPodHook { - // Command is the action command and its arguments. - repeated string command = 1; - - // Env is a set of environment variables to supply to the hook pod's container. 
- repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 2; - - // ContainerName is the name of a container in the deployment pod template - // whose Docker image will be used for the hook pod's container. - optional string containerName = 3; - - // Volumes is a list of named volumes from the pod template which should be - // copied to the hook pod. Volumes names not found in pod spec are ignored. - // An empty list means no volumes will be copied. - repeated string volumes = 4; -} - -// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. -message LifecycleHook { - // FailurePolicy specifies what action to take if the hook fails. - optional string failurePolicy = 1; - - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. - optional ExecNewPodHook execNewPod = 2; - - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. - repeated TagImageHook tagImages = 3; -} - -// RecreateDeploymentStrategyParams are the input to the Recreate deployment -// strategy. -message RecreateDeploymentStrategyParams { - // TimeoutSeconds is the time to wait for updates before giving up. If the - // value is nil, a default will be used. - optional int64 timeoutSeconds = 1; - - // Pre is a lifecycle hook which is executed before the strategy manipulates - // the deployment. All LifecycleHookFailurePolicy values are supported. - optional LifecycleHook pre = 2; - - // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new - // pod is created. All LifecycleHookFailurePolicy values are supported. - optional LifecycleHook mid = 3; - - // Post is a lifecycle hook which is executed after the strategy has - // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. 
- optional LifecycleHook post = 4; -} - -// RollingDeploymentStrategyParams are the input to the Rolling deployment -// strategy. -message RollingDeploymentStrategyParams { - // UpdatePeriodSeconds is the time to wait between individual pod updates. - // If the value is nil, a default will be used. - optional int64 updatePeriodSeconds = 1; - - // IntervalSeconds is the time to wait between polling deployment status - // after update. If the value is nil, a default will be used. - optional int64 intervalSeconds = 2; - - // TimeoutSeconds is the time to wait for updates before giving up. If the - // value is nil, a default will be used. - optional int64 timeoutSeconds = 3; - - // MaxUnavailable is the maximum number of pods that can be unavailable - // during the update. Value can be an absolute number (ex: 5) or a - // percentage of total pods at the start of update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // - // This cannot be 0 if MaxSurge is 0. By default, 25% is used. - // - // Example: when this is set to 30%, the old RC can be scaled down by 30% - // immediately when the rolling update starts. Once new pods are ready, old - // RC can be scaled down further, followed by scaling up the new RC, - // ensuring that at least 70% of original number of pods are available at - // all times during the update. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 4; - - // MaxSurge is the maximum number of pods that can be scheduled above the - // original number of pods. Value can be an absolute number (ex: 5) or a - // percentage of total pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // - // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. - // - // Example: when this is set to 30%, the new RC can be scaled up by 30% - // immediately when the rolling update starts. 
Once old pods have been - // killed, new RC can be scaled up further, ensuring that total number of - // pods running at any time during the update is atmost 130% of original - // pods. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 5; - - // UpdatePercent is the percentage of replicas to scale up or down each - // interval. If nil, one replica will be scaled up and down each interval. - // If negative, the scale order will be down/up instead of up/down. - // DEPRECATED: Use MaxUnavailable/MaxSurge instead. - optional int32 updatePercent = 6; - - // Pre is a lifecycle hook which is executed before the deployment process - // begins. All LifecycleHookFailurePolicy values are supported. - optional LifecycleHook pre = 7; - - // Post is a lifecycle hook which is executed after the strategy has - // finished all deployment logic. The LifecycleHookFailurePolicyAbort policy - // is NOT supported. - optional LifecycleHook post = 8; -} - -// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. -message TagImageHook { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single - // container this value will be defaulted to the name of that container. - optional string containerName = 1; - - // To is the target ImageStreamTag to set the container's image onto. 
- optional k8s.io.kubernetes.pkg.api.v1.ObjectReference to = 2; -} - diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/register.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/register.go index 11a1deb87..883696b41 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/register.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/register.go @@ -21,6 +21,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentConfig{}, &DeploymentConfigList{}, &DeploymentConfigRollback{}, + &DeploymentRequest{}, &DeploymentLog{}, &DeploymentLogOptions{}, ) @@ -30,5 +31,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { func (obj *DeploymentConfig) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentConfigList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentConfigRollback) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *DeploymentRequest) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentLog) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *DeploymentLogOptions) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/swagger_doc.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/swagger_doc.go index 70e9200eb..752978860 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/swagger_doc.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/swagger_doc.go @@ -35,8 +35,21 @@ func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { return map_DeploymentCauseImageTrigger } +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment config at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + 
"lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + var map_DeploymentConfig = map[string]string{ - "": "DeploymentConfig represents a configuration for a single deployment (represented as a ReplicationController). It also contains details about changes which resulted in the current state of the DeploymentConfig. Each change to the DeploymentConfig which should result in a new deployment results in an increment of LatestVersion.", + "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. 
The `latestVersion` field is updated when a new deployment is triggered by any means.", "metadata": "Standard object's metadata.", "spec": "Spec represents a desired deployment state and how to deploy to it.", "status": "Status represents the current deployment state.", @@ -107,6 +120,7 @@ var map_DeploymentConfigStatus = map[string]string{ "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.", "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "Conditions represents the latest available observations of a deployment config's current state.", } func (DeploymentConfigStatus) SwaggerDoc() map[string]string { @@ -149,13 +163,24 @@ func (DeploymentLogOptions) SwaggerDoc() map[string]string { return map_DeploymentLogOptions } +var map_DeploymentRequest = map[string]string{ + "": "DeploymentRequest is a request to a deployment config for a new deployment.", + "name": "Name of the deployment config for requesting a new deployment.", + "latest": "Latest will update the deployment config with the latest state from all triggers.", + "force": "Force will try to force a new deployment to run. 
If the deployment config is paused, then setting this to true will return an Invalid error.", +} + +func (DeploymentRequest) SwaggerDoc() map[string]string { + return map_DeploymentRequest +} + var map_DeploymentStrategy = map[string]string{ "": "DeploymentStrategy describes how to perform a deployment.", "type": "Type is the name of a deployment strategy.", - "customParams": "CustomParams are the input to the Custom deployment strategy.", + "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.", "rollingParams": "RollingParams are the input to the Rolling deployment strategy.", - "resources": "Resources contains resource requirements to execute the deployment and any hooks", + "resources": "Resources contains resource requirements to execute the deployment and any hooks.", "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", } @@ -166,7 +191,7 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_DeploymentTriggerImageChangeParams = map[string]string{ "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.", - "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template. Deployment configs that haven't been deployed yet will always have their images updated. 
Deployment configs that have been deployed at least once, will have their images updated only if this is set to true.", + "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.", "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod.", "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.", @@ -228,9 +253,8 @@ var map_RollingDeploymentStrategyParams = map[string]string{ "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", - "updatePercent": "UpdatePercent is the percentage of replicas to scale up or down each interval. If nil, one replica will be scaled up and down each interval. If negative, the scale order will be down/up instead of up/down. DEPRECATED: Use MaxUnavailable/MaxSurge instead.", "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", - "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. The LifecycleHookFailurePolicyAbort policy is NOT supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", } func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/types.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/types.go index 6b031f0ee..1e22bb128 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/types.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/types.go @@ -8,37 +8,83 @@ import ( "k8s.io/kubernetes/pkg/util/intstr" ) -// DeploymentPhase describes the possible states a deployment can be in. -type DeploymentPhase string +// +genclient=true -const ( - // DeploymentPhaseNew means the deployment has been accepted but not yet acted upon. - DeploymentPhaseNew DeploymentPhase = "New" - // DeploymentPhasePending means the deployment been handed over to a deployment strategy, - // but the strategy has not yet declared the deployment to be running. - DeploymentPhasePending DeploymentPhase = "Pending" - // DeploymentPhaseRunning means the deployment strategy has reported the deployment as - // being in-progress. 
- DeploymentPhaseRunning DeploymentPhase = "Running" - // DeploymentPhaseComplete means the deployment finished without an error. - DeploymentPhaseComplete DeploymentPhase = "Complete" - // DeploymentPhaseFailed means the deployment finished with an error. - DeploymentPhaseFailed DeploymentPhase = "Failed" -) +// Deployment Configs define the template for a pod and manages deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service. Can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. +// +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +type DeploymentConfig struct { + unversioned.TypeMeta `json:",inline"` + // Standard object's metadata. + kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec represents a desired deployment state and how to deploy to it. + Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current deployment state. + Status DeploymentConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentConfigSpec represents the desired state of the deployment. +type DeploymentConfigSpec struct { + // Strategy describes how a deployment is executed. 
+ Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` + + // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. + Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` + + // Replicas is the number of desired replicas. + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + + // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` + + // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + Test bool `json:"test" protobuf:"varint,5,opt,name=test"` + + // Paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. 
+ Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + Template *kapi.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` +} // DeploymentStrategy describes how to perform a deployment. type DeploymentStrategy struct { // Type is the name of a deployment strategy. Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - // CustomParams are the input to the Custom deployment strategy. + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` // RecreateParams are the input to the Recreate deployment strategy. RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` // RollingParams are the input to the Rolling deployment strategy. RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` - // Resources contains resource requirements to execute the deployment and any hooks + // Resources contains resource requirements to execute the deployment and any hooks. Resources kapi.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. 
Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` @@ -85,56 +131,6 @@ type RecreateDeploymentStrategyParams struct { Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` } -// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. -type LifecycleHook struct { - // FailurePolicy specifies what action to take if the hook fails. - FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` - - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. - ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` - - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. - TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` -} - -// LifecycleHookFailurePolicy describes possibles actions to take if a hook fails. -type LifecycleHookFailurePolicy string - -const ( - // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds. - LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry" - // LifecycleHookFailurePolicyAbort means abort the deployment (if possible). - LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort" - // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment. - LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore" -) - -// ExecNewPodHook is a hook implementation which runs a command in a new pod -// based on the specified container which is assumed to be part of the -// deployment template. -type ExecNewPodHook struct { - // Command is the action command and its arguments. - Command []string `json:"command" protobuf:"bytes,1,rep,name=command"` - // Env is a set of environment variables to supply to the hook pod's container. 
- Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // ContainerName is the name of a container in the deployment pod template - // whose Docker image will be used for the hook pod's container. - ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` - // Volumes is a list of named volumes from the pod template which should be - // copied to the hook pod. Volumes names not found in pod spec are ignored. - // An empty list means no volumes will be copied. - Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` -} - -// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. -type TagImageHook struct { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single - // container this value will be defaulted to the name of that container. - ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` - // To is the target ImageStreamTag to set the container's image onto. - To kapi.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"` -} - // RollingDeploymentStrategyParams are the input to the Rolling deployment // strategy. type RollingDeploymentStrategyParams struct { @@ -173,85 +169,63 @@ type RollingDeploymentStrategyParams struct { // pods running at any time during the update is atmost 130% of original // pods. MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"` - // UpdatePercent is the percentage of replicas to scale up or down each - // interval. If nil, one replica will be scaled up and down each interval. - // If negative, the scale order will be down/up instead of up/down. - // DEPRECATED: Use MaxUnavailable/MaxSurge instead. 
- UpdatePercent *int32 `json:"updatePercent,omitempty" protobuf:"varint,6,opt,name=updatePercent"` // Pre is a lifecycle hook which is executed before the deployment process // begins. All LifecycleHookFailurePolicy values are supported. Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"` // Post is a lifecycle hook which is executed after the strategy has - // finished all deployment logic. The LifecycleHookFailurePolicyAbort policy - // is NOT supported. + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"` } -// These constants represent keys used for correlating objects related to deployments. -const ( - // DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the - // DeploymentConfig on which the deployment is based. - DeploymentConfigAnnotation = "openshift.io/deployment-config.name" - // DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name - // of the deployment (a ReplicationController) on which the deployer Pod acts. - DeploymentAnnotation = "openshift.io/deployment.name" - // DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The - // annotation value is the name of the deployer Pod which will act upon the ReplicationController - // to implement the deployment behavior. - DeploymentPodAnnotation = "openshift.io/deployer-pod.name" - // DeploymentPodTypeLabel is a label with which contains a type of deployment pod. - DeploymentPodTypeLabel = "openshift.io/deployer-pod.type" - // DeployerPodForDeploymentLabel is a label which groups pods related to a - // deployment. The value is a deployment name. The deployer pod and hook pods - // created by the internal strategies will have this label. 
Custom - // strategies can apply this label to any pods they create, enabling - // platform-provided cancellation and garbage collection support. - DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name" - // DeploymentPhaseAnnotation is an annotation name used to retrieve the DeploymentPhase of - // a deployment. - DeploymentPhaseAnnotation = "openshift.io/deployment.phase" - // DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded - // DeploymentConfig on which a given deployment is based. - DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config" - // DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The - // annotation value is the LatestVersion value of the DeploymentConfig which was the basis for - // the deployment. - DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version" - // DeploymentLabel is the name of a label used to correlate a deployment with the Pod created - // to execute the deployment logic. - // TODO: This is a workaround for upstream's lack of annotation support on PodTemplate. Once - // annotations are available on PodTemplate, audit this constant with the goal of removing it. - DeploymentLabel = "deployment" - // DeploymentConfigLabel is the name of a label used to correlate a deployment with the - // DeploymentConfigs on which the deployment is based. 
- DeploymentConfigLabel = "deploymentconfig" - // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state - // Used for specifying the reason for cancellation or failure of a deployment - DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason" - // DeploymentCancelledAnnotation indicates that the deployment has been cancelled - // The annotation value does not matter and its mere presence indicates cancellation - DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled" - // DeploymentInstantiatedAnnotation indicates that the deployment has been instantiated. - // The annotation value does not matter and its mere presence indicates instantiation. - DeploymentInstantiatedAnnotation = "openshift.io/deployment.instantiated" -) +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +type LifecycleHook struct { + // FailurePolicy specifies what action to take if the hook fails. + FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` -// +genclient=true + // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` -// DeploymentConfig represents a configuration for a single deployment (represented as a -// ReplicationController). It also contains details about changes which resulted in the current -// state of the DeploymentConfig. Each change to the DeploymentConfig which should result in -// a new deployment results in an increment of LatestVersion. -type DeploymentConfig struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. 
- kapi.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` +} - // Spec represents a desired deployment state and how to deploy to it. - Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +// LifecycleHookFailurePolicy describes possibles actions to take if a hook fails. +type LifecycleHookFailurePolicy string - // Status represents the current deployment state. - Status DeploymentConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +const ( + // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds. + LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry" + // LifecycleHookFailurePolicyAbort means abort the deployment. + LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort" + // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment. + LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore" +) + +// ExecNewPodHook is a hook implementation which runs a command in a new pod +// based on the specified container which is assumed to be part of the +// deployment template. +type ExecNewPodHook struct { + // Command is the action command and its arguments. + Command []string `json:"command" protobuf:"bytes,1,rep,name=command"` + // Env is a set of environment variables to supply to the hook pod's container. + Env []kapi.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // ContainerName is the name of a container in the deployment pod template + // whose Docker image will be used for the hook pod's container. + ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` + // Volumes is a list of named volumes from the pod template which should be + // copied to the hook pod. 
Volumes names not found in pod spec are ignored. + // An empty list means no volumes will be copied. + Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +type TagImageHook struct { + // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + // To is the target ImageStreamTag to set the container's image onto. + To kapi.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"` } // DeploymentTriggerPolicies is a list of policies where nil values and different from empty arrays. @@ -263,66 +237,6 @@ func (t DeploymentTriggerPolicies) String() string { return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t)) } -// DeploymentConfigSpec represents the desired state of the deployment. -type DeploymentConfigSpec struct { - // Strategy describes how a deployment is executed. - Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` - - // MinReadySeconds is the minimum number of seconds for which a newly created pod should - // be ready without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` - - // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers - // are defined, a new deployment can only occur as a result of an explicit client update to the - // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. 
- Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` - - // Replicas is the number of desired replicas. - Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - - // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. - // This field is a pointer to allow for differentiation between an explicit zero and not specified. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` - - // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the - // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding - // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. - Test bool `json:"test" protobuf:"varint,5,opt,name=test"` - - // Paused indicates that the deployment config is paused resulting in no new deployments on template - // changes or changes in the template caused by other triggers. - Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` - - // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - Template *kapi.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` -} - -// DeploymentConfigStatus represents the current deployment state. -type DeploymentConfigStatus struct { - // LatestVersion is used to determine whether the current deployment associated with a deployment - // config is out of sync. 
- LatestVersion int64 `json:"latestVersion,omitempty" protobuf:"varint,1,opt,name=latestVersion"` - // ObservedGeneration is the most recent generation observed by the deployment config controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"` - // Replicas is the total number of pods targeted by this deployment config. - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,3,opt,name=replicas"` - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config - // that have the desired template spec. - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,4,opt,name=updatedReplicas"` - // AvailableReplicas is the total number of available pods targeted by this deployment config. - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,6,opt,name=unavailableReplicas"` - // Details are the reasons for the update to this deployment config. - // This could be based on a change made by the user or caused by an automatic trigger - Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` -} - // DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. type DeploymentTriggerPolicy struct { // Type of the trigger @@ -346,9 +260,7 @@ const ( // DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. type DeploymentTriggerImageChangeParams struct { // Automatic means that the detection of a new tag value should result in an image update - // inside the pod template. Deployment configs that haven't been deployed yet will always - // have their images updated. 
Deployment configs that have been deployed at least once, will - // have their images updated only if this is set to true. + // inside the pod template. Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"` // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"` @@ -360,6 +272,29 @@ type DeploymentTriggerImageChangeParams struct { LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"` } +// DeploymentConfigStatus represents the current deployment state. +type DeploymentConfigStatus struct { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + LatestVersion int64 `json:"latestVersion,omitempty" protobuf:"varint,1,opt,name=latestVersion"` + // ObservedGeneration is the most recent generation observed by the deployment config controller. + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,2,opt,name=observedGeneration"` + // Replicas is the total number of pods targeted by this deployment config. + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,3,opt,name=replicas"` + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,4,opt,name=updatedReplicas"` + // AvailableReplicas is the total number of available pods targeted by this deployment config. + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. 
+ UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,6,opt,name=unavailableReplicas"` + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` + // Conditions represents the latest available observations of a deployment config's current state. + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` +} + // DeploymentDetails captures information about the causes of a deployment. type DeploymentDetails struct { // Message is the user specified change message, if this deployment was triggered manually by the user @@ -384,6 +319,37 @@ type DeploymentCauseImageTrigger struct { From kapi.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` } +type DeploymentConditionType string + +// These are valid conditions of a deployment config. +const ( + // DeploymentAvailable means the deployment config is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // DeploymentProgressing means the deployment config is progressing. Progress for a deployment + // config is considered when a new replica set is created or adopted, and when new pods scale up or + // old pods scale down. Progress is not estimated for paused deployment configs, when the deployment + // config needs to rollback, or when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // DeploymentReplicaFailure is added in a deployment config when one of its pods + // fails to be created or deleted. 
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment config at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status kapi.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + // DeploymentConfigList is a collection of deployment configs. type DeploymentConfigList struct { unversioned.TypeMeta `json:",inline"` @@ -421,6 +387,18 @@ type DeploymentConfigRollbackSpec struct { IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"` } +// DeploymentRequest is a request to a deployment config for a new deployment. +type DeploymentRequest struct { + unversioned.TypeMeta `json:",inline"` + // Name of the deployment config for requesting a new deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Latest will update the deployment config with the latest state from all triggers. + Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"` + // Force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. 
+ Force bool `json:"force" protobuf:"varint,3,opt,name=force"` +} + // DeploymentLog represents the logs for a deployment type DeploymentLog struct { unversioned.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.conversion.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.conversion.go index 5a857e759..4525db6ff 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.conversion.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.conversion.go @@ -26,6 +26,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_DeploymentCause_To_v1_DeploymentCause, Convert_v1_DeploymentCauseImageTrigger_To_api_DeploymentCauseImageTrigger, Convert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger, + Convert_v1_DeploymentCondition_To_api_DeploymentCondition, + Convert_api_DeploymentCondition_To_v1_DeploymentCondition, Convert_v1_DeploymentConfig_To_api_DeploymentConfig, Convert_api_DeploymentConfig_To_v1_DeploymentConfig, Convert_v1_DeploymentConfigList_To_api_DeploymentConfigList, @@ -44,6 +46,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_api_DeploymentLog_To_v1_DeploymentLog, Convert_v1_DeploymentLogOptions_To_api_DeploymentLogOptions, Convert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions, + Convert_v1_DeploymentRequest_To_api_DeploymentRequest, + Convert_api_DeploymentRequest_To_v1_DeploymentRequest, Convert_v1_DeploymentStrategy_To_api_DeploymentStrategy, Convert_api_DeploymentStrategy_To_v1_DeploymentStrategy, Convert_v1_DeploymentTriggerImageChangeParams_To_api_DeploymentTriggerImageChangeParams, @@ -163,6 +167,36 @@ func Convert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger(i return autoConvert_api_DeploymentCauseImageTrigger_To_v1_DeploymentCauseImageTrigger(in, out, s) } +func autoConvert_v1_DeploymentCondition_To_api_DeploymentCondition(in 
*DeploymentCondition, out *api.DeploymentCondition, s conversion.Scope) error { + out.Type = api.DeploymentConditionType(in.Type) + out.Status = pkg_api.ConditionStatus(in.Status) + if err := pkg_api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_DeploymentCondition_To_api_DeploymentCondition(in *DeploymentCondition, out *api.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1_DeploymentCondition_To_api_DeploymentCondition(in, out, s) +} + +func autoConvert_api_DeploymentCondition_To_v1_DeploymentCondition(in *api.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { + out.Type = DeploymentConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + if err := pkg_api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_DeploymentCondition_To_v1_DeploymentCondition(in *api.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { + return autoConvert_api_DeploymentCondition_To_v1_DeploymentCondition(in, out, s) +} + func autoConvert_v1_DeploymentConfig_To_api_DeploymentConfig(in *DeploymentConfig, out *api.DeploymentConfig, s conversion.Scope) error { SetDefaults_DeploymentConfig(in) if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { @@ -409,6 +443,17 @@ func autoConvert_v1_DeploymentConfigStatus_To_api_DeploymentConfigStatus(in *Dep } else { out.Details = nil } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]api.DeploymentCondition, len(*in)) + for i := range *in { + if err := Convert_v1_DeploymentCondition_To_api_DeploymentCondition(&(*in)[i], &(*out)[i], s); err 
!= nil { + return err + } + } + } else { + out.Conditions = nil + } return nil } @@ -432,6 +477,17 @@ func autoConvert_api_DeploymentConfigStatus_To_v1_DeploymentConfigStatus(in *api } else { out.Details = nil } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := Convert_api_DeploymentCondition_To_v1_DeploymentCondition(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Conditions = nil + } return nil } @@ -543,6 +599,34 @@ func Convert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions(in *api.Deploym return autoConvert_api_DeploymentLogOptions_To_v1_DeploymentLogOptions(in, out, s) } +func autoConvert_v1_DeploymentRequest_To_api_DeploymentRequest(in *DeploymentRequest, out *api.DeploymentRequest, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Name = in.Name + out.Latest = in.Latest + out.Force = in.Force + return nil +} + +func Convert_v1_DeploymentRequest_To_api_DeploymentRequest(in *DeploymentRequest, out *api.DeploymentRequest, s conversion.Scope) error { + return autoConvert_v1_DeploymentRequest_To_api_DeploymentRequest(in, out, s) +} + +func autoConvert_api_DeploymentRequest_To_v1_DeploymentRequest(in *api.DeploymentRequest, out *DeploymentRequest, s conversion.Scope) error { + if err := pkg_api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Name = in.Name + out.Latest = in.Latest + out.Force = in.Force + return nil +} + +func Convert_api_DeploymentRequest_To_v1_DeploymentRequest(in *api.DeploymentRequest, out *DeploymentRequest, s conversion.Scope) error { + return autoConvert_api_DeploymentRequest_To_v1_DeploymentRequest(in, out, s) +} + func autoConvert_v1_DeploymentStrategy_To_api_DeploymentStrategy(in *DeploymentStrategy, out 
*api.DeploymentStrategy, s conversion.Scope) error { SetDefaults_DeploymentStrategy(in) out.Type = api.DeploymentStrategyType(in.Type) @@ -587,6 +671,15 @@ func Convert_v1_DeploymentStrategy_To_api_DeploymentStrategy(in *DeploymentStrat func autoConvert_api_DeploymentStrategy_To_v1_DeploymentStrategy(in *api.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { out.Type = DeploymentStrategyType(in.Type) + if in.CustomParams != nil { + in, out := &in.CustomParams, &out.CustomParams + *out = new(CustomDeploymentStrategyParams) + if err := Convert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams(*in, *out, s); err != nil { + return err + } + } else { + out.CustomParams = nil + } if in.RecreateParams != nil { in, out := &in.RecreateParams, &out.RecreateParams *out = new(RecreateDeploymentStrategyParams) @@ -605,15 +698,6 @@ func autoConvert_api_DeploymentStrategy_To_v1_DeploymentStrategy(in *api.Deploym } else { out.RollingParams = nil } - if in.CustomParams != nil { - in, out := &in.CustomParams, &out.CustomParams - *out = new(CustomDeploymentStrategyParams) - if err := Convert_api_CustomDeploymentStrategyParams_To_v1_CustomDeploymentStrategyParams(*in, *out, s); err != nil { - return err - } - } else { - out.CustomParams = nil - } if err := api_v1.Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { return err } diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.deepcopy.go index f9bc4f1a2..9bec5ebac 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/v1/zz_generated.deepcopy.go @@ -24,6 +24,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CustomDeploymentStrategyParams, InType: 
reflect.TypeOf(&CustomDeploymentStrategyParams{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentCause, InType: reflect.TypeOf(&DeploymentCause{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentCauseImageTrigger, InType: reflect.TypeOf(&DeploymentCauseImageTrigger{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentConfig, InType: reflect.TypeOf(&DeploymentConfig{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentConfigList, InType: reflect.TypeOf(&DeploymentConfigList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentConfigRollback, InType: reflect.TypeOf(&DeploymentConfigRollback{})}, @@ -33,6 +34,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentDetails, InType: reflect.TypeOf(&DeploymentDetails{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentLog, InType: reflect.TypeOf(&DeploymentLog{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentLogOptions, InType: reflect.TypeOf(&DeploymentLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentRequest, InType: reflect.TypeOf(&DeploymentRequest{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentTriggerImageChangeParams, InType: reflect.TypeOf(&DeploymentTriggerImageChangeParams{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeploymentTriggerPolicy, InType: reflect.TypeOf(&DeploymentTriggerPolicy{})}, @@ -96,6 +98,19 @@ func DeepCopy_v1_DeploymentCauseImageTrigger(in interface{}, out interface{}, c } } +func DeepCopy_v1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + out.Type = 
in.Type + out.Status = in.Status + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + out.Reason = in.Reason + out.Message = in.Message + return nil + } +} + func DeepCopy_v1_DeploymentConfig(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*DeploymentConfig) @@ -239,6 +254,17 @@ func DeepCopy_v1_DeploymentConfigStatus(in interface{}, out interface{}, c *conv } else { out.Details = nil } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } return nil } } @@ -321,6 +347,18 @@ func DeepCopy_v1_DeploymentLogOptions(in interface{}, out interface{}, c *conver } } +func DeepCopy_v1_DeploymentRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRequest) + out := out.(*DeploymentRequest) + out.TypeMeta = in.TypeMeta + out.Name = in.Name + out.Latest = in.Latest + out.Force = in.Force + return nil + } +} + func DeepCopy_v1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*DeploymentStrategy) @@ -556,13 +594,6 @@ func DeepCopy_v1_RollingDeploymentStrategyParams(in interface{}, out interface{} } else { out.MaxSurge = nil } - if in.UpdatePercent != nil { - in, out := &in.UpdatePercent, &out.UpdatePercent - *out = new(int32) - **out = **in - } else { - out.UpdatePercent = nil - } if in.Pre != nil { in, out := &in.Pre, &out.Pre *out = new(LifecycleHook) diff --git a/vendor/github.com/openshift/origin/pkg/deploy/api/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/deploy/api/zz_generated.deepcopy.go index 9dcc6b90b..618cec214 100644 --- a/vendor/github.com/openshift/origin/pkg/deploy/api/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/deploy/api/zz_generated.deepcopy.go @@ -24,6 
+24,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CustomDeploymentStrategyParams, InType: reflect.TypeOf(&CustomDeploymentStrategyParams{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentCause, InType: reflect.TypeOf(&DeploymentCause{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentCauseImageTrigger, InType: reflect.TypeOf(&DeploymentCauseImageTrigger{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentConfig, InType: reflect.TypeOf(&DeploymentConfig{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentConfigList, InType: reflect.TypeOf(&DeploymentConfigList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentConfigRollback, InType: reflect.TypeOf(&DeploymentConfigRollback{})}, @@ -33,6 +34,7 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentDetails, InType: reflect.TypeOf(&DeploymentDetails{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentLog, InType: reflect.TypeOf(&DeploymentLog{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentLogOptions, InType: reflect.TypeOf(&DeploymentLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentRequest, InType: reflect.TypeOf(&DeploymentRequest{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentTriggerImageChangeParams, InType: reflect.TypeOf(&DeploymentTriggerImageChangeParams{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeploymentTriggerPolicy, InType: reflect.TypeOf(&DeploymentTriggerPolicy{})}, @@ -97,6 +99,19 @@ func DeepCopy_api_DeploymentCauseImageTrigger(in interface{}, out interface{}, c } } +func 
DeepCopy_api_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + out.Type = in.Type + out.Status = in.Status + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + out.Reason = in.Reason + out.Message = in.Message + return nil + } +} + func DeepCopy_api_DeploymentConfig(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*DeploymentConfig) @@ -240,6 +255,17 @@ func DeepCopy_api_DeploymentConfigStatus(in interface{}, out interface{}, c *con } else { out.Details = nil } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Conditions = nil + } return nil } } @@ -322,11 +348,32 @@ func DeepCopy_api_DeploymentLogOptions(in interface{}, out interface{}, c *conve } } +func DeepCopy_api_DeploymentRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRequest) + out := out.(*DeploymentRequest) + out.TypeMeta = in.TypeMeta + out.Name = in.Name + out.Latest = in.Latest + out.Force = in.Force + return nil + } +} + func DeepCopy_api_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*DeploymentStrategy) out := out.(*DeploymentStrategy) out.Type = in.Type + if in.CustomParams != nil { + in, out := &in.CustomParams, &out.CustomParams + *out = new(CustomDeploymentStrategyParams) + if err := DeepCopy_api_CustomDeploymentStrategyParams(*in, *out, c); err != nil { + return err + } + } else { + out.CustomParams = nil + } if in.RecreateParams != nil { in, out := &in.RecreateParams, &out.RecreateParams *out = new(RecreateDeploymentStrategyParams) @@ -345,15 +392,6 @@ func DeepCopy_api_DeploymentStrategy(in interface{}, out interface{}, c *convers } else { 
out.RollingParams = nil } - if in.CustomParams != nil { - in, out := &in.CustomParams, &out.CustomParams - *out = new(CustomDeploymentStrategyParams) - if err := DeepCopy_api_CustomDeploymentStrategyParams(*in, *out, c); err != nil { - return err - } - } else { - out.CustomParams = nil - } if err := pkg_api.DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { return err } @@ -545,13 +583,6 @@ func DeepCopy_api_RollingDeploymentStrategyParams(in interface{}, out interface{ } out.MaxUnavailable = in.MaxUnavailable out.MaxSurge = in.MaxSurge - if in.UpdatePercent != nil { - in, out := &in.UpdatePercent, &out.UpdatePercent - *out = new(int32) - **out = **in - } else { - out.UpdatePercent = nil - } if in.Pre != nil { in, out := &in.Pre, &out.Pre *out = new(LifecycleHook) diff --git a/vendor/github.com/openshift/origin/pkg/deploy/cmd/delete.go b/vendor/github.com/openshift/origin/pkg/deploy/cmd/delete.go deleted file mode 100644 index 7a59422ac..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/cmd/delete.go +++ /dev/null @@ -1,136 +0,0 @@ -package cmd - -import ( - "time" - - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - kutil "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/wait" - - "github.com/openshift/origin/pkg/client" - deployapi "github.com/openshift/origin/pkg/deploy/api" - "github.com/openshift/origin/pkg/deploy/util" -) - -// NewDeploymentConfigReaper returns a new reaper for deploymentConfigs -func NewDeploymentConfigReaper(oc client.Interface, kc kclient.Interface) kubectl.Reaper { - return &DeploymentConfigReaper{oc: oc, kc: kc, pollInterval: kubectl.Interval, timeout: kubectl.Timeout} -} - -// DeploymentConfigReaper implements the Reaper interface for deploymentConfigs -type DeploymentConfigReaper struct { - oc client.Interface - kc 
kclient.Interface - pollInterval, timeout time.Duration -} - -// pause marks the deployment configuration as paused to avoid triggering new -// deployments. -func (reaper *DeploymentConfigReaper) pause(namespace, name string) (*deployapi.DeploymentConfig, error) { - return client.UpdateConfigWithRetries(reaper.oc, namespace, name, func(d *deployapi.DeploymentConfig) { - d.Spec.RevisionHistoryLimit = kutil.Int32Ptr(0) - d.Spec.Replicas = 0 - d.Spec.Paused = true - }) -} - -// Stop scales a replication controller via its deployment configuration down to -// zero replicas, waits for all of them to get deleted and then deletes both the -// replication controller and its deployment configuration. -func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error { - // Pause the deployment configuration to prevent the new deployments from - // being triggered. - config, err := reaper.pause(namespace, name) - configNotFound := kerrors.IsNotFound(err) - if err != nil && !configNotFound { - return err - } - - var ( - isPaused bool - legacy bool - ) - // Determine if the deployment config controller noticed the pause. - if !configNotFound { - if err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { - dc, err := reaper.oc.DeploymentConfigs(namespace).Get(name) - if err != nil { - return false, err - } - isPaused = dc.Spec.Paused - return dc.Status.ObservedGeneration >= config.Generation, nil - }); err != nil { - return err - } - - // If we failed to pause the deployment config, it means we are talking to - // old API that does not support pausing. In that case, we delete the - // deployment config to stay backward compatible. - if !isPaused { - if err := reaper.oc.DeploymentConfigs(namespace).Delete(name); err != nil { - return err - } - // Setting this to true avoid deleting the config at the end. - legacy = true - } - } - - // Clean up deployments related to the config. 
Even if the deployment - // configuration has been deleted, we want to sweep the existing replication - // controllers and clean them up. - options := kapi.ListOptions{LabelSelector: util.ConfigSelector(name)} - rcList, err := reaper.kc.ReplicationControllers(namespace).List(options) - if err != nil { - return err - } - rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc) - if err != nil { - return err - } - - // If there is neither a config nor any deployments, nor any deployer pods, we can return NotFound. - deployments := rcList.Items - - if configNotFound && len(deployments) == 0 { - return kerrors.NewNotFound(kapi.Resource("deploymentconfig"), name) - } - - for _, rc := range deployments { - if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { - // Better not error out here... - glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err) - } - - // Only remove deployer pods when the deployment was failed. For completed - // deployment the pods should be already deleted. - if !util.IsFailedDeployment(&rc) { - continue - } - - // Delete all deployer and hook pods - options = kapi.ListOptions{LabelSelector: util.DeployerPodSelector(rc.Name)} - podList, err := reaper.kc.Pods(rc.Namespace).List(options) - if err != nil { - return err - } - for _, pod := range podList.Items { - err := reaper.kc.Pods(pod.Namespace).Delete(pod.Name, gracePeriod) - if err != nil { - // Better not error out here... - glog.Infof("Cannot delete lifecycle Pod %s/%s for deployment config %s/%s: %v", pod.Namespace, pod.Name, namespace, name, err) - } - } - } - - // Nothing to delete or we already deleted the deployment config because we - // failed to pause. 
- if configNotFound || legacy { - return nil - } - - return reaper.oc.DeploymentConfigs(namespace).Delete(name) -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/cmd/doc.go b/vendor/github.com/openshift/origin/pkg/deploy/cmd/doc.go deleted file mode 100644 index e9976e44f..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/cmd/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package cmd contains various interface implementations for command-line tools -// associated with deploymentconfigs. -package cmd diff --git a/vendor/github.com/openshift/origin/pkg/deploy/cmd/generate.go b/vendor/github.com/openshift/origin/pkg/deploy/cmd/generate.go deleted file mode 100644 index 327282048..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/cmd/generate.go +++ /dev/null @@ -1,41 +0,0 @@ -package cmd - -import ( - "fmt" - "reflect" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/runtime" - - deployapi "github.com/openshift/origin/pkg/deploy/api" -) - -var basic = kubectl.BasicReplicationController{} - -type BasicDeploymentConfigController struct{} - -func (BasicDeploymentConfigController) ParamNames() []kubectl.GeneratorParam { - return basic.ParamNames() -} - -func (BasicDeploymentConfigController) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - obj, err := basic.Generate(genericParams) - if err != nil { - return nil, err - } - switch t := obj.(type) { - case *kapi.ReplicationController: - obj = &deployapi.DeploymentConfig{ - ObjectMeta: t.ObjectMeta, - Spec: deployapi.DeploymentConfigSpec{ - Selector: t.Spec.Selector, - Replicas: t.Spec.Replicas, - Template: t.Spec.Template, - }, - } - default: - return nil, fmt.Errorf("unrecognized object type: %v", reflect.TypeOf(t)) - } - return obj, nil -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/cmd/history.go b/vendor/github.com/openshift/origin/pkg/deploy/cmd/history.go deleted file mode 
100644 index 7f52e0549..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/cmd/history.go +++ /dev/null @@ -1,99 +0,0 @@ -package cmd - -import ( - "bytes" - "fmt" - "sort" - "text/tabwriter" - - kapi "k8s.io/kubernetes/pkg/api" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/openshift/origin/pkg/client" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deployutil "github.com/openshift/origin/pkg/deploy/util" -) - -func NewDeploymentConfigHistoryViewer(oc client.Interface, kc kclient.Interface) kubectl.HistoryViewer { - return &DeploymentConfigHistoryViewer{dn: oc, rn: kc} -} - -// DeploymentConfigHistoryViewer is an implementation of the kubectl HistoryViewer interface -// for deployment configs. -type DeploymentConfigHistoryViewer struct { - rn kclient.ReplicationControllersNamespacer - dn client.DeploymentConfigsNamespacer -} - -var _ kubectl.HistoryViewer = &DeploymentConfigHistoryViewer{} - -// ViewHistory returns a description of all the history it can find for a deployment config. 
-func (h *DeploymentConfigHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - opts := kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(name)} - deploymentList, err := h.rn.ReplicationControllers(namespace).List(opts) - if err != nil { - return "", err - } - history := deploymentList.Items - - if len(deploymentList.Items) == 0 { - return "No rollout history found.", nil - } - - // Print details of a specific revision - if revision > 0 { - var desired *kapi.PodTemplateSpec - // We could use a binary search here but brute-force is always faster to write - for i := range history { - rc := history[i] - - if deployutil.DeploymentVersionFor(&rc) == revision { - desired = rc.Spec.Template - break - } - } - - if desired == nil { - return "", fmt.Errorf("unable to find the specified revision") - } - - buf := bytes.NewBuffer([]byte{}) - kubectl.DescribePodTemplate(desired, buf) - return buf.String(), nil - } - - sort.Sort(deployutil.ByLatestVersionAsc(history)) - - return tabbedString(func(out *tabwriter.Writer) error { - fmt.Fprintf(out, "REVISION\tSTATUS\tCAUSE\n") - for i := range history { - rc := history[i] - - rev := deployutil.DeploymentVersionFor(&rc) - status := deployutil.DeploymentStatusFor(&rc) - cause := rc.Annotations[deployapi.DeploymentStatusReasonAnnotation] - if len(cause) == 0 { - cause = "" - } - fmt.Fprintf(out, "%d\t%s\t%s\n", rev, status, cause) - } - return nil - }) -} - -// TODO: Re-use from an utility package -func tabbedString(f func(*tabwriter.Writer) error) (string, error) { - out := new(tabwriter.Writer) - buf := &bytes.Buffer{} - out.Init(buf, 0, 8, 1, '\t', 0) - - err := f(out) - if err != nil { - return "", err - } - - out.Flush() - str := string(buf.String()) - return str, nil -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/cmd/rollback.go b/vendor/github.com/openshift/origin/pkg/deploy/cmd/rollback.go deleted file mode 100644 index cff7dc9e3..000000000 --- 
a/vendor/github.com/openshift/origin/pkg/deploy/cmd/rollback.go +++ /dev/null @@ -1,56 +0,0 @@ -package cmd - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/runtime" - - "github.com/openshift/origin/pkg/client" - deployapi "github.com/openshift/origin/pkg/deploy/api" -) - -func NewDeploymentConfigRollbacker(oc client.Interface) kubectl.Rollbacker { - return &DeploymentConfigRollbacker{dn: oc} -} - -// DeploymentConfigRollbacker is an implementation of the kubectl Rollbacker interface -// for deployment configs. -type DeploymentConfigRollbacker struct { - dn client.DeploymentConfigsNamespacer -} - -var _ kubectl.Rollbacker = &DeploymentConfigRollbacker{} - -// Rollback the provided deployment config to a specific revision. If revision is zero, we will -// rollback to the previous deployment. -func (r *DeploymentConfigRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64) (string, error) { - config, ok := obj.(*deployapi.DeploymentConfig) - if !ok { - return "", fmt.Errorf("passed object is not a deployment config: %#v", obj) - } - if config.Spec.Paused { - return "", fmt.Errorf("cannot rollback a paused config; resume it first with 'rollout resume dc/%s' and try again", config.Name) - } - - rollback := &deployapi.DeploymentConfigRollback{ - Name: config.Name, - UpdatedAnnotations: updatedAnnotations, - Spec: deployapi.DeploymentConfigRollbackSpec{ - Revision: toRevision, - IncludeTemplate: true, - }, - } - - rolledback, err := r.dn.DeploymentConfigs(config.Namespace).Rollback(rollback) - if err != nil { - return "", err - } - - _, err = r.dn.DeploymentConfigs(config.Namespace).Update(rolledback) - if err != nil { - return "", err - } - - return "rolled back", nil -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/cmd/scale.go b/vendor/github.com/openshift/origin/pkg/deploy/cmd/scale.go deleted file mode 100644 index 29d2e98ba..000000000 --- 
a/vendor/github.com/openshift/origin/pkg/deploy/cmd/scale.go +++ /dev/null @@ -1,98 +0,0 @@ -package cmd - -import ( - "time" - - kapi "k8s.io/kubernetes/pkg/api" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/util/wait" - - "github.com/openshift/origin/pkg/client" - "github.com/openshift/origin/pkg/deploy/util" -) - -// NewDeploymentConfigScaler returns a new scaler for deploymentConfigs -func NewDeploymentConfigScaler(oc client.Interface, kc kclient.Interface) kubectl.Scaler { - return &DeploymentConfigScaler{rcClient: kc, dcClient: oc, clientInterface: kc} -} - -// DeploymentConfigScaler is a wrapper for the kubectl Scaler client -type DeploymentConfigScaler struct { - rcClient kclient.ReplicationControllersNamespacer - dcClient client.DeploymentConfigsNamespacer - - clientInterface kclient.Interface -} - -// Scale updates the DeploymentConfig with the provided namespace/name, to a -// new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for its -// deployment replica count to reach the new value (if wait is not nil). 
-func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error { - if preconditions == nil { - preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize, nil) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - // TODO: convert to a watch and use resource version from the ScaleCondition - kubernetes/kubernetes#31051 - if waitForReplicas != nil { - dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name) - if err != nil { - return err - } - rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc)) - if err != nil { - return err - } - return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, controllerHasSpecifiedReplicas(scaler.clientInterface, rc, dc.Spec.Replicas)) - } - return nil -} - -// ScaleSimple does a simple one-shot attempt at scaling - not useful on its -// own, but a necessary building block for Scale. 
-func (scaler *DeploymentConfigScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) (string, error) { - scale, err := scaler.dcClient.DeploymentConfigs(namespace).GetScale(name) - if err != nil { - return "", err - } - scale.Spec.Replicas = int32(newSize) - updated, err := scaler.dcClient.DeploymentConfigs(namespace).UpdateScale(scale) - if err != nil { - return "", kubectl.ScaleError{FailureType: kubectl.ScaleUpdateFailure, ResourceVersion: "Unknown", ActualError: err} - } - return updated.ResourceVersion, nil -} - -// controllerHasSpecifiedReplicas returns a condition that will be true if and -// only if the specified replica count for a controller's ReplicaSelector -// equals the Replicas count. -// -// This is a slightly modified version of -// unversioned.ControllerHasDesiredReplicas. This is necessary because when -// scaling an RC via a DC, the RC spec replica count is not immediately -// updated to match the owning DC. -func controllerHasSpecifiedReplicas(c kclient.Interface, controller *kapi.ReplicationController, specifiedReplicas int32) wait.ConditionFunc { - // If we're given a controller where the status lags the spec, it either means that the controller is stale, - // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. - desiredGeneration := controller.Generation - - return func() (bool, error) { - ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, - // or, after this check has passed, a modification causes the rc manager to create more pods. - // This will not be an issue once we've implemented graceful delete for rcs, but till then - // concurrent stop operations on the same rc might have unintended side effects. 
- return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == specifiedReplicas, nil - } -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/dc.go b/vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/dc.go deleted file mode 100644 index ca1c20ad9..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/dc.go +++ /dev/null @@ -1,126 +0,0 @@ -package analysis - -import ( - "fmt" - - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - buildedges "github.com/openshift/origin/pkg/build/graph" - buildutil "github.com/openshift/origin/pkg/build/util" - deployedges "github.com/openshift/origin/pkg/deploy/graph" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" - imageedges "github.com/openshift/origin/pkg/image/graph" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" -) - -const ( - MissingImageStreamErr = "MissingImageStream" - MissingImageStreamTagWarning = "MissingImageStreamTag" - MissingReadinessProbeWarning = "MissingReadinessProbe" -) - -// FindDeploymentConfigTriggerErrors checks for possible failures in deployment config -// image change triggers. -// -// Precedence of failures: -// 1. The image stream for the tag of interest does not exist. -// 2. The image stream tag does not exist. -func FindDeploymentConfigTriggerErrors(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) { - dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode) - marker := ictMarker(g, f, dcNode) - if marker != nil { - markers = append(markers, *marker) - } - } - - return markers -} - -// ictMarker inspects the image change triggers for the provided deploymentconfig and returns -// a marker in case of the following two scenarios: -// -// 1. The image stream pointed by the dc trigger doen not exist. 
-// 2. The image stream tag pointed by the dc trigger does not exist and there is no build in -// flight that could push to the tag. -func ictMarker(g osgraph.Graph, f osgraph.Namer, dcNode *deploygraph.DeploymentConfigNode) *osgraph.Marker { - for _, uncastIstNode := range g.PredecessorNodesByEdgeKind(dcNode, deployedges.TriggersDeploymentEdgeKind) { - if istNode := uncastIstNode.(*imagegraph.ImageStreamTagNode); !istNode.Found() { - // The image stream for the tag of interest does not exist. - if isNode, exists := doesImageStreamExist(g, uncastIstNode); !exists { - return &osgraph.Marker{ - Node: dcNode, - RelatedNodes: []graph.Node{uncastIstNode, isNode}, - - Severity: osgraph.ErrorSeverity, - Key: MissingImageStreamErr, - Message: fmt.Sprintf("The image trigger for %s will have no effect because %s does not exist.", - f.ResourceName(dcNode), f.ResourceName(isNode)), - // TODO: Suggest `oc create imagestream` once we have that. - } - } - - for _, bcNode := range buildedges.BuildConfigsForTag(g, istNode) { - // Avoid warning for the dc image trigger in case there is a build in flight. - if latestBuild := buildedges.GetLatestBuild(g, bcNode); latestBuild != nil && !buildutil.IsBuildComplete(latestBuild.Build) { - return nil - } - } - - // The image stream tag of interest does not exist. 
- return &osgraph.Marker{ - Node: dcNode, - RelatedNodes: []graph.Node{uncastIstNode}, - - Severity: osgraph.WarningSeverity, - Key: MissingImageStreamTagWarning, - Message: fmt.Sprintf("The image trigger for %s will have no effect until %s is imported or created by a build.", - f.ResourceName(dcNode), f.ResourceName(istNode)), - } - } - } - return nil -} - -func doesImageStreamExist(g osgraph.Graph, istag graph.Node) (graph.Node, bool) { - for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamGraphEdgeKind) { - return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found() - } - for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamImageGraphEdgeKind) { - return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found() - } - return nil, false -} - -// FindDeploymentConfigReadinessWarnings inspects deploymentconfigs and reports those that -// don't have readiness probes set up. -func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker { - markers := []osgraph.Marker{} - -Node: - for _, uncastDcNode := range g.NodesByKind(deploygraph.DeploymentConfigNodeKind) { - dcNode := uncastDcNode.(*deploygraph.DeploymentConfigNode) - if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 { - for _, container := range t.Spec.Containers { - if container.ReadinessProbe != nil { - continue Node - } - } - // All of the containers in the deployment config lack a readiness probe - markers = append(markers, osgraph.Marker{ - Node: uncastDcNode, - Severity: osgraph.WarningSeverity, - Key: MissingReadinessProbeWarning, - Message: fmt.Sprintf("%s has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful.", - f.ResourceName(dcNode)), - Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --readiness ...", setProbeCommand, f.ResourceName(dcNode))), - }) - continue Node 
- } - } - - return markers -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/doc.go b/vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/doc.go deleted file mode 100644 index cba9edf11..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/graph/analysis/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package analysis provides functions that analyse deployment configurations and setup markers -// that will be reported by oc status -package analysis diff --git a/vendor/github.com/openshift/origin/pkg/deploy/graph/edges.go b/vendor/github.com/openshift/origin/pkg/deploy/graph/edges.go deleted file mode 100644 index ead9a385f..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/graph/edges.go +++ /dev/null @@ -1,85 +0,0 @@ -package graph - -import ( - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" - imageapi "github.com/openshift/origin/pkg/image/api" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" -) - -const ( - // TriggersDeploymentEdgeKind points from DeploymentConfigs to ImageStreamTags that trigger the deployment - TriggersDeploymentEdgeKind = "TriggersDeployment" - // UsedInDeploymentEdgeKind points from DeploymentConfigs to DockerImageReferences that are used in the deployment - UsedInDeploymentEdgeKind = "UsedInDeployment" - // DeploymentEdgeKind points from DeploymentConfigs to the ReplicationControllers that are fulfilling the deployment - DeploymentEdgeKind = "Deployment" -) - -// AddTriggerEdges creates edges that point to named Docker image repositories for each image used in the deployment. 
-func AddTriggerEdges(g osgraph.MutableUniqueGraph, node *deploygraph.DeploymentConfigNode) *deploygraph.DeploymentConfigNode { - podTemplate := node.DeploymentConfig.Spec.Template - if podTemplate == nil { - return node - } - - deployapi.EachTemplateImage( - &podTemplate.Spec, - deployapi.DeploymentConfigHasTrigger(node.DeploymentConfig), - func(image deployapi.TemplateImage, err error) { - if err != nil { - return - } - if image.From != nil { - if len(image.From.Name) == 0 { - return - } - name, tag, _ := imageapi.SplitImageStreamTag(image.From.Name) - in := imagegraph.FindOrCreateSyntheticImageStreamTagNode(g, imagegraph.MakeImageStreamTagObjectMeta(image.From.Namespace, name, tag)) - g.AddEdge(in, node, TriggersDeploymentEdgeKind) - return - } - - tag := image.Ref.Tag - image.Ref.Tag = "" - in := imagegraph.EnsureDockerRepositoryNode(g, image.Ref.String(), tag) - g.AddEdge(in, node, UsedInDeploymentEdgeKind) - }) - - return node -} - -func AddAllTriggerEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if dcNode, ok := node.(*deploygraph.DeploymentConfigNode); ok { - AddTriggerEdges(g, dcNode) - } - } -} - -func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *deploygraph.DeploymentConfigNode) *deploygraph.DeploymentConfigNode { - for _, n := range g.(graph.Graph).Nodes() { - if rcNode, ok := n.(*kubegraph.ReplicationControllerNode); ok { - if rcNode.ReplicationController.Namespace != node.DeploymentConfig.Namespace { - continue - } - if BelongsToDeploymentConfig(node.DeploymentConfig, rcNode.ReplicationController) { - g.AddEdge(node, rcNode, DeploymentEdgeKind) - } - } - } - - return node -} - -func AddAllDeploymentEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if dcNode, ok := node.(*deploygraph.DeploymentConfigNode); ok { - AddDeploymentEdges(g, dcNode) - } - } -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/graph/helpers.go 
b/vendor/github.com/openshift/origin/pkg/deploy/graph/helpers.go deleted file mode 100644 index 6f99c6227..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/graph/helpers.go +++ /dev/null @@ -1,49 +0,0 @@ -package graph - -import ( - "sort" - - kapi "k8s.io/kubernetes/pkg/api" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - deployapi "github.com/openshift/origin/pkg/deploy/api" - deploygraph "github.com/openshift/origin/pkg/deploy/graph/nodes" - deployutil "github.com/openshift/origin/pkg/deploy/util" -) - -// RelevantDeployments returns the active deployment and a list of inactive deployments (in order from newest to oldest) -func RelevantDeployments(g osgraph.Graph, dcNode *deploygraph.DeploymentConfigNode) (*kubegraph.ReplicationControllerNode, []*kubegraph.ReplicationControllerNode) { - allDeployments := []*kubegraph.ReplicationControllerNode{} - uncastDeployments := g.SuccessorNodesByEdgeKind(dcNode, DeploymentEdgeKind) - if len(uncastDeployments) == 0 { - return nil, []*kubegraph.ReplicationControllerNode{} - } - - for i := range uncastDeployments { - allDeployments = append(allDeployments, uncastDeployments[i].(*kubegraph.ReplicationControllerNode)) - } - - sort.Sort(RecentDeploymentReferences(allDeployments)) - - if dcNode.DeploymentConfig.Status.LatestVersion == deployutil.DeploymentVersionFor(allDeployments[0].ReplicationController) { - return allDeployments[0], allDeployments[1:] - } - - return nil, allDeployments -} - -func BelongsToDeploymentConfig(config *deployapi.DeploymentConfig, b *kapi.ReplicationController) bool { - if b.Annotations != nil { - return config.Name == deployutil.DeploymentConfigNameFor(b) - } - return false -} - -type RecentDeploymentReferences []*kubegraph.ReplicationControllerNode - -func (m RecentDeploymentReferences) Len() int { return len(m) } -func (m RecentDeploymentReferences) Swap(i, j int) { m[i], m[j] = m[j], m[i] 
} -func (m RecentDeploymentReferences) Less(i, j int) bool { - return deployutil.DeploymentVersionFor(m[i].ReplicationController) > deployutil.DeploymentVersionFor(m[j].ReplicationController) -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/nodes.go b/vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/nodes.go deleted file mode 100644 index ac59ebc73..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/nodes.go +++ /dev/null @@ -1,38 +0,0 @@ -package nodes - -import ( - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - depoyapi "github.com/openshift/origin/pkg/deploy/api" -) - -// EnsureDeploymentConfigNode adds the provided deployment config to the graph if it does not exist -func EnsureDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *depoyapi.DeploymentConfig) *DeploymentConfigNode { - dcName := DeploymentConfigNodeName(dc) - dcNode := osgraph.EnsureUnique( - g, - dcName, - func(node osgraph.Node) graph.Node { - return &DeploymentConfigNode{Node: node, DeploymentConfig: dc, IsFound: true} - }, - ).(*DeploymentConfigNode) - - if dc.Spec.Template != nil { - podTemplateSpecNode := kubegraph.EnsurePodTemplateSpecNode(g, dc.Spec.Template, dc.Namespace, dcName) - g.AddEdge(dcNode, podTemplateSpecNode, osgraph.ContainsEdgeKind) - } - - return dcNode -} - -func FindOrCreateSyntheticDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *depoyapi.DeploymentConfig) *DeploymentConfigNode { - return osgraph.EnsureUnique( - g, - DeploymentConfigNodeName(dc), - func(node osgraph.Node) graph.Node { - return &DeploymentConfigNode{Node: node, DeploymentConfig: dc, IsFound: false} - }, - ).(*DeploymentConfigNode) -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/types.go b/vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/types.go deleted file mode 100644 index 
ce847ad3d..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/graph/nodes/types.go +++ /dev/null @@ -1,39 +0,0 @@ -package nodes - -import ( - "reflect" - - osgraph "github.com/openshift/origin/pkg/api/graph" - deployapi "github.com/openshift/origin/pkg/deploy/api" -) - -var ( - DeploymentConfigNodeKind = reflect.TypeOf(deployapi.DeploymentConfig{}).Name() -) - -func DeploymentConfigNodeName(o *deployapi.DeploymentConfig) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(DeploymentConfigNodeKind, o) -} - -type DeploymentConfigNode struct { - osgraph.Node - DeploymentConfig *deployapi.DeploymentConfig - - IsFound bool -} - -func (n DeploymentConfigNode) Found() bool { - return n.IsFound -} - -func (n DeploymentConfigNode) Object() interface{} { - return n.DeploymentConfig -} - -func (n DeploymentConfigNode) String() string { - return string(DeploymentConfigNodeName(n.DeploymentConfig)) -} - -func (*DeploymentConfigNode) Kind() string { - return DeploymentConfigNodeKind -} diff --git a/vendor/github.com/openshift/origin/pkg/deploy/util/util.go b/vendor/github.com/openshift/origin/pkg/deploy/util/util.go deleted file mode 100644 index ff3d4c550..000000000 --- a/vendor/github.com/openshift/origin/pkg/deploy/util/util.go +++ /dev/null @@ -1,496 +0,0 @@ -package util - -import ( - "errors" - "fmt" - "sort" - "strconv" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - kdeplutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" - - deployapi "github.com/openshift/origin/pkg/deploy/api" - "github.com/openshift/origin/pkg/util/namer" - kclient "k8s.io/kubernetes/pkg/client/unversioned" -) - -// LatestDeploymentNameForConfig returns a stable identifier for config based on its version. 
-func LatestDeploymentNameForConfig(config *deployapi.DeploymentConfig) string { - return fmt.Sprintf("%s-%d", config.Name, config.Status.LatestVersion) -} - -// LatestDeploymentInfo returns info about the latest deployment for a config, -// or nil if there is no latest deployment. The latest deployment is not -// always the same as the active deployment. -func LatestDeploymentInfo(config *deployapi.DeploymentConfig, deployments []api.ReplicationController) (bool, *api.ReplicationController) { - if config.Status.LatestVersion == 0 || len(deployments) == 0 { - return false, nil - } - sort.Sort(ByLatestVersionDesc(deployments)) - candidate := &deployments[0] - return DeploymentVersionFor(candidate) == config.Status.LatestVersion, candidate -} - -// ActiveDeployment returns the latest complete deployment, or nil if there is -// no such deployment. The active deployment is not always the same as the -// latest deployment. -func ActiveDeployment(config *deployapi.DeploymentConfig, input []api.ReplicationController) *api.ReplicationController { - var activeDeployment *api.ReplicationController - var lastCompleteDeploymentVersion int64 = 0 - for i := range input { - deployment := &input[i] - deploymentVersion := DeploymentVersionFor(deployment) - if DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete && deploymentVersion > lastCompleteDeploymentVersion { - activeDeployment = deployment - lastCompleteDeploymentVersion = deploymentVersion - } - } - return activeDeployment -} - -// DeployerPodSuffix is the suffix added to pods created from a deployment -const DeployerPodSuffix = "deploy" - -// DeployerPodNameForDeployment returns the name of a pod for a given deployment -func DeployerPodNameForDeployment(deployment string) string { - return namer.GetPodName(deployment, DeployerPodSuffix) -} - -// LabelForDeployment builds a string identifier for a Deployment. 
-func LabelForDeployment(deployment *api.ReplicationController) string { - return fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name) -} - -// LabelForDeploymentConfig builds a string identifier for a DeploymentConfig. -func LabelForDeploymentConfig(config *deployapi.DeploymentConfig) string { - return fmt.Sprintf("%s/%s", config.Namespace, config.Name) -} - -// DeploymentNameForConfigVersion returns the name of the version-th deployment -// for the config that has the provided name -func DeploymentNameForConfigVersion(name string, version int64) string { - return fmt.Sprintf("%s-%d", name, version) -} - -// ConfigSelector returns a label Selector which can be used to find all -// deployments for a DeploymentConfig. -// -// TODO: Using the annotation constant for now since the value is correct -// but we could consider adding a new constant to the public types. -func ConfigSelector(name string) labels.Selector { - return labels.Set{deployapi.DeploymentConfigAnnotation: name}.AsSelector() -} - -// DeployerPodSelector returns a label Selector which can be used to find all -// deployer pods associated with a deployment with name. -func DeployerPodSelector(name string) labels.Selector { - return labels.Set{deployapi.DeployerPodForDeploymentLabel: name}.AsSelector() -} - -// AnyDeployerPodSelector returns a label Selector which can be used to find -// all deployer pods across all deployments, including hook and custom -// deployer pods. 
-func AnyDeployerPodSelector() labels.Selector { - sel, _ := labels.Parse(deployapi.DeployerPodForDeploymentLabel) - return sel -} - -// HasChangeTrigger returns whether the provided deployment configuration has -// a config change trigger or not -func HasChangeTrigger(config *deployapi.DeploymentConfig) bool { - for _, trigger := range config.Spec.Triggers { - if trigger.Type == deployapi.DeploymentTriggerOnConfigChange { - return true - } - } - return false -} - -func DeploymentConfigDeepCopy(dc *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) { - objCopy, err := api.Scheme.DeepCopy(dc) - if err != nil { - return nil, err - } - copied, ok := objCopy.(*deployapi.DeploymentConfig) - if !ok { - return nil, fmt.Errorf("expected DeploymentConfig, got %#v", objCopy) - } - return copied, nil -} - -func DeploymentDeepCopy(rc *api.ReplicationController) (*api.ReplicationController, error) { - objCopy, err := api.Scheme.DeepCopy(rc) - if err != nil { - return nil, err - } - copied, ok := objCopy.(*api.ReplicationController) - if !ok { - return nil, fmt.Errorf("expected ReplicationController, got %#v", objCopy) - } - return copied, nil -} - -// DecodeDeploymentConfig decodes a DeploymentConfig from controller using codec. An error is returned -// if the controller doesn't contain an encoded config. -func DecodeDeploymentConfig(controller *api.ReplicationController, decoder runtime.Decoder) (*deployapi.DeploymentConfig, error) { - encodedConfig := []byte(EncodedDeploymentConfigFor(controller)) - decoded, err := runtime.Decode(decoder, encodedConfig) - if err != nil { - return nil, fmt.Errorf("failed to decode DeploymentConfig from controller: %v", err) - } - config, ok := decoded.(*deployapi.DeploymentConfig) - if !ok { - return nil, fmt.Errorf("decoded object from controller is not a DeploymentConfig") - } - return config, nil -} - -// EncodeDeploymentConfig encodes config as a string using codec. 
-func EncodeDeploymentConfig(config *deployapi.DeploymentConfig, codec runtime.Codec) (string, error) { - bytes, err := runtime.Encode(codec, config) - if err != nil { - return "", err - } - return string(bytes[:]), nil -} - -// MakeDeployment creates a deployment represented as a ReplicationController and based on the given -// DeploymentConfig. The controller replica count will be zero. -func MakeDeployment(config *deployapi.DeploymentConfig, codec runtime.Codec) (*api.ReplicationController, error) { - var err error - var encodedConfig string - - if encodedConfig, err = EncodeDeploymentConfig(config, codec); err != nil { - return nil, err - } - - deploymentName := LatestDeploymentNameForConfig(config) - - podSpec := api.PodSpec{} - if err := api.Scheme.Convert(&config.Spec.Template.Spec, &podSpec, nil); err != nil { - return nil, fmt.Errorf("couldn't clone podSpec: %v", err) - } - - controllerLabels := make(labels.Set) - for k, v := range config.Labels { - controllerLabels[k] = v - } - // Correlate the deployment with the config. - // TODO: Using the annotation constant for now since the value is correct - // but we could consider adding a new constant to the public types. - controllerLabels[deployapi.DeploymentConfigAnnotation] = config.Name - - // Ensure that pods created by this deployment controller can be safely associated back - // to the controller, and that multiple deployment controllers for the same config don't - // manipulate each others' pods. 
- selector := map[string]string{} - for k, v := range config.Spec.Selector { - selector[k] = v - } - selector[deployapi.DeploymentConfigLabel] = config.Name - selector[deployapi.DeploymentLabel] = deploymentName - - podLabels := make(labels.Set) - for k, v := range config.Spec.Template.Labels { - podLabels[k] = v - } - podLabels[deployapi.DeploymentConfigLabel] = config.Name - podLabels[deployapi.DeploymentLabel] = deploymentName - - podAnnotations := make(labels.Set) - for k, v := range config.Spec.Template.Annotations { - podAnnotations[k] = v - } - podAnnotations[deployapi.DeploymentAnnotation] = deploymentName - podAnnotations[deployapi.DeploymentConfigAnnotation] = config.Name - podAnnotations[deployapi.DeploymentVersionAnnotation] = strconv.FormatInt(config.Status.LatestVersion, 10) - - deployment := &api.ReplicationController{ - ObjectMeta: api.ObjectMeta{ - Name: deploymentName, - Namespace: config.Namespace, - Annotations: map[string]string{ - deployapi.DeploymentConfigAnnotation: config.Name, - deployapi.DeploymentStatusAnnotation: string(deployapi.DeploymentStatusNew), - deployapi.DeploymentEncodedConfigAnnotation: encodedConfig, - deployapi.DeploymentVersionAnnotation: strconv.FormatInt(config.Status.LatestVersion, 10), - // This is the target replica count for the new deployment. 
- deployapi.DesiredReplicasAnnotation: strconv.Itoa(int(config.Spec.Replicas)), - deployapi.DeploymentReplicasAnnotation: strconv.Itoa(0), - }, - Labels: controllerLabels, - }, - Spec: api.ReplicationControllerSpec{ - // The deployment should be inactive initially - Replicas: 0, - Selector: selector, - Template: &api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: podLabels, - Annotations: podAnnotations, - }, - Spec: podSpec, - }, - }, - } - if config.Status.Details != nil && len(config.Status.Details.Message) > 0 { - deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = config.Status.Details.Message - } - if value, ok := config.Annotations[deployapi.DeploymentIgnorePodAnnotation]; ok { - deployment.Annotations[deployapi.DeploymentIgnorePodAnnotation] = value - } - - return deployment, nil -} - -// GetReplicaCountForDeployments returns the sum of all replicas for the -// given deployments. -func GetReplicaCountForDeployments(deployments []api.ReplicationController) int32 { - totalReplicaCount := int32(0) - for _, deployment := range deployments { - totalReplicaCount += deployment.Spec.Replicas - } - return totalReplicaCount -} - -// GetStatusReplicaCountForDeployments returns the sum of the replicas reported in the -// status of the given deployments. -func GetStatusReplicaCountForDeployments(deployments []api.ReplicationController) int32 { - totalReplicaCount := int32(0) - for _, deployment := range deployments { - totalReplicaCount += deployment.Status.Replicas - } - return totalReplicaCount -} - -// GetAvailablePods returns all the available pods from the provided pod list. 
-func GetAvailablePods(pods []*api.Pod, minReadySeconds int32) int32 { - available := int32(0) - for i := range pods { - pod := pods[i] - if kdeplutil.IsPodAvailable(pod, minReadySeconds, time.Now()) { - available++ - } - } - return available -} - -func DeploymentConfigNameFor(obj runtime.Object) string { - return annotationFor(obj, deployapi.DeploymentConfigAnnotation) -} - -func DeploymentNameFor(obj runtime.Object) string { - return annotationFor(obj, deployapi.DeploymentAnnotation) -} - -func DeployerPodNameFor(obj runtime.Object) string { - return annotationFor(obj, deployapi.DeploymentPodAnnotation) -} - -func DeploymentStatusFor(obj runtime.Object) deployapi.DeploymentStatus { - return deployapi.DeploymentStatus(annotationFor(obj, deployapi.DeploymentStatusAnnotation)) -} - -func DeploymentStatusReasonFor(obj runtime.Object) string { - return annotationFor(obj, deployapi.DeploymentStatusReasonAnnotation) -} - -func DeploymentDesiredReplicas(obj runtime.Object) (int32, bool) { - return int32AnnotationFor(obj, deployapi.DesiredReplicasAnnotation) -} - -func DeploymentReplicas(obj runtime.Object) (int32, bool) { - return int32AnnotationFor(obj, deployapi.DeploymentReplicasAnnotation) -} - -func EncodedDeploymentConfigFor(obj runtime.Object) string { - return annotationFor(obj, deployapi.DeploymentEncodedConfigAnnotation) -} - -func DeploymentVersionFor(obj runtime.Object) int64 { - v, err := strconv.ParseInt(annotationFor(obj, deployapi.DeploymentVersionAnnotation), 10, 64) - if err != nil { - return -1 - } - return v -} - -func IsDeploymentCancelled(deployment *api.ReplicationController) bool { - value := annotationFor(deployment, deployapi.DeploymentCancelledAnnotation) - return strings.EqualFold(value, deployapi.DeploymentCancelledAnnotationValue) -} - -func HasSynced(dc *deployapi.DeploymentConfig) bool { - return dc.Status.ObservedGeneration >= dc.Generation -} - -// IsOwnedByConfig checks whether the provided replication controller is part of a -// 
deployment configuration. -// TODO: Switch to use owner references once we got those working. -func IsOwnedByConfig(deployment *api.ReplicationController) bool { - _, ok := deployment.Annotations[deployapi.DeploymentConfigAnnotation] - return ok -} - -// IsTerminatedDeployment returns true if the passed deployment has terminated (either -// complete or failed). -func IsTerminatedDeployment(deployment *api.ReplicationController) bool { - current := DeploymentStatusFor(deployment) - return current == deployapi.DeploymentStatusComplete || current == deployapi.DeploymentStatusFailed -} - -// IsFailedDeployment returns true if the passed deployment failed. -func IsFailedDeployment(deployment *api.ReplicationController) bool { - current := DeploymentStatusFor(deployment) - return current == deployapi.DeploymentStatusFailed -} - -// CanTransitionPhase returns whether it is allowed to go from the current to the next phase. -func CanTransitionPhase(current, next deployapi.DeploymentStatus) bool { - switch current { - case deployapi.DeploymentStatusNew: - switch next { - case deployapi.DeploymentStatusPending, - deployapi.DeploymentStatusRunning, - deployapi.DeploymentStatusFailed, - deployapi.DeploymentStatusComplete: - return true - } - case deployapi.DeploymentStatusPending: - switch next { - case deployapi.DeploymentStatusRunning, - deployapi.DeploymentStatusFailed, - deployapi.DeploymentStatusComplete: - return true - } - case deployapi.DeploymentStatusRunning: - switch next { - case deployapi.DeploymentStatusFailed, deployapi.DeploymentStatusComplete: - return true - } - } - return false -} - -// annotationFor returns the annotation with key for obj. 
-func annotationFor(obj runtime.Object, key string) string { - meta, err := api.ObjectMetaFor(obj) - if err != nil { - return "" - } - return meta.Annotations[key] -} - -func int32AnnotationFor(obj runtime.Object, key string) (int32, bool) { - s := annotationFor(obj, key) - if len(s) == 0 { - return 0, false - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, false - } - return int32(i), true -} - -// DeploymentsForCleanup determines which deployments for a configuration are relevant for the -// revision history limit quota -func DeploymentsForCleanup(configuration *deployapi.DeploymentConfig, deployments []api.ReplicationController) []api.ReplicationController { - // if the past deployment quota has been exceeded, we need to prune the oldest deployments - // until we are not exceeding the quota any longer, so we sort oldest first - sort.Sort(ByLatestVersionAsc(deployments)) - - relevantDeployments := []api.ReplicationController{} - activeDeployment := ActiveDeployment(configuration, deployments) - if activeDeployment == nil { - // if cleanup policy is set but no successful deployments have happened, there will be - // no active deployment. 
We can consider all of the deployments in this case except for - // the latest one - for i := range deployments { - deployment := &deployments[i] - if DeploymentVersionFor(deployment) != configuration.Status.LatestVersion { - relevantDeployments = append(relevantDeployments, *deployment) - } - } - } else { - // if there is an active deployment, we need to filter out any deployments that we don't - // care about, namely the active deployment and any newer deployments - for i := range deployments { - deployment := &deployments[i] - if deployment != activeDeployment && DeploymentVersionFor(deployment) < DeploymentVersionFor(activeDeployment) { - relevantDeployments = append(relevantDeployments, *deployment) - } - } - } - - return relevantDeployments -} - -// WaitForRunningDeployerPod waits a given period of time until the deployer pod -// for given replication controller is not running. -func WaitForRunningDeployerPod(podClient kclient.PodsNamespacer, rc *api.ReplicationController, timeout time.Duration) error { - podName := DeployerPodNameForDeployment(rc.Name) - canGetLogs := func(p *api.Pod) bool { - return api.PodSucceeded == p.Status.Phase || api.PodFailed == p.Status.Phase || api.PodRunning == p.Status.Phase - } - pod, err := podClient.Pods(rc.Namespace).Get(podName) - if err == nil && canGetLogs(pod) { - return nil - } - watcher, err := podClient.Pods(rc.Namespace).Watch( - api.ListOptions{ - FieldSelector: fields.Set{"metadata.name": podName}.AsSelector(), - }, - ) - if err != nil { - return err - } - - defer watcher.Stop() - if _, err := watch.Until(timeout, watcher, func(e watch.Event) (bool, error) { - if e.Type == watch.Error { - return false, fmt.Errorf("encountered error while watching for pod: %v", e.Object) - } - obj, isPod := e.Object.(*api.Pod) - if !isPod { - return false, errors.New("received unknown object while watching for pods") - } - return canGetLogs(obj), nil - }); err != nil { - return err - } - return nil -} - -// ByLatestVersionAsc sorts 
deployments by LatestVersion ascending. -type ByLatestVersionAsc []api.ReplicationController - -func (d ByLatestVersionAsc) Len() int { return len(d) } -func (d ByLatestVersionAsc) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d ByLatestVersionAsc) Less(i, j int) bool { - return DeploymentVersionFor(&d[i]) < DeploymentVersionFor(&d[j]) -} - -// ByLatestVersionDesc sorts deployments by LatestVersion descending. -type ByLatestVersionDesc []api.ReplicationController - -func (d ByLatestVersionDesc) Len() int { return len(d) } -func (d ByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d ByLatestVersionDesc) Less(i, j int) bool { - return DeploymentVersionFor(&d[j]) < DeploymentVersionFor(&d[i]) -} - -// ByMostRecent sorts deployments by most recently created. -type ByMostRecent []*api.ReplicationController - -func (s ByMostRecent) Len() int { return len(s) } -func (s ByMostRecent) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s ByMostRecent) Less(i, j int) bool { - return !s[i].CreationTimestamp.Before(s[j].CreationTimestamp) -} diff --git a/vendor/github.com/openshift/origin/pkg/image/api/helper.go b/vendor/github.com/openshift/origin/pkg/image/api/helper.go index 60338f5b8..a375ed3e8 100644 --- a/vendor/github.com/openshift/origin/pkg/image/api/helper.go +++ b/vendor/github.com/openshift/origin/pkg/image/api/helper.go @@ -18,6 +18,8 @@ import ( "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/golang/glog" + + "github.com/openshift/origin/pkg/image/reference" ) const ( @@ -48,24 +50,6 @@ func (fn DefaultRegistryFunc) DefaultRegistry() (string, bool) { return fn() } -// parseRepositoryTag splits a string into its name component and either tag or id if present. 
-// TODO remove -func parseRepositoryTag(repos string) (base string, tag string, id string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], "", parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "", "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag, "" - } - return repos, "", "" -} - // ParseImageStreamImageName splits a string into its name component and ID component, and returns an error // if the string is not in the right form. func ParseImageStreamImageName(input string) (name string, id string, err error) { @@ -109,16 +93,6 @@ func MakeImageStreamImageName(name, id string) string { return fmt.Sprintf("%s@%s", name, id) } -func isRegistryName(str string) bool { - switch { - case strings.Contains(str, ":"), - strings.Contains(str, "."), - str == "localhost": - return true - } - return false -} - // IsRegistryDockerHub returns true if the given registry name belongs to // Docker hub. func IsRegistryDockerHub(registry string) bool { @@ -134,60 +108,18 @@ func IsRegistryDockerHub(registry string) bool { // DockerImageReference. 
func ParseDockerImageReference(spec string) (DockerImageReference, error) { var ref DockerImageReference - // TODO replace with docker version once docker/docker PR11109 is merged upstream - stream, tag, id := parseRepositoryTag(spec) - repoParts := strings.Split(stream, "/") - switch len(repoParts) { - case 2: - if isRegistryName(repoParts[0]) { - // registry/name - ref.Registry = repoParts[0] - if IsRegistryDockerHub(ref.Registry) { - ref.Namespace = DockerDefaultNamespace - } - if len(repoParts[1]) == 0 { - return ref, fmt.Errorf("the docker pull spec %q must be two or three segments separated by slashes", spec) - } - ref.Name = repoParts[1] - ref.Tag = tag - ref.ID = id - break - } - // namespace/name - ref.Namespace = repoParts[0] - if len(repoParts[1]) == 0 { - return ref, fmt.Errorf("the docker pull spec %q must be two or three segments separated by slashes", spec) - } - ref.Name = repoParts[1] - ref.Tag = tag - ref.ID = id - break - case 3: - // registry/namespace/name - ref.Registry = repoParts[0] - ref.Namespace = repoParts[1] - if len(repoParts[2]) == 0 { - return ref, fmt.Errorf("the docker pull spec %q must be two or three segments separated by slashes", spec) - } - ref.Name = repoParts[2] - ref.Tag = tag - ref.ID = id - break - case 1: - // name - if len(repoParts[0]) == 0 { - return ref, fmt.Errorf("the docker pull spec %q must be two or three segments separated by slashes", spec) - } - ref.Name = repoParts[0] - ref.Tag = tag - ref.ID = id - break - default: - // TODO: this is no longer true with V2 - return ref, fmt.Errorf("the docker pull spec %q must be two or three segments separated by slashes", spec) + namedRef, err := reference.ParseNamedDockerImageReference(spec) + if err != nil { + return ref, err } + ref.Registry = namedRef.Registry + ref.Namespace = namedRef.Namespace + ref.Name = namedRef.Name + ref.Tag = namedRef.Tag + ref.ID = namedRef.ID + return ref, nil } diff --git 
a/vendor/github.com/openshift/origin/pkg/image/api/v1/generated.proto b/vendor/github.com/openshift/origin/pkg/image/api/v1/generated.proto deleted file mode 100644 index fe5371b28..000000000 --- a/vendor/github.com/openshift/origin/pkg/image/api/v1/generated.proto +++ /dev/null @@ -1,435 +0,0 @@ - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package jackfan.us.kg.openshift.origin.pkg.image.api.v1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// DockerImageReference points to a Docker image. -message DockerImageReference { - // Registry is the registry that contains the Docker image - optional string registry = 1; - - // Namespace is the namespace that contains the Docker image - optional string namespace = 2; - - // Name is the name of the Docker image - optional string name = 3; - - // Tag is which tag of the Docker image is being referenced - optional string tag = 4; - - // ID is the identifier for the Docker image - optional string iD = 5; -} - -// Image is an immutable representation of a Docker image and metadata at a point in time. -message Image { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // DockerImageReference is the string that can be used to pull this image. 
- optional string dockerImageReference = 2; - - // DockerImageMetadata contains metadata about this image - optional k8s.io.kubernetes.pkg.runtime.RawExtension dockerImageMetadata = 3; - - // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" - optional string dockerImageMetadataVersion = 4; - - // DockerImageManifest is the raw JSON of the manifest - optional string dockerImageManifest = 5; - - // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data. - repeated ImageLayer dockerImageLayers = 6; - - // Signatures holds all signatures of the image. - repeated ImageSignature signatures = 7; - - // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. - repeated bytes dockerImageSignatures = 8; - - // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. - optional string dockerImageManifestMediaType = 9; - - // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. - optional string dockerImageConfig = 10; -} - -// ImageImportSpec describes a request to import a specific image. -message ImageImportSpec { - // From is the source of an image to import; only kind DockerImage is allowed - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference to = 2; - - // ImportPolicy is the policy controlling how the image is imported - optional TagImportPolicy importPolicy = 3; - - // IncludeManifest determines if the manifest for each image is returned in the response - optional bool includeManifest = 4; -} - -// ImageImportStatus describes the result of an image import. 
-message ImageImportStatus { - // Status is the status of the image import, including errors encountered while retrieving the image - optional k8s.io.kubernetes.pkg.api.unversioned.Status status = 1; - - // Image is the metadata of that image, if the image was located - optional Image image = 2; - - // Tag is the tag this image was located under, if any - optional string tag = 3; -} - -// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. -message ImageLayer { - // Name of the layer as defined by the underlying store. - optional string name = 1; - - // Size of the layer in bytes as defined by the underlying store. - optional int64 size = 2; - - // MediaType of the referenced object. - optional string mediaType = 3; -} - -// ImageList is a list of Image objects. -message ImageList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of images - repeated Image items = 2; -} - -// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims -// as long as the signature is trusted. Based on this information it is possible to restrict runnable images -// to those matching cluster-wide policy. -// Mandatory fields should be parsed by clients doing image verification. The others are parsed from -// signature's content by the server. They serve just an informative purpose. -message ImageSignature { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Required: Describes a type of stored blob. - optional string type = 2; - - // Required: An opaque binary string which is an image's signature. - optional bytes content = 3; - - // Conditions represent the latest available observations of a signature's current state. - repeated SignatureCondition conditions = 4; - - // A human readable string representing image's identity. 
It could be a product name and version, or an - // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). - optional string imageIdentity = 5; - - // Contains claims from the signature. - map signedClaims = 6; - - // If specified, it is the time of signature's creation. - optional k8s.io.kubernetes.pkg.api.unversioned.Time created = 7; - - // If specified, it holds information about an issuer of signing certificate or key (a person or entity - // who signed the signing certificate or key). - optional SignatureIssuer issuedBy = 8; - - // If specified, it holds information about a subject of signing certificate or key (a person or entity - // who signed the image). - optional SignatureSubject issuedTo = 9; -} - -// ImageStream stores a mapping of tags to images, metadata overrides that are applied -// when images are tagged in a stream, and an optional reference to a Docker image -// repository on a registry. -message ImageStream { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec describes the desired state of this stream - optional ImageStreamSpec spec = 2; - - // Status describes the current state of this stream - optional ImageStreamStatus status = 3; -} - -// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. -message ImageStreamImage { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Image associated with the ImageStream and image name. - optional Image image = 2; -} - -// ImageStreamImport imports an image from remote repositories into OpenShift. -message ImageStreamImport { - // Standard object's metadata. 
- optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a description of the images that the user wishes to import - optional ImageStreamImportSpec spec = 2; - - // Status is the the result of importing the image - optional ImageStreamImportStatus status = 3; -} - -// ImageStreamImportSpec defines what images should be imported. -message ImageStreamImportSpec { - // Import indicates whether to perform an import - if so, the specified tags are set on the spec - // and status of the image stream defined by the type meta. - optional bool import = 1; - - // Repository is an optional import of an entire Docker image repository. A maximum limit on the - // number of tags imported this way is imposed by the server. - optional RepositoryImportSpec repository = 2; - - // Images are a list of individual images to import. - repeated ImageImportSpec images = 3; -} - -// ImageStreamImportStatus contains information about the status of an image stream import. -message ImageStreamImportStatus { - // Import is the image stream that was successfully updated or created when 'to' was set. - optional ImageStream import = 1; - - // Repository is set if spec.repository was set to the outcome of the import - optional RepositoryImportStatus repository = 2; - - // Images is set with the result of importing spec.images - repeated ImageImportStatus images = 3; -} - -// ImageStreamList is a list of ImageStream objects. -message ImageStreamList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of imageStreams - repeated ImageStream items = 2; -} - -// ImageStreamMapping represents a mapping from a single tag to a Docker image as -// well as the reference to the Docker image stream the image came from. -message ImageStreamMapping { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Image is a Docker image. 
- optional Image image = 2; - - // Tag is a string value this image can be located with inside the stream. - optional string tag = 3; -} - -// ImageStreamSpec represents options for ImageStreams. -message ImageStreamSpec { - // DockerImageRepository is optional, if specified this stream is backed by a Docker repository on this server - optional string dockerImageRepository = 1; - - // Tags map arbitrary string values to specific image locators - repeated TagReference tags = 2; -} - -// ImageStreamStatus contains information about the state of this image stream. -message ImageStreamStatus { - // DockerImageRepository represents the effective location this stream may be accessed at. - // May be empty until the server determines where the repository is located - optional string dockerImageRepository = 1; - - // Tags are a historical record of images associated with each tag. The first entry in the - // TagEvent array is the currently tagged image. - repeated NamedTagEventList tags = 2; -} - -// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. -message ImageStreamTag { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Tag is the spec tag associated with this image stream tag, and it may be null - // if only pushes have occurred to this image stream. - optional TagReference tag = 2; - - // Generation is the current generation of the tagged image - if tag is provided - // and this value is not equal to the tag generation, a user has requested an - // import that has not completed, or Conditions will be filled out indicating any - // error. - optional int64 generation = 3; - - // Conditions is an array of conditions that apply to the image stream tag. - repeated TagEventCondition conditions = 4; - - // Image associated with the ImageStream and tag. - optional Image image = 5; -} - -// ImageStreamTagList is a list of ImageStreamTag objects. 
-message ImageStreamTagList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of image stream tags - repeated ImageStreamTag items = 2; -} - -// NamedTagEventList relates a tag to its image history. -message NamedTagEventList { - // Tag is the tag for which the history is recorded - optional string tag = 1; - - // Standard object's metadata. - repeated TagEvent items = 2; - - // Conditions is an array of conditions that apply to the tag event list. - repeated TagEventCondition conditions = 3; -} - -// RepositoryImportSpec describes a request to import images from a Docker image repository. -message RepositoryImportSpec { - // From is the source for the image repository to import; only kind DockerImage and a name of a Docker image repository is allowed - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 1; - - // ImportPolicy is the policy controlling how the image is imported - optional TagImportPolicy importPolicy = 2; - - // IncludeManifest determines if the manifest for each image is returned in the response - optional bool includeManifest = 3; -} - -// RepositoryImportStatus describes the result of an image repository import -message RepositoryImportStatus { - // Status reflects whether any failure occurred during import - optional k8s.io.kubernetes.pkg.api.unversioned.Status status = 1; - - // Images is a list of images successfully retrieved by the import of the repository. - repeated ImageImportStatus images = 2; - - // AdditionalTags are tags that exist in the repository but were not imported because - // a maximum limit of automatic imports was applied. - repeated string additionalTags = 3; -} - -// SignatureCondition describes an image signature condition of particular kind at particular probe time. -message SignatureCondition { - // Type of signature condition, Complete or Failed. 
- optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject -// of signing certificate or key. -message SignatureGenericEntity { - // Organization name. - optional string organization = 1; - - // Common name (e.g. openshift-signing-service). - optional string commonName = 2; -} - -// SignatureIssuer holds information about an issuer of signing certificate or key. -message SignatureIssuer { - optional SignatureGenericEntity signatureGenericEntity = 1; -} - -// SignatureSubject holds information about a person or entity who created the signature. -message SignatureSubject { - optional SignatureGenericEntity signatureGenericEntity = 1; - - // If present, it is a human readable key id of public key belonging to the subject used to verify image - // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. - // 0x685ebe62bf278440). - optional string publicKeyID = 2; -} - -// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. 
-message TagEvent { - // Created holds the time the TagEvent was created - optional k8s.io.kubernetes.pkg.api.unversioned.Time created = 1; - - // DockerImageReference is the string that can be used to pull this image - optional string dockerImageReference = 2; - - // Image is the image - optional string image = 3; - - // Generation is the spec tag generation that resulted in this tag being updated - optional int64 generation = 4; -} - -// TagEventCondition contains condition information for a tag event. -message TagEventCondition { - // Type of tag event condition, currently only ImportSuccess - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // LastTransitionTIme is the time the condition transitioned from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 3; - - // Reason is a brief machine readable explanation for the condition's last transition. - optional string reason = 4; - - // Message is a human readable description of the details about last transition, complementing reason. - optional string message = 5; - - // Generation is the spec tag generation that this status corresponds to - optional int64 generation = 6; -} - -// TagImportPolicy describes the tag import policy -message TagImportPolicy { - // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. - optional bool insecure = 1; - - // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported - optional bool scheduled = 2; -} - -// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. 
-message TagReference { - // Name of the tag - optional string name = 1; - - // Annotations associated with images using this tag - map annotations = 2; - - // From is a reference to an image stream tag or image stream this tag should track - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference from = 3; - - // Reference states if the tag will be imported. Default value is false, which means the tag will be imported. - optional bool reference = 4; - - // Generation is the image stream generation that updated this tag - setting it to 0 is an indication that the generation must be updated. - // Legacy clients will send this as nil, which means the client doesn't know or care. - optional int64 generation = 5; - - // Import is information that controls how images may be imported by the server. - optional TagImportPolicy importPolicy = 6; -} - diff --git a/vendor/github.com/openshift/origin/pkg/image/api/v1/swagger_doc.go b/vendor/github.com/openshift/origin/pkg/image/api/v1/swagger_doc.go index 5c7148561..3c1fa4128 100644 --- a/vendor/github.com/openshift/origin/pkg/image/api/v1/swagger_doc.go +++ b/vendor/github.com/openshift/origin/pkg/image/api/v1/swagger_doc.go @@ -119,7 +119,7 @@ func (ImageStreamImage) SwaggerDoc() map[string]string { } var map_ImageStreamImport = map[string]string{ - "": "ImageStreamImport imports an image from remote repositories into OpenShift.", + "": "The image stream import resource provides an easy way for a user to find and import Docker images from other Docker registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). 
Clients that know the desired image can continue to create spec.tags directly into their image streams.", "metadata": "Standard object's metadata.", "spec": "Spec is a description of the images that the user wishes to import", "status": "Status is the the result of importing the image", diff --git a/vendor/github.com/openshift/origin/pkg/image/api/v1/types.go b/vendor/github.com/openshift/origin/pkg/image/api/v1/types.go index 33ecf74c9..284399f0f 100644 --- a/vendor/github.com/openshift/origin/pkg/image/api/v1/types.go +++ b/vendor/github.com/openshift/origin/pkg/image/api/v1/types.go @@ -314,7 +314,14 @@ type DockerImageReference struct { ID string `protobuf:"bytes,5,opt,name=iD"` } -// ImageStreamImport imports an image from remote repositories into OpenShift. +// The image stream import resource provides an easy way for a user to find and import Docker images +// from other Docker registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. type ImageStreamImport struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
diff --git a/vendor/github.com/openshift/origin/pkg/image/graph/edges.go b/vendor/github.com/openshift/origin/pkg/image/graph/edges.go deleted file mode 100644 index d99f981a5..000000000 --- a/vendor/github.com/openshift/origin/pkg/image/graph/edges.go +++ /dev/null @@ -1,56 +0,0 @@ -package graph - -import ( - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - imageapi "github.com/openshift/origin/pkg/image/api" - imagegraph "github.com/openshift/origin/pkg/image/graph/nodes" -) - -const ( - // ReferencedImageStreamGraphEdgeKind is an edge that goes from an ImageStreamTag node back to an ImageStream - ReferencedImageStreamGraphEdgeKind = "ReferencedImageStreamGraphEdge" - // ReferencedImageStreamImageGraphEdgeKind is an edge that goes from an ImageStreamImage node back to an ImageStream - ReferencedImageStreamImageGraphEdgeKind = "ReferencedImageStreamImageGraphEdgeKind" -) - -// AddImageStreamTagRefEdge ensures that a directed edge exists between an IST Node and the IS it references -func AddImageStreamTagRefEdge(g osgraph.MutableUniqueGraph, node *imagegraph.ImageStreamTagNode) { - isName, _, _ := imageapi.SplitImageStreamTag(node.Name) - imageStream := &imageapi.ImageStream{} - imageStream.Namespace = node.Namespace - imageStream.Name = isName - - imageStreamNode := imagegraph.FindOrCreateSyntheticImageStreamNode(g, imageStream) - g.AddEdge(node, imageStreamNode, ReferencedImageStreamGraphEdgeKind) -} - -// AddImageStreamImageRefEdge ensures that a directed edge exists between an ImageStreamImage Node and the IS it references -func AddImageStreamImageRefEdge(g osgraph.MutableUniqueGraph, node *imagegraph.ImageStreamImageNode) { - dockImgRef, _ := imageapi.ParseDockerImageReference(node.Name) - imageStream := &imageapi.ImageStream{} - imageStream.Namespace = node.Namespace - imageStream.Name = dockImgRef.Name - - imageStreamNode := imagegraph.FindOrCreateSyntheticImageStreamNode(g, imageStream) - 
g.AddEdge(node, imageStreamNode, ReferencedImageStreamImageGraphEdgeKind) -} - -// AddAllImageStreamRefEdges calls AddImageStreamRefEdge for every ImageStreamTagNode in the graph -func AddAllImageStreamRefEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if istNode, ok := node.(*imagegraph.ImageStreamTagNode); ok { - AddImageStreamTagRefEdge(g, istNode) - } - } -} - -// AddAllImageStreamImageRefEdges calls AddImageStreamImageRefEdge for every ImageStreamImageNode in the graph -func AddAllImageStreamImageRefEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if isimageNode, ok := node.(*imagegraph.ImageStreamImageNode); ok { - AddImageStreamImageRefEdge(g, isimageNode) - } - } -} diff --git a/vendor/github.com/openshift/origin/pkg/image/graph/nodes/nodes.go b/vendor/github.com/openshift/origin/pkg/image/graph/nodes/nodes.go deleted file mode 100644 index 87475b02e..000000000 --- a/vendor/github.com/openshift/origin/pkg/image/graph/nodes/nodes.go +++ /dev/null @@ -1,172 +0,0 @@ -package nodes - -import ( - "github.com/gonum/graph" - - kapi "k8s.io/kubernetes/pkg/api" - - osgraph "github.com/openshift/origin/pkg/api/graph" - imageapi "github.com/openshift/origin/pkg/image/api" -) - -func EnsureImageNode(g osgraph.MutableUniqueGraph, img *imageapi.Image) graph.Node { - return osgraph.EnsureUnique(g, - ImageNodeName(img), - func(node osgraph.Node) graph.Node { - return &ImageNode{node, img} - }, - ) -} - -// EnsureAllImageStreamTagNodes creates all the ImageStreamTagNodes that are guaranteed to be present based on the ImageStream. -// This is different than inferring the presence of an object, since the IST is an object derived from a join between the ImageStream -// and the Image it references. 
-func EnsureAllImageStreamTagNodes(g osgraph.MutableUniqueGraph, is *imageapi.ImageStream) []*ImageStreamTagNode { - ret := []*ImageStreamTagNode{} - - for tag := range is.Status.Tags { - ist := &imageapi.ImageStreamTag{} - ist.Namespace = is.Namespace - ist.Name = imageapi.JoinImageStreamTag(is.Name, tag) - - istNode := EnsureImageStreamTagNode(g, ist) - ret = append(ret, istNode) - } - - return ret -} - -func FindImage(g osgraph.MutableUniqueGraph, imageName string) graph.Node { - return g.Find(ImageNodeName(&imageapi.Image{ObjectMeta: kapi.ObjectMeta{Name: imageName}})) -} - -// EnsureDockerRepositoryNode adds the named Docker repository tag reference to the graph if it does -// not already exist. If the reference is invalid, the Name field of the graph will be used directly. -func EnsureDockerRepositoryNode(g osgraph.MutableUniqueGraph, name, tag string) graph.Node { - ref, err := imageapi.ParseDockerImageReference(name) - if err == nil { - if len(tag) != 0 { - ref.Tag = tag - } - ref = ref.DockerClientDefaults() - } else { - ref = imageapi.DockerImageReference{Name: name} - } - - return osgraph.EnsureUnique(g, - DockerImageRepositoryNodeName(ref), - func(node osgraph.Node) graph.Node { - return &DockerImageRepositoryNode{node, ref} - }, - ) -} - -// MakeImageStreamTagObjectMeta returns an ImageStreamTag that has enough information to join the graph, but it is not -// based on a full IST object. This can be used to properly initialize the graph without having to retrieve all ISTs -func MakeImageStreamTagObjectMeta(namespace, name, tag string) *imageapi.ImageStreamTag { - return &imageapi.ImageStreamTag{ - ObjectMeta: kapi.ObjectMeta{ - Namespace: namespace, - Name: imageapi.JoinImageStreamTag(name, tag), - }, - } -} - -// MakeImageStreamTagObjectMeta2 returns an ImageStreamTag that has enough information to join the graph, but it is not -// based on a full IST object. 
This can be used to properly initialize the graph without having to retrieve all ISTs -func MakeImageStreamTagObjectMeta2(namespace, name string) *imageapi.ImageStreamTag { - return &imageapi.ImageStreamTag{ - ObjectMeta: kapi.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - } -} - -// EnsureImageStreamTagNode adds a graph node for the specific tag in an Image Stream if it does not already exist. -func EnsureImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imageapi.ImageStreamTag) *ImageStreamTagNode { - return osgraph.EnsureUnique(g, - ImageStreamTagNodeName(ist), - func(node osgraph.Node) graph.Node { - return &ImageStreamTagNode{node, ist, true} - }, - ).(*ImageStreamTagNode) -} - -// FindOrCreateSyntheticImageStreamTagNode returns the existing ISTNode or creates a synthetic node in its place -func FindOrCreateSyntheticImageStreamTagNode(g osgraph.MutableUniqueGraph, ist *imageapi.ImageStreamTag) *ImageStreamTagNode { - return osgraph.EnsureUnique(g, - ImageStreamTagNodeName(ist), - func(node osgraph.Node) graph.Node { - return &ImageStreamTagNode{node, ist, false} - }, - ).(*ImageStreamTagNode) -} - -// MakeImageStreamImageObjectMeta returns an ImageStreamImage that has enough information to join the graph, but it is not -// based on a full ISI object. This can be used to properly initialize the graph without having to retrieve all ISIs -func MakeImageStreamImageObjectMeta(namespace, name string) *imageapi.ImageStreamImage { - return &imageapi.ImageStreamImage{ - ObjectMeta: kapi.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - } -} - -// EnsureImageStreamImageNode adds a graph node for the specific ImageStreamImage if it -// does not already exist. 
-func EnsureImageStreamImageNode(g osgraph.MutableUniqueGraph, namespace, name string) graph.Node { - isi := &imageapi.ImageStreamImage{ - ObjectMeta: kapi.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - } - return osgraph.EnsureUnique(g, - ImageStreamImageNodeName(isi), - func(node osgraph.Node) graph.Node { - return &ImageStreamImageNode{node, isi, true} - }, - ) -} - -// FindOrCreateSyntheticImageStreamImageNode returns the existing ISINode or creates a synthetic node in its place -func FindOrCreateSyntheticImageStreamImageNode(g osgraph.MutableUniqueGraph, isi *imageapi.ImageStreamImage) *ImageStreamImageNode { - return osgraph.EnsureUnique(g, - ImageStreamImageNodeName(isi), - func(node osgraph.Node) graph.Node { - return &ImageStreamImageNode{node, isi, false} - }, - ).(*ImageStreamImageNode) -} - -// EnsureImageStreamNode adds a graph node for the Image Stream if it does not already exist. -func EnsureImageStreamNode(g osgraph.MutableUniqueGraph, is *imageapi.ImageStream) graph.Node { - return osgraph.EnsureUnique(g, - ImageStreamNodeName(is), - func(node osgraph.Node) graph.Node { - return &ImageStreamNode{node, is, true} - }, - ) -} - -// FindOrCreateSyntheticImageStreamNode returns the existing ISNode or creates a synthetic node in its place -func FindOrCreateSyntheticImageStreamNode(g osgraph.MutableUniqueGraph, is *imageapi.ImageStream) *ImageStreamNode { - return osgraph.EnsureUnique(g, - ImageStreamNodeName(is), - func(node osgraph.Node) graph.Node { - return &ImageStreamNode{node, is, false} - }, - ).(*ImageStreamNode) -} - -// EnsureImageLayerNode adds a graph node for the layer if it does not already exist. 
-func EnsureImageLayerNode(g osgraph.MutableUniqueGraph, layer string) graph.Node { - return osgraph.EnsureUnique(g, - ImageLayerNodeName(layer), - func(node osgraph.Node) graph.Node { - return &ImageLayerNode{node, layer} - }, - ) -} diff --git a/vendor/github.com/openshift/origin/pkg/image/graph/nodes/types.go b/vendor/github.com/openshift/origin/pkg/image/graph/nodes/types.go deleted file mode 100644 index da923b6b9..000000000 --- a/vendor/github.com/openshift/origin/pkg/image/graph/nodes/types.go +++ /dev/null @@ -1,207 +0,0 @@ -package nodes - -import ( - "fmt" - "reflect" - - osgraph "github.com/openshift/origin/pkg/api/graph" - imageapi "github.com/openshift/origin/pkg/image/api" -) - -var ( - ImageStreamNodeKind = reflect.TypeOf(imageapi.ImageStream{}).Name() - ImageNodeKind = reflect.TypeOf(imageapi.Image{}).Name() - ImageStreamTagNodeKind = reflect.TypeOf(imageapi.ImageStreamTag{}).Name() - ImageStreamImageNodeKind = reflect.TypeOf(imageapi.ImageStreamImage{}).Name() - - // non-api types - DockerRepositoryNodeKind = reflect.TypeOf(imageapi.DockerImageReference{}).Name() - ImageLayerNodeKind = "ImageLayer" -) - -func ImageStreamNodeName(o *imageapi.ImageStream) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ImageStreamNodeKind, o) -} - -type ImageStreamNode struct { - osgraph.Node - *imageapi.ImageStream - - IsFound bool -} - -func (n ImageStreamNode) Found() bool { - return n.IsFound -} - -func (n ImageStreamNode) Object() interface{} { - return n.ImageStream -} - -func (n ImageStreamNode) String() string { - return string(ImageStreamNodeName(n.ImageStream)) -} - -func (n ImageStreamNode) UniqueName() osgraph.UniqueName { - return ImageStreamNodeName(n.ImageStream) -} - -func (*ImageStreamNode) Kind() string { - return ImageStreamNodeKind -} - -func ImageStreamTagNodeName(o *imageapi.ImageStreamTag) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ImageStreamTagNodeKind, o) -} - -type 
ImageStreamTagNode struct { - osgraph.Node - *imageapi.ImageStreamTag - - IsFound bool -} - -func (n ImageStreamTagNode) Found() bool { - return n.IsFound -} - -func (n ImageStreamTagNode) ImageSpec() string { - name, tag, _ := imageapi.SplitImageStreamTag(n.ImageStreamTag.Name) - return imageapi.DockerImageReference{Namespace: n.Namespace, Name: name, Tag: tag}.String() -} - -func (n ImageStreamTagNode) ImageTag() string { - _, tag, _ := imageapi.SplitImageStreamTag(n.ImageStreamTag.Name) - return tag -} - -func (n ImageStreamTagNode) Object() interface{} { - return n.ImageStreamTag -} - -func (n ImageStreamTagNode) String() string { - return string(ImageStreamTagNodeName(n.ImageStreamTag)) -} - -func (n ImageStreamTagNode) UniqueName() osgraph.UniqueName { - return ImageStreamTagNodeName(n.ImageStreamTag) -} - -func (*ImageStreamTagNode) Kind() string { - return ImageStreamTagNodeKind -} - -func ImageStreamImageNodeName(o *imageapi.ImageStreamImage) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ImageStreamImageNodeKind, o) -} - -type ImageStreamImageNode struct { - osgraph.Node - *imageapi.ImageStreamImage - - IsFound bool -} - -func (n ImageStreamImageNode) ImageSpec() string { - return n.ImageStreamImage.Namespace + "/" + n.ImageStreamImage.Name -} - -func (n ImageStreamImageNode) ImageTag() string { - _, id, _ := imageapi.SplitImageStreamImage(n.ImageStreamImage.Name) - return id -} - -func (n ImageStreamImageNode) Object() interface{} { - return n.ImageStreamImage -} - -func (n ImageStreamImageNode) String() string { - return string(ImageStreamImageNodeName(n.ImageStreamImage)) -} - -func (n ImageStreamImageNode) ResourceString() string { - return "isimage/" + n.Name -} - -func (n ImageStreamImageNode) UniqueName() osgraph.UniqueName { - return ImageStreamImageNodeName(n.ImageStreamImage) -} - -func (*ImageStreamImageNode) Kind() string { - return ImageStreamImageNodeKind -} - -func DockerImageRepositoryNodeName(o 
imageapi.DockerImageReference) osgraph.UniqueName { - return osgraph.UniqueName(fmt.Sprintf("%s|%s", DockerRepositoryNodeKind, o.String())) -} - -type DockerImageRepositoryNode struct { - osgraph.Node - Ref imageapi.DockerImageReference -} - -func (n DockerImageRepositoryNode) ImageSpec() string { - return n.Ref.String() -} - -func (n DockerImageRepositoryNode) ImageTag() string { - return n.Ref.DockerClientDefaults().Tag -} - -func (n DockerImageRepositoryNode) String() string { - return string(DockerImageRepositoryNodeName(n.Ref)) -} - -func (*DockerImageRepositoryNode) Kind() string { - return DockerRepositoryNodeKind -} - -func (n DockerImageRepositoryNode) UniqueName() osgraph.UniqueName { - return DockerImageRepositoryNodeName(n.Ref) -} - -func ImageNodeName(o *imageapi.Image) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ImageNodeKind, o) -} - -type ImageNode struct { - osgraph.Node - Image *imageapi.Image -} - -func (n ImageNode) Object() interface{} { - return n.Image -} - -func (n ImageNode) String() string { - return string(ImageNodeName(n.Image)) -} - -func (n ImageNode) UniqueName() osgraph.UniqueName { - return ImageNodeName(n.Image) -} - -func (*ImageNode) Kind() string { - return ImageNodeKind -} - -func ImageLayerNodeName(layer string) osgraph.UniqueName { - return osgraph.UniqueName(fmt.Sprintf("%s|%s", ImageLayerNodeKind, layer)) -} - -type ImageLayerNode struct { - osgraph.Node - Layer string -} - -func (n ImageLayerNode) Object() interface{} { - return n.Layer -} - -func (n ImageLayerNode) String() string { - return string(ImageLayerNodeName(n.Layer)) -} - -func (*ImageLayerNode) Kind() string { - return ImageLayerNodeKind -} diff --git a/vendor/github.com/openshift/origin/pkg/image/reference/reference.go b/vendor/github.com/openshift/origin/pkg/image/reference/reference.go new file mode 100644 index 000000000..8d1e91900 --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/image/reference/reference.go @@ 
-0,0 +1,51 @@ +package reference + +import ( + "strings" + + "github.com/docker/distribution/reference" +) + +// NamedDockerImageReference points to a Docker image. +type NamedDockerImageReference struct { + Registry string + Namespace string + Name string + Tag string + ID string +} + +// ParseNamedDockerImageReference parses a Docker pull spec string into a +// NamedDockerImageReference. +func ParseNamedDockerImageReference(spec string) (NamedDockerImageReference, error) { + var ref NamedDockerImageReference + + namedRef, err := reference.ParseNamed(spec) + if err != nil { + return ref, err + } + + name := namedRef.Name() + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ":.") && name[:i] != "localhost") { + ref.Name = name + } else { + ref.Registry, ref.Name = name[:i], name[i+1:] + } + + if named, ok := namedRef.(reference.NamedTagged); ok { + ref.Tag = named.Tag() + } + + if named, ok := namedRef.(reference.Canonical); ok { + ref.ID = named.Digest().String() + } + + // It's not enough just to use the reference.ParseNamed(). 
We have to fill + // ref.Namespace from ref.Name + if i := strings.IndexRune(ref.Name, '/'); i != -1 { + ref.Namespace, ref.Name = ref.Name[:i], ref.Name[i+1:] + } + + return ref, nil +} diff --git a/vendor/github.com/openshift/origin/pkg/oauth/api/register.go b/vendor/github.com/openshift/origin/pkg/oauth/api/register.go index 0a1d80f03..7d53e7445 100644 --- a/vendor/github.com/openshift/origin/pkg/oauth/api/register.go +++ b/vendor/github.com/openshift/origin/pkg/oauth/api/register.go @@ -37,6 +37,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &OAuthClientList{}, &OAuthClientAuthorization{}, &OAuthClientAuthorizationList{}, + &OAuthRedirectReference{}, ) return nil } @@ -49,3 +50,4 @@ func (obj *OAuthAuthorizeTokenList) GetObjectKind() unversioned.ObjectKind func (obj *OAuthAuthorizeToken) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *OAuthAccessTokenList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } func (obj *OAuthAccessToken) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } +func (obj *OAuthRedirectReference) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/github.com/openshift/origin/pkg/oauth/api/types.go b/vendor/github.com/openshift/origin/pkg/oauth/api/types.go index eb86813bc..109f6501c 100644 --- a/vendor/github.com/openshift/origin/pkg/oauth/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/oauth/api/types.go @@ -59,6 +59,12 @@ type OAuthAuthorizeToken struct { // UserUID is the unique UID associated with this token. UserUID and UserName must both match // for this token to be valid. 
UserUID string + + // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + CodeChallenge string + + // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + CodeChallengeMethod string } // +genclient=true @@ -161,3 +167,15 @@ type OAuthClientAuthorizationList struct { unversioned.ListMeta Items []OAuthClientAuthorization } + +type OAuthRedirectReference struct { + unversioned.TypeMeta + kapi.ObjectMeta + Reference RedirectReference +} + +type RedirectReference struct { + Group string + Kind string + Name string +} diff --git a/vendor/github.com/openshift/origin/pkg/oauth/api/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/oauth/api/zz_generated.deepcopy.go index 87bdc2fe9..5850e1bea 100644 --- a/vendor/github.com/openshift/origin/pkg/oauth/api/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/oauth/api/zz_generated.deepcopy.go @@ -28,6 +28,8 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_OAuthClientAuthorization, InType: reflect.TypeOf(&OAuthClientAuthorization{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_OAuthClientAuthorizationList, InType: reflect.TypeOf(&OAuthClientAuthorizationList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_OAuthClientList, InType: reflect.TypeOf(&OAuthClientList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_OAuthRedirectReference, InType: reflect.TypeOf(&OAuthRedirectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RedirectReference, InType: reflect.TypeOf(&RedirectReference{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScopeRestriction, InType: reflect.TypeOf(&ScopeRestriction{})}, ) } @@ -123,6 +125,8 @@ func DeepCopy_api_OAuthAuthorizeToken(in interface{}, out interface{}, c *conver out.State = in.State out.UserName = in.UserName out.UserUID = 
in.UserUID + out.CodeChallenge = in.CodeChallenge + out.CodeChallengeMethod = in.CodeChallengeMethod return nil } } @@ -252,6 +256,30 @@ func DeepCopy_api_OAuthClientList(in interface{}, out interface{}, c *conversion } } +func DeepCopy_api_OAuthRedirectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*OAuthRedirectReference) + out := out.(*OAuthRedirectReference) + out.TypeMeta = in.TypeMeta + if err := pkg_api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.Reference = in.Reference + return nil + } +} + +func DeepCopy_api_RedirectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RedirectReference) + out := out.(*RedirectReference) + out.Group = in.Group + out.Kind = in.Kind + out.Name = in.Name + return nil + } +} + func DeepCopy_api_ScopeRestriction(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*ScopeRestriction) diff --git a/vendor/github.com/openshift/origin/pkg/project/api/types.go b/vendor/github.com/openshift/origin/pkg/project/api/types.go index d44e38623..1df795828 100644 --- a/vendor/github.com/openshift/origin/pkg/project/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/project/api/types.go @@ -29,6 +29,7 @@ type ProjectStatus struct { } // +genclient=true +// +nonNamespaced=true // Project is a logical top-level container for a set of origin resources type Project struct { diff --git a/vendor/github.com/openshift/origin/pkg/quota/api/types.go b/vendor/github.com/openshift/origin/pkg/quota/api/types.go index 2b07893f9..e5e77552e 100644 --- a/vendor/github.com/openshift/origin/pkg/quota/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/quota/api/types.go @@ -2,6 +2,7 @@ package api import ( "container/list" + "reflect" kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" @@ -115,6 +116,42 @@ func (o *ResourceQuotasStatusByNamespace) 
OrderedKeys() *list.List { return o.orderedMap.OrderedKeys() } +// DeepCopy implements a custom copy to correctly handle unexported fields +// Must match "func (t T) DeepCopy() T" for the deep copy generator to use it +func (o ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace { + out := ResourceQuotasStatusByNamespace{} + for e := o.OrderedKeys().Front(); e != nil; e = e.Next() { + namespace := e.Value.(string) + instatus, _ := o.Get(namespace) + if outstatus, err := kapi.Scheme.DeepCopy(instatus); err != nil { + panic(err) // should never happen + } else { + out.Insert(namespace, outstatus.(kapi.ResourceQuotaStatus)) + } + } + return out +} + +func init() { + // Tell the reflection package how to compare our unexported type + if err := kapi.Semantic.AddFuncs( + func(o1, o2 ResourceQuotasStatusByNamespace) bool { + return reflect.DeepEqual(o1.orderedMap, o2.orderedMap) + }, + func(o1, o2 *ResourceQuotasStatusByNamespace) bool { + if o1 == nil && o2 == nil { + return true + } + if (o1 == nil) != (o2 == nil) { + return false + } + return reflect.DeepEqual(o1.orderedMap, o2.orderedMap) + }, + ); err != nil { + panic(err) + } +} + // orderedMap is a very simple ordering a map tracking insertion order. It allows fast and stable serializations // for our encoding. You could probably do something fancier with pointers to interfaces, but I didn't. type orderedMap struct { @@ -159,7 +196,7 @@ func (o *orderedMap) Remove(key string) { // OrderedKeys returns back the ordered keys. 
This can be used to build a stable serialization func (o *orderedMap) OrderedKeys() *list.List { if o.orderedKeys == nil { - o.orderedKeys = list.New() + return list.New() } return o.orderedKeys } diff --git a/vendor/github.com/openshift/origin/pkg/quota/api/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/quota/api/zz_generated.deepcopy.go index 7f2dd622c..59250ca72 100644 --- a/vendor/github.com/openshift/origin/pkg/quota/api/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/quota/api/zz_generated.deepcopy.go @@ -156,9 +156,7 @@ func DeepCopy_api_ClusterResourceQuotaStatus(in interface{}, out interface{}, c if err := pkg_api.DeepCopy_api_ResourceQuotaStatus(&in.Total, &out.Total, c); err != nil { return err } - if err := DeepCopy_api_ResourceQuotasStatusByNamespace(&in.Namespaces, &out.Namespaces, c); err != nil { - return err - } + out.Namespaces = in.Namespaces.DeepCopy() return nil } } diff --git a/vendor/github.com/openshift/origin/pkg/route/api/helper.go b/vendor/github.com/openshift/origin/pkg/route/api/helper.go index 1d6e92588..25b2419b7 100644 --- a/vendor/github.com/openshift/origin/pkg/route/api/helper.go +++ b/vendor/github.com/openshift/origin/pkg/route/api/helper.go @@ -1,6 +1,8 @@ package api import ( + "strings" + kapi "k8s.io/kubernetes/pkg/api" ) @@ -20,11 +22,20 @@ func RouteLessThan(route1, route2 *Route) bool { if route1.CreationTimestamp.Before(route2.CreationTimestamp) { return true } - if route1.CreationTimestamp == route2.CreationTimestamp && route1.UID < route2.UID { - return true + + if route2.CreationTimestamp.Before(route1.CreationTimestamp) { + return false } - if route1.Namespace < route2.Namespace { - return true + + return route1.UID < route2.UID +} + +// GetDomainForHost returns the domain for the specified host. +// Note for top level domains, this will return an empty string. 
+func GetDomainForHost(host string) string { + if idx := strings.IndexRune(host, '.'); idx > -1 { + return host[idx+1:] } - return route1.Name < route2.Name + + return "" } diff --git a/vendor/github.com/openshift/origin/pkg/route/api/types.go b/vendor/github.com/openshift/origin/pkg/route/api/types.go index 21c6dc01b..5ff3fe9ab 100644 --- a/vendor/github.com/openshift/origin/pkg/route/api/types.go +++ b/vendor/github.com/openshift/origin/pkg/route/api/types.go @@ -42,6 +42,10 @@ type RouteSpec struct { //TLS provides the ability to configure certificates and termination for the route TLS *TLSConfig + + // Wildcard policy if any for the route. + // Currently only 'Subdomain' or 'None' is allowed. + WildcardPolicy WildcardPolicyType } // RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' @@ -77,6 +81,8 @@ type RouteIngress struct { RouterName string // Conditions is the state of the route, may be empty. Conditions []RouteIngressCondition + // Wildcard policy is the wildcard policy that was allowed where this route is exposed. + WildcardPolicy WildcardPolicyType } // RouteIngressConditionType is a valid value for RouteCondition @@ -180,3 +186,17 @@ const ( // insecure HTTP connections will be redirected to use HTTPS. InsecureEdgeTerminationPolicyRedirect InsecureEdgeTerminationPolicyType = "Redirect" ) + +// WildcardPolicyType indicates the type of wildcard support needed by routes. +type WildcardPolicyType string + +const ( + // WildcardPolicyNone indicates no wildcard support is needed. + WildcardPolicyNone WildcardPolicyType = "None" + + // WildcardPolicySubdomain indicates the host needs wildcard support for the subdomain. 
+ // Example: With host = "www.acme.test", indicates that the router + // should support requests for *.acme.test + // Note that this will not match acme.test only *.acme.test + WildcardPolicySubdomain WildcardPolicyType = "Subdomain" +) diff --git a/vendor/github.com/openshift/origin/pkg/route/api/zz_generated.deepcopy.go b/vendor/github.com/openshift/origin/pkg/route/api/zz_generated.deepcopy.go index aa04f9e6f..d86f8b71c 100644 --- a/vendor/github.com/openshift/origin/pkg/route/api/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/origin/pkg/route/api/zz_generated.deepcopy.go @@ -68,6 +68,7 @@ func DeepCopy_api_RouteIngress(in interface{}, out interface{}, c *conversion.Cl } else { out.Conditions = nil } + out.WildcardPolicy = in.WildcardPolicy return nil } } @@ -155,6 +156,7 @@ func DeepCopy_api_RouteSpec(in interface{}, out interface{}, c *conversion.Clone } else { out.TLS = nil } + out.WildcardPolicy = in.WildcardPolicy return nil } } diff --git a/vendor/github.com/openshift/origin/pkg/route/generator/doc.go b/vendor/github.com/openshift/origin/pkg/route/generator/doc.go deleted file mode 100644 index c86c10352..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/generator/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package generator implements the Generator interface for routes -package generator diff --git a/vendor/github.com/openshift/origin/pkg/route/generator/generate.go b/vendor/github.com/openshift/origin/pkg/route/generator/generate.go deleted file mode 100644 index b064b05ed..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/generator/generate.go +++ /dev/null @@ -1,93 +0,0 @@ -package generator - -import ( - "fmt" - "strconv" - - kapi "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" - - "github.com/openshift/origin/pkg/route/api" -) - -// RouteGenerator generates routes from a given set of parameters -type 
RouteGenerator struct{} - -// RouteGenerator implements the kubectl.Generator interface for routes -var _ kubectl.Generator = RouteGenerator{} - -// ParamNames returns the parameters required for generating a route -func (RouteGenerator) ParamNames() []kubectl.GeneratorParam { - return []kubectl.GeneratorParam{ - {Name: "labels", Required: false}, - {Name: "default-name", Required: true}, - {Name: "port", Required: false}, - {Name: "name", Required: false}, - {Name: "hostname", Required: false}, - {Name: "path", Required: false}, - } -} - -// Generate accepts a set of parameters and maps them into a new route -func (RouteGenerator) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - var ( - labels map[string]string - err error - ) - - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - - labelString, found := params["labels"] - if found && len(labelString) > 0 { - labels, err = kubectl.ParseLabels(labelString) - if err != nil { - return nil, err - } - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - - route := &api.Route{ - ObjectMeta: kapi.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.RouteSpec{ - Host: params["hostname"], - Path: params["path"], - To: api.RouteTargetReference{ - Name: params["default-name"], - }, - }, - } - - portString := params["port"] - if len(portString) > 0 { - var targetPort intstr.IntOrString - if port, err := strconv.Atoi(portString); err == nil { - targetPort = intstr.FromInt(port) - } else { - targetPort = intstr.FromString(portString) - } - route.Spec.Port = &api.RoutePort{ - TargetPort: targetPort, - } - } - - return route, nil -} diff --git 
a/vendor/github.com/openshift/origin/pkg/route/graph/analysis/analysis.go b/vendor/github.com/openshift/origin/pkg/route/graph/analysis/analysis.go deleted file mode 100644 index f44194be8..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/analysis/analysis.go +++ /dev/null @@ -1,228 +0,0 @@ -package analysis - -import ( - "fmt" - "strconv" - - "github.com/gonum/graph" - - kapi "k8s.io/kubernetes/pkg/api" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - routeapi "github.com/openshift/origin/pkg/route/api" - routeedges "github.com/openshift/origin/pkg/route/graph" - routegraph "github.com/openshift/origin/pkg/route/graph/nodes" -) - -const ( - // MissingRoutePortWarning is returned when a route has no route port specified - // and the service it routes to has multiple ports. - MissingRoutePortWarning = "MissingRoutePort" - // WrongRoutePortWarning is returned when a route has a route port specified - // but the service it points to has no such port (either as a named port or as - // a target port). - WrongRoutePortWarning = "WrongRoutePort" - // MissingServiceWarning is returned when there is no service for the specific route. - MissingServiceWarning = "MissingService" - // MissingTLSTerminationTypeErr is returned when a route with a tls config doesn't - // specify a tls termination type. - MissingTLSTerminationTypeErr = "MissingTLSTermination" - // PathBasedPassthroughErr is returned when a path based route is passthrough - // terminated. - PathBasedPassthroughErr = "PathBasedPassthrough" - // MissingTLSTerminationTypeErr is returned when a route with a tls config doesn't - // specify a tls termination type. - RouteNotAdmittedTypeErr = "RouteNotAdmitted" - // MissingRequiredRouterErr is returned when no router has been setup. 
- MissingRequiredRouterErr = "MissingRequiredRouter" -) - -// FindPortMappingIssues checks all routes and reports any issues related to their ports. -// Also non-existent services for routes are reported here. -func FindPortMappingIssues(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) { - routeNode := uncastRouteNode.(*routegraph.RouteNode) - marker := routePortMarker(g, f, routeNode) - if marker != nil { - markers = append(markers, *marker) - } - } - - return markers -} - -func routePortMarker(g osgraph.Graph, f osgraph.Namer, routeNode *routegraph.RouteNode) *osgraph.Marker { - for _, uncastServiceNode := range g.SuccessorNodesByEdgeKind(routeNode, routeedges.ExposedThroughRouteEdgeKind) { - svcNode := uncastServiceNode.(*kubegraph.ServiceNode) - - if !svcNode.Found() { - return &osgraph.Marker{ - Node: routeNode, - RelatedNodes: []graph.Node{svcNode}, - - Severity: osgraph.WarningSeverity, - Key: MissingServiceWarning, - Message: fmt.Sprintf("%s is supposed to route traffic to %s but %s doesn't exist.", - f.ResourceName(routeNode), f.ResourceName(svcNode), f.ResourceName(svcNode)), - // TODO: Suggest 'oc create service' once that's a thing. - // See https://github.com/kubernetes/kubernetes/pull/19509 - } - } - - if len(svcNode.Spec.Ports) > 1 && (routeNode.Spec.Port == nil || len(routeNode.Spec.Port.TargetPort.String()) == 0) { - return &osgraph.Marker{ - Node: routeNode, - RelatedNodes: []graph.Node{svcNode}, - - Severity: osgraph.WarningSeverity, - Key: MissingRoutePortWarning, - Message: fmt.Sprintf("%s doesn't have a port specified and is routing traffic to %s which uses multiple ports.", - f.ResourceName(routeNode), f.ResourceName(svcNode)), - } - } - - if routeNode.Spec.Port == nil { - // If no port is specified, we don't need to analyze any further. 
- return nil - } - - routePortString := routeNode.Spec.Port.TargetPort.String() - if routePort, err := strconv.Atoi(routePortString); err == nil { - for _, port := range svcNode.Spec.Ports { - if port.TargetPort.IntValue() == routePort { - return nil - } - } - - // route has a numeric port, service has no port with that number as a targetPort. - marker := &osgraph.Marker{ - Node: routeNode, - RelatedNodes: []graph.Node{svcNode}, - - Severity: osgraph.WarningSeverity, - Key: WrongRoutePortWarning, - Message: fmt.Sprintf("%s has a port specified (%d) but %s has no such targetPort.", - f.ResourceName(routeNode), routePort, f.ResourceName(svcNode)), - } - if len(svcNode.Spec.Ports) == 1 { - marker.Suggestion = osgraph.Suggestion(fmt.Sprintf("oc patch %s -p '{\"spec\":{\"port\":{\"targetPort\": %d}}}'", f.ResourceName(routeNode), svcNode.Spec.Ports[0].TargetPort.IntValue())) - } - - return marker - } - - for _, port := range svcNode.Spec.Ports { - if port.Name == routePortString { - return nil - } - } - - // route has a named port, service has no port with that name. 
- marker := &osgraph.Marker{ - Node: routeNode, - RelatedNodes: []graph.Node{svcNode}, - - Severity: osgraph.WarningSeverity, - Key: WrongRoutePortWarning, - Message: fmt.Sprintf("%s has a named port specified (%q) but %s has no such named port.", - f.ResourceName(routeNode), routePortString, f.ResourceName(svcNode)), - } - if len(svcNode.Spec.Ports) == 1 { - marker.Suggestion = osgraph.Suggestion(fmt.Sprintf("oc patch %s -p '{\"spec\":{\"port\":{\"targetPort\": %d}}}'", f.ResourceName(routeNode), svcNode.Spec.Ports[0].TargetPort.IntValue())) - } - - return marker - } - return nil -} - -func FindMissingTLSTerminationType(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) { - routeNode := uncastRouteNode.(*routegraph.RouteNode) - - if routeNode.Spec.TLS != nil && len(routeNode.Spec.TLS.Termination) == 0 { - markers = append(markers, osgraph.Marker{ - Node: routeNode, - - Severity: osgraph.ErrorSeverity, - Key: MissingTLSTerminationTypeErr, - Message: fmt.Sprintf("%s has a TLS configuration but no termination type specified.", f.ResourceName(routeNode)), - Suggestion: osgraph.Suggestion(fmt.Sprintf("oc patch %s -p '{\"spec\":{\"tls\":{\"termination\":\"\"}}}' (replace with a valid termination type: edge, passthrough, reencrypt)", f.ResourceName(routeNode)))}) - } - } - - return markers -} - -// FindRouteAdmissionFailures creates markers for any routes that were rejected by their routers -func FindRouteAdmissionFailures(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) { - routeNode := uncastRouteNode.(*routegraph.RouteNode) - Route: - for _, ingress := range routeNode.Status.Ingress { - switch status, condition := routeapi.IngressConditionStatus(&ingress, routeapi.RouteAdmitted); status { - case kapi.ConditionFalse: - markers = append(markers, 
osgraph.Marker{ - Node: routeNode, - - Severity: osgraph.ErrorSeverity, - Key: RouteNotAdmittedTypeErr, - Message: fmt.Sprintf("%s was not accepted by router %q: %s (%s)", f.ResourceName(routeNode), ingress.RouterName, condition.Message, condition.Reason), - }) - break Route - } - } - } - - return markers -} - -// FindMissingRouter creates markers for all routes in case there is no running router. -func FindMissingRouter(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) { - routeNode := uncastRouteNode.(*routegraph.RouteNode) - - if len(routeNode.Route.Status.Ingress) == 0 { - markers = append(markers, osgraph.Marker{ - Node: routeNode, - - Severity: osgraph.ErrorSeverity, - Key: MissingRequiredRouterErr, - Message: fmt.Sprintf("%s is routing traffic to svc/%s, but either the administrator has not installed a router or the router is not selecting this route.", f.ResourceName(routeNode), routeNode.Spec.To.Name), - Suggestion: osgraph.Suggestion("oc adm router -h"), - }) - } - } - - return markers -} - -func FindPathBasedPassthroughRoutes(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker { - markers := []osgraph.Marker{} - - for _, uncastRouteNode := range g.NodesByKind(routegraph.RouteNodeKind) { - routeNode := uncastRouteNode.(*routegraph.RouteNode) - - if len(routeNode.Spec.Path) > 0 && routeNode.Spec.TLS != nil && routeNode.Spec.TLS.Termination == routeapi.TLSTerminationPassthrough { - markers = append(markers, osgraph.Marker{ - Node: routeNode, - - Severity: osgraph.ErrorSeverity, - Key: PathBasedPassthroughErr, - Message: fmt.Sprintf("%s is path-based and uses passthrough termination, which is an invalid combination.", f.ResourceName(routeNode)), - Suggestion: osgraph.Suggestion(fmt.Sprintf("1. use spec.tls.termination=edge or 2. use spec.tls.termination=reencrypt and specify spec.tls.destinationCACertificate or 3. 
remove spec.path")), - }) - } - } - - return markers -} diff --git a/vendor/github.com/openshift/origin/pkg/route/graph/analysis/doc.go b/vendor/github.com/openshift/origin/pkg/route/graph/analysis/doc.go deleted file mode 100644 index 8f8de8708..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/analysis/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package analysis provides functions that analyse routes and setup markers -// that will be reported by oc status -package analysis diff --git a/vendor/github.com/openshift/origin/pkg/route/graph/doc.go b/vendor/github.com/openshift/origin/pkg/route/graph/doc.go deleted file mode 100644 index 71ae61755..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package graph contains graph utilities for routes -package graph diff --git a/vendor/github.com/openshift/origin/pkg/route/graph/edges.go b/vendor/github.com/openshift/origin/pkg/route/graph/edges.go deleted file mode 100644 index 38b816a10..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/edges.go +++ /dev/null @@ -1,45 +0,0 @@ -package graph - -import ( - "github.com/gonum/graph" - - kapi "k8s.io/kubernetes/pkg/api" - - osgraph "github.com/openshift/origin/pkg/api/graph" - kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes" - routegraph "github.com/openshift/origin/pkg/route/graph/nodes" -) - -const ( - // ExposedThroughRouteEdgeKind is an edge from a route to any object that - // is exposed through routes - ExposedThroughRouteEdgeKind = "ExposedThroughRoute" -) - -// AddRouteEdges adds an edge that connect a service to a route in the given graph -func AddRouteEdges(g osgraph.MutableUniqueGraph, node *routegraph.RouteNode) { - syntheticService := &kapi.Service{} - syntheticService.Namespace = node.Namespace - syntheticService.Name = node.Spec.To.Name - - serviceNode := kubegraph.FindOrCreateSyntheticServiceNode(g, syntheticService) - 
g.AddEdge(node, serviceNode, ExposedThroughRouteEdgeKind) - - for _, svc := range node.Spec.AlternateBackends { - syntheticService := &kapi.Service{} - syntheticService.Namespace = node.Namespace - syntheticService.Name = svc.Name - - serviceNode := kubegraph.FindOrCreateSyntheticServiceNode(g, syntheticService) - g.AddEdge(node, serviceNode, ExposedThroughRouteEdgeKind) - } -} - -// AddAllRouteEdges adds service edges to all route nodes in the given graph -func AddAllRouteEdges(g osgraph.MutableUniqueGraph) { - for _, node := range g.(graph.Graph).Nodes() { - if routeNode, ok := node.(*routegraph.RouteNode); ok { - AddRouteEdges(g, routeNode) - } - } -} diff --git a/vendor/github.com/openshift/origin/pkg/route/graph/nodes/doc.go b/vendor/github.com/openshift/origin/pkg/route/graph/nodes/doc.go deleted file mode 100644 index 91a17a79e..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/nodes/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package nodes contains graph functions and types for routes -package nodes diff --git a/vendor/github.com/openshift/origin/pkg/route/graph/nodes/nodes.go b/vendor/github.com/openshift/origin/pkg/route/graph/nodes/nodes.go deleted file mode 100644 index c7a954dec..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/nodes/nodes.go +++ /dev/null @@ -1,22 +0,0 @@ -package nodes - -import ( - "github.com/gonum/graph" - - osgraph "github.com/openshift/origin/pkg/api/graph" - routeapi "github.com/openshift/origin/pkg/route/api" -) - -// EnsureRouteNode adds a graph node for the specific route if it does not exist -func EnsureRouteNode(g osgraph.MutableUniqueGraph, route *routeapi.Route) *RouteNode { - return osgraph.EnsureUnique( - g, - RouteNodeName(route), - func(node osgraph.Node) graph.Node { - return &RouteNode{ - Node: node, - Route: route, - } - }, - ).(*RouteNode) -} diff --git a/vendor/github.com/openshift/origin/pkg/route/graph/nodes/types.go 
b/vendor/github.com/openshift/origin/pkg/route/graph/nodes/types.go deleted file mode 100644 index 18e779e7d..000000000 --- a/vendor/github.com/openshift/origin/pkg/route/graph/nodes/types.go +++ /dev/null @@ -1,33 +0,0 @@ -package nodes - -import ( - "reflect" - - osgraph "github.com/openshift/origin/pkg/api/graph" - routeapi "github.com/openshift/origin/pkg/route/api" -) - -var ( - RouteNodeKind = reflect.TypeOf(routeapi.Route{}).Name() -) - -func RouteNodeName(o *routeapi.Route) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(RouteNodeKind, o) -} - -type RouteNode struct { - osgraph.Node - *routeapi.Route -} - -func (n RouteNode) Object() interface{} { - return n.Route -} - -func (n RouteNode) String() string { - return string(RouteNodeName(n.Route)) -} - -func (*RouteNode) Kind() string { - return RouteNodeKind -} diff --git a/vendor/github.com/openshift/origin/pkg/sdn/api/plugin.go b/vendor/github.com/openshift/origin/pkg/sdn/api/plugin.go new file mode 100644 index 000000000..0a43a917e --- /dev/null +++ b/vendor/github.com/openshift/origin/pkg/sdn/api/plugin.go @@ -0,0 +1,31 @@ +package api + +import ( + "strings" +) + +const ( + SingleTenantPluginName = "redhat/openshift-ovs-subnet" + MultiTenantPluginName = "redhat/openshift-ovs-multitenant" + + IngressBandwidthAnnotation = "kubernetes.io/ingress-bandwidth" + EgressBandwidthAnnotation = "kubernetes.io/egress-bandwidth" + AssignMacvlanAnnotation = "pod.network.openshift.io/assign-macvlan" + AssignHostSubnetAnnotation = "pod.network.openshift.io/assign-subnet" + FixedVnidHost = "pod.network.openshift.io/fixed-vnid-host" +) + +func IsOpenShiftNetworkPlugin(pluginName string) bool { + switch strings.ToLower(pluginName) { + case SingleTenantPluginName, MultiTenantPluginName: + return true + } + return false +} + +func IsOpenShiftMultitenantNetworkPlugin(pluginName string) bool { + if strings.ToLower(pluginName) == MultiTenantPluginName { + return true + } + return false 
+} diff --git a/vendor/github.com/openshift/origin/pkg/user/reaper/bindings.go b/vendor/github.com/openshift/origin/pkg/user/reaper/bindings.go deleted file mode 100644 index 76a3a68f6..000000000 --- a/vendor/github.com/openshift/origin/pkg/user/reaper/bindings.go +++ /dev/null @@ -1,57 +0,0 @@ -package reaper - -import ( - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - - "github.com/openshift/origin/pkg/client" -) - -// reapClusterBindings removes the subject from cluster-level role bindings -func reapClusterBindings(removedSubject kapi.ObjectReference, c client.ClusterRoleBindingsInterface) error { - clusterBindings, err := c.ClusterRoleBindings().List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, binding := range clusterBindings.Items { - retainedSubjects := []kapi.ObjectReference{} - for _, subject := range binding.Subjects { - if subject != removedSubject { - retainedSubjects = append(retainedSubjects, subject) - } - } - if len(retainedSubjects) != len(binding.Subjects) { - updatedBinding := binding - updatedBinding.Subjects = retainedSubjects - if _, err := c.ClusterRoleBindings().Update(&updatedBinding); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot update clusterrolebinding/%s: %v", binding.Name, err) - } - } - } - return nil -} - -// reapNamespacedBindings removes the subject from namespaced role bindings -func reapNamespacedBindings(removedSubject kapi.ObjectReference, c client.RoleBindingsNamespacer) error { - namespacedBindings, err := c.RoleBindings(kapi.NamespaceAll).List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, binding := range namespacedBindings.Items { - retainedSubjects := []kapi.ObjectReference{} - for _, subject := range binding.Subjects { - if subject != removedSubject { - retainedSubjects = append(retainedSubjects, subject) - } - } - if len(retainedSubjects) != len(binding.Subjects) { - updatedBinding := binding - 
updatedBinding.Subjects = retainedSubjects - if _, err := c.RoleBindings(binding.Namespace).Update(&updatedBinding); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot update rolebinding/%s in %s: %v", binding.Name, binding.Namespace, err) - } - } - } - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/user/reaper/group.go b/vendor/github.com/openshift/origin/pkg/user/reaper/group.go deleted file mode 100644 index a34b41ad4..000000000 --- a/vendor/github.com/openshift/origin/pkg/user/reaper/group.go +++ /dev/null @@ -1,76 +0,0 @@ -package reaper - -import ( - "time" - - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/openshift/origin/pkg/client" -) - -func NewGroupReaper( - groupClient client.GroupsInterface, - clusterBindingClient client.ClusterRoleBindingsInterface, - bindingClient client.RoleBindingsNamespacer, - sccClient kclient.SecurityContextConstraintsInterface, -) kubectl.Reaper { - return &GroupReaper{ - groupClient: groupClient, - clusterBindingClient: clusterBindingClient, - bindingClient: bindingClient, - sccClient: sccClient, - } -} - -type GroupReaper struct { - groupClient client.GroupsInterface - clusterBindingClient client.ClusterRoleBindingsInterface - bindingClient client.RoleBindingsNamespacer - sccClient kclient.SecurityContextConstraintsInterface -} - -// Stop on a reaper is actually used for deletion. 
In this case, we'll delete referencing identities, clusterBindings, and bindings, -// then delete the group -func (r *GroupReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error { - removedSubject := kapi.ObjectReference{Kind: "Group", Name: name} - - if err := reapClusterBindings(removedSubject, r.clusterBindingClient); err != nil { - return err - } - - if err := reapNamespacedBindings(removedSubject, r.bindingClient); err != nil { - return err - } - - // Remove the group from sccs - sccs, err := r.sccClient.SecurityContextConstraints().List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, scc := range sccs.Items { - retainedGroups := []string{} - for _, group := range scc.Groups { - if group != name { - retainedGroups = append(retainedGroups, group) - } - } - if len(retainedGroups) != len(scc.Groups) { - updatedSCC := scc - updatedSCC.Groups = retainedGroups - if _, err := r.sccClient.SecurityContextConstraints().Update(&updatedSCC); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot update scc/%s: %v", scc.Name, err) - } - } - } - - // Remove the group - if err := r.groupClient.Groups().Delete(name); err != nil && !kerrors.IsNotFound(err) { - return err - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/user/reaper/user.go b/vendor/github.com/openshift/origin/pkg/user/reaper/user.go deleted file mode 100644 index 6c4084f0c..000000000 --- a/vendor/github.com/openshift/origin/pkg/user/reaper/user.go +++ /dev/null @@ -1,104 +0,0 @@ -package reaper - -import ( - "time" - - "github.com/golang/glog" - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" - kclient "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/openshift/origin/pkg/client" -) - -func NewUserReaper( - userClient client.UsersInterface, - groupClient client.GroupsInterface, - clusterBindingClient 
client.ClusterRoleBindingsInterface, - bindingClient client.RoleBindingsNamespacer, - sccClient kclient.SecurityContextConstraintsInterface, -) kubectl.Reaper { - return &UserReaper{ - userClient: userClient, - groupClient: groupClient, - clusterBindingClient: clusterBindingClient, - bindingClient: bindingClient, - sccClient: sccClient, - } -} - -type UserReaper struct { - userClient client.UsersInterface - groupClient client.GroupsInterface - clusterBindingClient client.ClusterRoleBindingsInterface - bindingClient client.RoleBindingsNamespacer - sccClient kclient.SecurityContextConstraintsInterface -} - -// Stop on a reaper is actually used for deletion. In this case, we'll delete referencing identities, clusterBindings, and bindings, -// then delete the user -func (r *UserReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error { - removedSubject := kapi.ObjectReference{Kind: "User", Name: name} - - if err := reapClusterBindings(removedSubject, r.clusterBindingClient); err != nil { - return err - } - - if err := reapNamespacedBindings(removedSubject, r.bindingClient); err != nil { - return err - } - - // Remove the user from sccs - sccs, err := r.sccClient.SecurityContextConstraints().List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, scc := range sccs.Items { - retainedUsers := []string{} - for _, user := range scc.Users { - if user != name { - retainedUsers = append(retainedUsers, user) - } - } - if len(retainedUsers) != len(scc.Users) { - updatedSCC := scc - updatedSCC.Users = retainedUsers - if _, err := r.sccClient.SecurityContextConstraints().Update(&updatedSCC); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot update scc/%s: %v", scc.Name, err) - } - } - } - - // Remove the user from groups - groups, err := r.groupClient.Groups().List(kapi.ListOptions{}) - if err != nil { - return err - } - for _, group := range groups.Items { - retainedUsers := []string{} - for _, user := range 
group.Users { - if user != name { - retainedUsers = append(retainedUsers, user) - } - } - if len(retainedUsers) != len(group.Users) { - updatedGroup := group - updatedGroup.Users = retainedUsers - if _, err := r.groupClient.Groups().Update(&updatedGroup); err != nil && !kerrors.IsNotFound(err) { - glog.Infof("Cannot update groups/%s: %v", group.Name, err) - } - } - } - - // Intentionally leave identities that reference the user - // The user does not "own" the identities - // If the admin wants to remove the identities, that is a distinct operation - - // Remove the user - if err := r.userClient.Users().Delete(name); err != nil && !kerrors.IsNotFound(err) { - return err - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/util/doc.go b/vendor/github.com/openshift/origin/pkg/util/doc.go deleted file mode 100644 index 3ce2be728..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package util implements various utility functions used in both testing and -// implementation of OpenShift. Package util may not depend on any other -// package in the OpenShift package tree. -package util diff --git a/vendor/github.com/openshift/origin/pkg/util/dot/dot.go b/vendor/github.com/openshift/origin/pkg/util/dot/dot.go deleted file mode 100644 index 01c90b618..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/dot/dot.go +++ /dev/null @@ -1,14 +0,0 @@ -package dot - -import ( - "fmt" - "strings" -) - -// Quote takes an arbitrary DOT ID and escapes any quotes that is contains. -// The resulting string is quoted again to guarantee that it is a valid ID. 
-// DOT graph IDs can be any double-quoted string -// See http://www.graphviz.org/doc/info/lang.html -func Quote(id string) string { - return fmt.Sprintf(`"%s"`, strings.Replace(id, `"`, `\"`, -1)) -} diff --git a/vendor/github.com/openshift/origin/pkg/util/errors/doc.go b/vendor/github.com/openshift/origin/pkg/util/errors/doc.go deleted file mode 100644 index 9d0d52db7..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/errors/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package errors provides utility functions for various errors -package errors diff --git a/vendor/github.com/openshift/origin/pkg/util/errors/errors.go b/vendor/github.com/openshift/origin/pkg/util/errors/errors.go deleted file mode 100644 index 1da1b0ae3..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/errors/errors.go +++ /dev/null @@ -1,39 +0,0 @@ -package errors - -import "strings" - -import ( - kapierrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// TolerateNotFoundError tolerates 'not found' errors -func TolerateNotFoundError(err error) error { - if kapierrors.IsNotFound(err) { - return nil - } - return err -} - -// ErrorToSentence will capitalize the first letter of the error -// message and add a period to the end if one is not present. -func ErrorToSentence(err error) string { - msg := err.Error() - if len(msg) == 0 { - return msg - } - msg = strings.ToUpper(msg)[:1] + msg[1:] - if !strings.HasSuffix(msg, ".") { - msg = msg + "." 
- } - return msg -} - -// IsTimeoutErr returns true if the error indicates timeout -func IsTimeoutErr(err error) bool { - e, ok := err.(*kapierrors.StatusError) - if !ok { - return false - } - return e.ErrStatus.Reason == unversioned.StatusReasonTimeout -} diff --git a/vendor/github.com/openshift/origin/pkg/util/etcd.go b/vendor/github.com/openshift/origin/pkg/util/etcd.go deleted file mode 100644 index 754f49d9e..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/etcd.go +++ /dev/null @@ -1,21 +0,0 @@ -package util - -import ( - "path" - - kapi "k8s.io/kubernetes/pkg/api" - kerrors "k8s.io/kubernetes/pkg/api/errors" -) - -// NoNamespaceKeyFunc is the default function for constructing etcd paths to a resource relative to prefix enforcing -// If a namespace is on context, it errors. -func NoNamespaceKeyFunc(ctx kapi.Context, prefix string, name string) (string, error) { - ns, ok := kapi.NamespaceFrom(ctx) - if ok && len(ns) > 0 { - return "", kerrors.NewBadRequest("Namespace parameter is not allowed.") - } - if len(name) == 0 { - return "", kerrors.NewBadRequest("Name parameter required.") - } - return path.Join(prefix, name), nil -} diff --git a/vendor/github.com/openshift/origin/pkg/util/labels.go b/vendor/github.com/openshift/origin/pkg/util/labels.go deleted file mode 100644 index a7a49087d..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/labels.go +++ /dev/null @@ -1,278 +0,0 @@ -package util - -import ( - "fmt" - "reflect" - - kmeta "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - - deployapi "github.com/openshift/origin/pkg/deploy/api" -) - -// MergeInto flags -const ( - OverwriteExistingDstKey = 1 << iota - ErrorOnExistingDstKey - ErrorOnDifferentDstKeyValue -) - -// AddObjectLabels adds new label(s) to a single runtime.Object -func AddObjectLabels(obj runtime.Object, labels labels.Set) error { - if labels == nil { - return nil - } - - accessor, err := 
kmeta.Accessor(obj) - - if err != nil { - if _, ok := obj.(*runtime.Unstructured); !ok { - // error out if it's not possible to get an accessor and it's also not an unstructured object - return err - } - } else { - metaLabels := accessor.GetLabels() - if metaLabels == nil { - metaLabels = make(map[string]string) - } - - switch objType := obj.(type) { - case *deployapi.DeploymentConfig: - if err := addDeploymentConfigNestedLabels(objType, labels); err != nil { - return fmt.Errorf("unable to add nested labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err) - } - } - - if err := MergeInto(metaLabels, labels, OverwriteExistingDstKey); err != nil { - return fmt.Errorf("unable to add labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err) - } - - accessor.SetLabels(metaLabels) - - return nil - } - - // handle unstructured object - // TODO: allow meta.Accessor to handle runtime.Unstructured - if unstruct, ok := obj.(*runtime.Unstructured); ok && unstruct.Object != nil { - // the presence of "metadata" is sufficient for us to apply the rules for Kube-like - // objects. 
- // TODO: add swagger detection to allow this to happen more effectively - if obj, ok := unstruct.Object["metadata"]; ok { - if m, ok := obj.(map[string]interface{}); ok { - - existing := make(map[string]string) - if l, ok := m["labels"]; ok { - if found, ok := interfaceToStringMap(l); ok { - existing = found - } - } - if err := MergeInto(existing, labels, OverwriteExistingDstKey); err != nil { - return err - } - m["labels"] = mapToGeneric(existing) - } - return nil - } - - // only attempt to set root labels if a root object called labels exists - // TODO: add swagger detection to allow this to happen more effectively - if obj, ok := unstruct.Object["labels"]; ok { - existing := make(map[string]string) - if found, ok := interfaceToStringMap(obj); ok { - existing = found - } - if err := MergeInto(existing, labels, OverwriteExistingDstKey); err != nil { - return err - } - unstruct.Object["labels"] = mapToGeneric(existing) - return nil - } - } - - return nil -} - -// AddObjectAnnotations adds new annotation(s) to a single runtime.Object -func AddObjectAnnotations(obj runtime.Object, annotations map[string]string) error { - if len(annotations) == 0 { - return nil - } - - accessor, err := kmeta.Accessor(obj) - - if err != nil { - if _, ok := obj.(*runtime.Unstructured); !ok { - // error out if it's not possible to get an accessor and it's also not an unstructured object - return err - } - } else { - metaAnnotations := accessor.GetAnnotations() - if metaAnnotations == nil { - metaAnnotations = make(map[string]string) - } - - switch objType := obj.(type) { - case *deployapi.DeploymentConfig: - if err := addDeploymentConfigNestedAnnotations(objType, annotations); err != nil { - return fmt.Errorf("unable to add nested annotations to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err) - } - } - - MergeInto(metaAnnotations, annotations, OverwriteExistingDstKey) - accessor.SetAnnotations(metaAnnotations) - - return nil - } - - // handle unstructured 
object - // TODO: allow meta.Accessor to handle runtime.Unstructured - if unstruct, ok := obj.(*runtime.Unstructured); ok && unstruct.Object != nil { - // the presence of "metadata" is sufficient for us to apply the rules for Kube-like - // objects. - // TODO: add swagger detection to allow this to happen more effectively - if obj, ok := unstruct.Object["metadata"]; ok { - if m, ok := obj.(map[string]interface{}); ok { - - existing := make(map[string]string) - if l, ok := m["annotations"]; ok { - if found, ok := interfaceToStringMap(l); ok { - existing = found - } - } - if err := MergeInto(existing, annotations, OverwriteExistingDstKey); err != nil { - return err - } - m["annotations"] = mapToGeneric(existing) - } - return nil - } - - // only attempt to set root annotations if a root object called annotations exists - // TODO: add swagger detection to allow this to happen more effectively - if obj, ok := unstruct.Object["annotations"]; ok { - existing := make(map[string]string) - if found, ok := interfaceToStringMap(obj); ok { - existing = found - } - if err := MergeInto(existing, annotations, OverwriteExistingDstKey); err != nil { - return err - } - unstruct.Object["annotations"] = mapToGeneric(existing) - return nil - } - } - - return nil -} - -// addDeploymentConfigNestedLabels adds new label(s) to a nested labels of a single DeploymentConfig object -func addDeploymentConfigNestedLabels(obj *deployapi.DeploymentConfig, labels labels.Set) error { - if obj.Spec.Template.Labels == nil { - obj.Spec.Template.Labels = make(map[string]string) - } - if err := MergeInto(obj.Spec.Template.Labels, labels, OverwriteExistingDstKey); err != nil { - return fmt.Errorf("unable to add labels to Template.DeploymentConfig.Template.ControllerTemplate.Template: %v", err) - } - return nil -} - -func addDeploymentConfigNestedAnnotations(obj *deployapi.DeploymentConfig, annotations map[string]string) error { - if obj.Spec.Template == nil { - return nil - } - - if 
obj.Spec.Template.Annotations == nil { - obj.Spec.Template.Annotations = make(map[string]string) - } - - if err := MergeInto(obj.Spec.Template.Annotations, annotations, OverwriteExistingDstKey); err != nil { - return fmt.Errorf("unable to add annotations to Template.DeploymentConfig.Template.ControllerTemplate.Template: %v", err) - } - return nil -} - -// interfaceToStringMap extracts a map[string]string from a map[string]interface{} -func interfaceToStringMap(obj interface{}) (map[string]string, bool) { - if obj == nil { - return nil, false - } - lm, ok := obj.(map[string]interface{}) - if !ok { - return nil, false - } - existing := make(map[string]string) - for k, v := range lm { - switch t := v.(type) { - case string: - existing[k] = t - } - } - return existing, true -} - -// mapToGeneric converts a map[string]string into a map[string]interface{} -func mapToGeneric(obj map[string]string) map[string]interface{} { - if obj == nil { - return nil - } - res := make(map[string]interface{}) - for k, v := range obj { - res[k] = v - } - return res -} - -// MergeInto merges items from a src map into a dst map. -// Returns an error when the maps are not of the same type. -// Flags: -// - ErrorOnExistingDstKey -// When set: Return an error if any of the dst keys is already set. -// - ErrorOnDifferentDstKeyValue -// When set: Return an error if any of the dst keys is already set -// to a different value than src key. -// - OverwriteDstKey -// When set: Overwrite existing dst key value with src key value. 
-func MergeInto(dst, src interface{}, flags int) error { - dstVal := reflect.ValueOf(dst) - srcVal := reflect.ValueOf(src) - - if dstVal.Kind() != reflect.Map { - return fmt.Errorf("dst is not a valid map: %v", dstVal.Kind()) - } - if srcVal.Kind() != reflect.Map { - return fmt.Errorf("src is not a valid map: %v", srcVal.Kind()) - } - if dstTyp, srcTyp := dstVal.Type(), srcVal.Type(); !dstTyp.AssignableTo(srcTyp) { - return fmt.Errorf("type mismatch, can't assign '%v' to '%v'", srcTyp, dstTyp) - } - - if dstVal.IsNil() { - return fmt.Errorf("dst value is nil") - } - if srcVal.IsNil() { - // Nothing to merge - return nil - } - - for _, k := range srcVal.MapKeys() { - if dstVal.MapIndex(k).IsValid() { - if flags&ErrorOnExistingDstKey != 0 { - return fmt.Errorf("dst key already set (ErrorOnExistingDstKey=1), '%v'='%v'", k, dstVal.MapIndex(k)) - } - if dstVal.MapIndex(k).String() != srcVal.MapIndex(k).String() { - if flags&ErrorOnDifferentDstKeyValue != 0 { - return fmt.Errorf("dst key already set to a different value (ErrorOnDifferentDstKeyValue=1), '%v'='%v'", k, dstVal.MapIndex(k)) - } - if flags&OverwriteExistingDstKey != 0 { - dstVal.SetMapIndex(k, srcVal.MapIndex(k)) - } - } - } else { - dstVal.SetMapIndex(k, srcVal.MapIndex(k)) - } - } - - return nil -} diff --git a/vendor/github.com/openshift/origin/pkg/util/parallel/parallel.go b/vendor/github.com/openshift/origin/pkg/util/parallel/parallel.go deleted file mode 100644 index 902cfb665..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/parallel/parallel.go +++ /dev/null @@ -1,27 +0,0 @@ -package parallel - -import ( - "sync" -) - -// Run executes the provided functions in parallel and collects any errors they return. 
-func Run(fns ...func() error) []error { - wg := sync.WaitGroup{} - errCh := make(chan error, len(fns)) - wg.Add(len(fns)) - for i := range fns { - go func(i int) { - if err := fns[i](); err != nil { - errCh <- err - } - wg.Done() - }(i) - } - wg.Wait() - close(errCh) - var errs []error - for err := range errCh { - errs = append(errs, err) - } - return errs -} diff --git a/vendor/github.com/openshift/origin/pkg/util/strings.go b/vendor/github.com/openshift/origin/pkg/util/strings.go deleted file mode 100644 index cf1b4804b..000000000 --- a/vendor/github.com/openshift/origin/pkg/util/strings.go +++ /dev/null @@ -1,21 +0,0 @@ -package util - -import "sort" - -// UniqueStrings returns a sorted, uniquified slice of the specified strings -func UniqueStrings(strings []string) []string { - m := make(map[string]bool, len(strings)) - for _, s := range strings { - m[s] = true - } - - i := 0 - strings = make([]string, len(m), len(m)) - for s := range m { - strings[i] = s - i++ - } - - sort.Strings(strings) - return strings -} diff --git a/vendor/github.com/openshift/origin/pkg/version/version.go b/vendor/github.com/openshift/origin/pkg/version/version.go index 48b8d85dc..5fd7e15bf 100644 --- a/vendor/github.com/openshift/origin/pkg/version/version.go +++ b/vendor/github.com/openshift/origin/pkg/version/version.go @@ -1,7 +1,6 @@ package version import ( - "regexp" "strings" "github.com/prometheus/client_golang/prometheus" @@ -54,33 +53,12 @@ func (info Info) String() string { return version } -var ( - reCommitSegment = regexp.MustCompile(`\+[0-9a-f]{6,14}$`) - reCommitIncrement = regexp.MustCompile(`^[0-9a-f]+$`) -) - // LastSemanticVersion attempts to return a semantic version from the GitVersion - which // is either + or on release boundaries. 
func (info Info) LastSemanticVersion() string { version := info.GitVersion - parts := strings.Split(version, "-") - // strip the modifier - if len(parts) > 1 && parts[len(parts)-1] == "dirty" { - parts = parts[:len(parts)-1] - } - // strip the Git commit - if len(parts) > 0 && reCommitSegment.MatchString(parts[len(parts)-1]) { - parts[len(parts)-1] = reCommitSegment.ReplaceAllString(parts[len(parts)-1], "") - if len(parts[len(parts)-1]) == 0 { - parts = parts[:len(parts)-1] - } - // strip a version increment, but only if we found the commit - if len(parts) > 1 && reCommitIncrement.MatchString(parts[len(parts)-1]) { - parts = parts[:len(parts)-1] - } - } - - return strings.Join(parts, "-") + parts := strings.Split(version, "+") + return parts[0] } func init() { diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04ed..000000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 81032bed8..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Overview -This is the [Prometheus](http://www.prometheus.io) telemetric -instrumentation client [Go](http://golang.org) client library. 
It -enable authors to define process-space metrics for their servers and -expose them through a web service interface for extraction, -aggregation, and a whole slew of other post processing techniques. - -# Installing - $ go get github.com/prometheus/client_golang/prometheus - -# Example -```go -package main - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - indexed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "my_company", - Subsystem: "indexer", - Name: "documents_indexed", - Help: "The number of documents indexed.", - }) - size = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "my_company", - Subsystem: "storage", - Name: "documents_total_size_bytes", - Help: "The total size of all documents in the storage.", - }) -) - -func main() { - http.Handle("/metrics", prometheus.Handler()) - - indexed.Inc() - size.Set(5) - - http.ListenAndServe(":8080", nil) -} - -func init() { - prometheus.MustRegister(indexed) - prometheus.MustRegister(size) -} -``` - -# Documentation - -[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. 
- -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml deleted file mode 100644 index 25e169dd0..000000000 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -sudo: false -language: go -go: - - 1.3 - - 1.4 - - 1.5 - - tip diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md deleted file mode 100644 index f1c27ccb0..000000000 --- a/vendor/github.com/prometheus/procfs/AUTHORS.md +++ /dev/null @@ -1,20 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Tobias Schmidt - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Armen Baghumian -* Bjoern Rabenstein -* David Cournapeau -* Ji-Hoon, Seol -* Jonas Große Sundrup -* Julius Volz -* Matthias Rampke -* Nicky Gerritsen -* Rémi Audebert -* Tobias Schmidt diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 5705f0fbe..000000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. 
- -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index e8acbbc5e..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ci: - go fmt - go vet - go test -v ./... - go get github.com/golang/lint/golint - golint *.go diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 6e7ee6b8b..000000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. 
- -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 1b8c7c261..000000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe - -cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068..000000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index 6e84be54d..000000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -go: - - 1.4.3 - - 1.5.4 - - 1.6.3 - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - mkdir -p bin - - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck - - chmod +x bin/shellcheck -script: - - PATH=$PATH:$PWD/bin go test -v ./... 
- - go build diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index b338a0e44..000000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,898 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. - -Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [Parse (CLI)](https://parse.com/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) - - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) - -![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. 
- -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic detailed help for `app help [command]` -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibilty to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user-friendly as -possible. Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. Help is automatically generated and -available for the application or for a specific command using either the help -command or the `--help` flag. - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. 
- -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - > hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - > git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -A Command has the following structure: - -```go -type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. -} -``` - -## Flags - -A Flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with its corresponding flags, then the -tree is assigned to the commander which is finally executed. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. 
This command will install the `cobra` generator executible -along with the library: - - > go get -v github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra based -application will follow the following organizational structure. - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import "{pathToYourApp}/cmd" - -func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -In order to use the cobra command, compile it using the following command: - - > go install github.com/spf13/cobra/cobra - -This will create the cobra executable under your go path bin directory! - -### cobra init - -The `cobra init [yourApp]` command will create your initial application code -for you. It is a very powerful application that will populate your program with -the right structure so you can immediately enjoy all the benefits of Cobra. It -will also automatically apply the license you specify to your application. - -Cobra init is pretty smart. You can provide it a full path, or simply a path -similar to what is expected in the import. - -``` -cobra init github.com/spf13/newAppName -``` - -### cobra add - -Once an application is initialized Cobra can create additional commands for you. 
-Let's say you created an app and you wanted the following commands for it: - -* app serve -* app config -* app config create - -In your project directory (where your main.go file is) you would run the following: - -``` -cobra add serve -cobra add config -cobra add create -p 'configCmd' -``` - -Once you have run these three commands you would have an app structure that would look like: - -``` - ▾ app/ - ▾ cmd/ - serve.go - config.go - create.go - main.go -``` - -at this point you can run `go run main.go` and it would run your app. `go run -main.go serve`, `go run main.go config`, `go run main.go config create` along -with `go run main.go help serve`, etc would all work. - -Obviously you haven't added your own code to these yet, the commands are ready -for you to give them their tasks. Have fun. - -### Configuring the cobra generator - -The cobra generator will be easier to use if you provide a simple configuration -file which will help you eliminate providing a bunch of repeated information in -flags over and over. - -An example ~/.cobra.yaml file: - -```yaml -author: Steve Francia -license: MIT -``` - -You can specify no license by setting `license` to `none` or you can specify -a custom license: - -```yaml -license: - header: This file is part of {{ .appName }}. - text: | - {{ .copyright }} - - This is my license. There are many like it, but this one is mine. - My license is my best friend. It is my life. I must master it as I must - master my life. -``` - -## Manually implementing Cobra - -To manually implement cobra you need to create a bare main.go file and a RootCmd file. -You will optionally provide additional commands as you see fit. - -### Create the root command - -The root command represents your binary itself. - - -#### Manually create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. 
- -Ideally you place this in app/cmd/root.go: - -```go -var RootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} -``` - -You will additionally define flags and handle configuration in your init() function. - -for example cmd/root.go: - -```go -func init() { - cobra.OnInitialize(initConfig) - RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. 
- -```go -package main - -import "{pathToYourApp}/cmd" - -func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} -``` - - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "github.com/spf13/cobra" -) - -func init() { - RootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Attach command to its parent - - -If you notice in the above example we attach the command to its parent. In -this case the parent is the rootCmd. In this example we are attaching it to the -root, but commands can be attached at any level. - -```go -RootCmd.AddCommand(versionCmd) -``` - -### Remove a command from its parent - -Removing a command is not a common action in simple programs, but it allows 3rd -parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing -root command, and we replace it with our own version: - -```go -mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) -mainlib.RootCmd.AddCommand(versionCmd) -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. 
- -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. - For many years people have printed back to the screen. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. - Echo works a lot like print, except it has a child command. 
- `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing - a count and a string.`, - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## The Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - > hugo help - - hugo is the main command, used to build your Hugo site. - - Hugo is a Fast and Flexible Static Site Generator - built with love by spf13 and friends in Go. - - Complete documentation is available at http://gohugo.io/. - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. 
- convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. - - Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed - - Use "hugo [command] --help" 
for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use. - -The default help command is - -```go -func (c *Command) initHelp() { - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - } - } - c.AddCommand(c.helpCommand) -} -``` - -You can provide your own command, function or template through the following methods: - -```go -command.SetHelpCommand(cmd *Command) - -command.SetHelpFunc(f func(*Command, []string)) - -command.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. - convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. 
- - Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. 
- -The default usage function is: - -```go -return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err -} -``` - -Like help, the function and template are overridable through public methods: - -```go -command.SetUsageFunc(f func(*Command) error) - -command.SetUsageTemplate(s string) -``` - -## PreRun or PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: 
%v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - _ = rootCmd.Execute() - fmt.Print("\n") - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - _ = rootCmd.Execute() -} -``` - - -## Alternative Error Handling - -Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top, -providing a way to handle the errors in one location. The current list of functions that return an error is: - -* PersistentPreRunE -* PreRunE -* RunE -* PostRunE -* PersistentPostRunE - -If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage` -and `SilenceErrors` to `false` on the command. A child command respects these flags if they are set on the parent -command. - -**Example Usage using RunE:** - -```go -package main - -import ( - "errors" - "log" - - "github.com/spf13/cobra" -) - -func main() { - var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - RunE: func(cmd *cobra.Command, args []string) error { - // Do Stuff Here - return errors.New("some random error") - }, - } - - if err := rootCmd.Execute(); err != nil { - log.Fatal(err) - } -} -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. 
For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating Markdown-formatted documentation for your command - -Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). - -## Generating man pages for your command - -Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). - -## Generating bash completions for your command - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which, when called, will print -out everything Cobra knows about the flags for each command. 
- -### Example - -```go -command.DebugFlags() -``` - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## Extensions - -Libraries for extending Cobra: - -* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. - -## ToDo -* Launch proper documentation site - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13), -[eparis](https://github.com/eparis), -[bep](https://github.com/bep), and many more! - -## License - -Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 6e3b71f13..000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,206 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" -) - -func main() { - kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. - -## Creating your own custom functions - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? 
-eq 0 ]]; then - return 0 - fi -} - -__custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] 
(RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go` -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. 
- -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. - -# Specifiy custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specifiy -a custom flag completion function with cobra.BashCompCustom: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index 580ad22f5..000000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false - -language: go - -go: - - 1.5.4 - - 1.6.3 - - tip - -install: - - go get github.com/golang/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... 
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index 08ad94565..000000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,275 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. 
- -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP("boolname", "b", true, "help message") -} -flag.VarP(&flagVar, "varname", "v", 1234, "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. 
The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. 
- -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". 
-```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/ogier/pflag][2] after -installation. 
- -[2]: http://localhost:6060/pkg/github.com/ogier/pflag -[3]: http://godoc.org/github.com/ogier/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go index d272e40bd..c4c5c0bfd 100644 --- a/vendor/github.com/spf13/pflag/bool.go +++ b/vendor/github.com/spf13/pflag/bool.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // optional interface to indicate boolean flags that can be // supplied without "=value" text @@ -30,7 +27,7 @@ func (b *boolValue) Type() string { return "bool" } -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } +func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } func (b *boolValue) IsBoolFlag() bool { return true } diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index 7b1f142e7..d22be41f2 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- count Value type countValue int @@ -28,7 +25,7 @@ func (i *countValue) Type() string { return "count" } -func (i *countValue) String() string { return fmt.Sprintf("%v", *i) } +func (i *countValue) String() string { return strconv.Itoa(int(*i)) } func countConv(sval string) (interface{}, error) { i, err := strconv.Atoi(sval) diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 965df1379..fa815642e 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -416,23 +416,39 @@ func Set(name, value string) error { // otherwise, the default values of all defined flags in the set. func (f *FlagSet) PrintDefaults() { usages := f.FlagUsages() - fmt.Fprintf(f.out(), "%s", usages) -} - -// isZeroValue guesses whether the string represents the zero -// value for a flag. 
It is not accurate but in practice works OK. -func isZeroValue(value string) bool { - switch value { - case "false": - return true - case "": - return true - case "": - return true - case "0": - return true + fmt.Fprint(f.out(), usages) +} + +// defaultIsZeroValue returns true if the default value for this flag represents +// a zero value. +func (f *Flag) defaultIsZeroValue() bool { + switch f.Value.(type) { + case boolFlag: + return f.DefValue == "false" + case *durationValue: + // Beginning in Go 1.7, duration zero values are "0s" + return f.DefValue == "0" || f.DefValue == "0s" + case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: + return f.DefValue == "0" + case *stringValue: + return f.DefValue == "" + case *ipValue, *ipMaskValue, *ipNetValue: + return f.DefValue == "" + case *intSliceValue, *stringSliceValue, *stringArrayValue: + return f.DefValue == "[]" + default: + switch f.Value.String() { + case "false": + return true + case "": + return true + case "": + return true + case "0": + return true + } + return false } - return false } // UnquoteUsage extracts a back-quoted name from the usage @@ -455,22 +471,19 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { break // Only one back quote; use type name. } } - // No explicit name, so use type if we can find one. 
- name = "value" - switch flag.Value.(type) { - case boolFlag: + + name = flag.Value.Type() + switch name { + case "bool": name = "" - case *durationValue: - name = "duration" - case *float64Value: + case "float64": name = "float" - case *intValue, *int64Value: + case "int64": name = "int" - case *stringValue: - name = "string" - case *uintValue, *uint64Value: + case "uint64": name = "uint" } + return } @@ -501,7 +514,7 @@ func (f *FlagSet) FlagUsages() string { if len(flag.NoOptDefVal) > 0 { switch flag.Value.Type() { case "string": - line += fmt.Sprintf("[=%q]", flag.NoOptDefVal) + line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) case "bool": if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) @@ -519,9 +532,9 @@ func (f *FlagSet) FlagUsages() string { } line += usage - if !isZeroValue(flag.DefValue) { + if !flag.defaultIsZeroValue() { if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) + line += fmt.Sprintf(" (default \"%s\")", flag.DefValue) } else { line += fmt.Sprintf(" (default %s)", flag.DefValue) } diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go index 7683fae1b..a243f81f7 100644 --- a/vendor/github.com/spf13/pflag/float32.go +++ b/vendor/github.com/spf13/pflag/float32.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- float32 Value type float32Value float32 @@ -23,7 +20,7 @@ func (f *float32Value) Type() string { return "float32" } -func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } func float32Conv(sval string) (interface{}, error) { v, err := strconv.ParseFloat(sval, 32) diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go index 50fbf8cc1..04b5492a7 100644 --- a/vendor/github.com/spf13/pflag/float64.go +++ 
b/vendor/github.com/spf13/pflag/float64.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- float64 Value type float64Value float64 @@ -23,7 +20,7 @@ func (f *float64Value) Type() string { return "float64" } -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } func float64Conv(sval string) (interface{}, error) { return strconv.ParseFloat(sval, 64) diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go index b6560368a..1474b89df 100644 --- a/vendor/github.com/spf13/pflag/int.go +++ b/vendor/github.com/spf13/pflag/int.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int Value type intValue int @@ -23,7 +20,7 @@ func (i *intValue) Type() string { return "int" } -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } +func (i *intValue) String() string { return strconv.Itoa(int(*i)) } func intConv(sval string) (interface{}, error) { return strconv.Atoi(sval) diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go index 41659a9af..9b95944f0 100644 --- a/vendor/github.com/spf13/pflag/int32.go +++ b/vendor/github.com/spf13/pflag/int32.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int32 Value type int32Value int32 @@ -23,7 +20,7 @@ func (i *int32Value) Type() string { return "int32" } -func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } func int32Conv(sval string) (interface{}, error) { v, err := strconv.ParseInt(sval, 0, 32) diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go index 6e67e380f..0026d781d 100644 --- a/vendor/github.com/spf13/pflag/int64.go +++ 
b/vendor/github.com/spf13/pflag/int64.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int64 Value type int64Value int64 @@ -23,7 +20,7 @@ func (i *int64Value) Type() string { return "int64" } -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } func int64Conv(sval string) (interface{}, error) { return strconv.ParseInt(sval, 0, 64) diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go index 400db21f5..4da92228e 100644 --- a/vendor/github.com/spf13/pflag/int8.go +++ b/vendor/github.com/spf13/pflag/int8.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int8 Value type int8Value int8 @@ -23,7 +20,7 @@ func (i *int8Value) Type() string { return "int8" } -func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } func int8Conv(sval string) (interface{}, error) { v, err := strconv.ParseInt(sval, 0, 8) diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go index e296136e5..04e0a26ff 100644 --- a/vendor/github.com/spf13/pflag/string.go +++ b/vendor/github.com/spf13/pflag/string.go @@ -1,7 +1,5 @@ package pflag -import "fmt" - // -- string Value type stringValue string @@ -18,7 +16,7 @@ func (s *stringValue) Type() string { return "string" } -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } +func (s *stringValue) String() string { return string(*s) } func stringConv(sval string) (interface{}, error) { return sval, nil diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go new file mode 100644 index 000000000..93b4e4329 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -0,0 +1,109 @@ +package pflag + 
+import ( + "fmt" +) + +var _ = fmt.Fprint + +// -- stringArray Value +type stringArrayValue struct { + value *[]string + changed bool +} + +func newStringArrayValue(val []string, p *[]string) *stringArrayValue { + ssv := new(stringArrayValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringArrayValue) Set(val string) error { + if !s.changed { + *s.value = []string{val} + s.changed = true + } else { + *s.value = append(*s.value, val) + } + return nil +} + +func (s *stringArrayValue) Type() string { + return "stringArray" +} + +func (s *stringArrayValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringArrayConv(sval string) (interface{}, error) { + sval = sval[1 : len(sval)-1] + // An empty string would cause a array with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringArray return the []string value of a flag with the given name +func (f *FlagSet) GetStringArray(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringArray", stringArrayConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. 
+// The argument p points to a []string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArrayVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, "", value, usage) + return &p +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArray(name string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, "", value, usage) +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index b53648b2e..7829cfafb 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -1,6 +1,7 @@ package pflag import ( + "bytes" "encoding/csv" "fmt" "strings" @@ -21,10 +22,28 @@ func newStringSliceValue(val []string, p *[]string) *stringSliceValue { return ssv } -func (s *stringSliceValue) Set(val string) error { +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } stringReader := strings.NewReader(val) csvReader := csv.NewReader(stringReader) - v, err := csvReader.Read() + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil +} + +func (s *stringSliceValue) Set(val string) error { + v, err := readAsCSV(val) if err != nil { return err } @@ -41,16 +60,18 @@ func (s *stringSliceValue) Type() string { return "stringSlice" } -func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" } +func (s *stringSliceValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} func stringSliceConv(sval string) (interface{}, error) { - sval = strings.Trim(sval, "[]") + sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string if len(sval) == 0 { return []string{}, nil } - v := strings.Split(sval, ",") - return v, nil + return readAsCSV(sval) } // GetStringSlice return the []string value of a flag with the given name diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go index 
e142b4996..dcbc2b758 100644 --- a/vendor/github.com/spf13/pflag/uint.go +++ b/vendor/github.com/spf13/pflag/uint.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint Value type uintValue uint @@ -23,7 +20,7 @@ func (i *uintValue) Type() string { return "uint" } -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } +func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } func uintConv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 0) diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go index 5c96c19dc..7e9914edd 100644 --- a/vendor/github.com/spf13/pflag/uint16.go +++ b/vendor/github.com/spf13/pflag/uint16.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint16 value type uint16Value uint16 @@ -12,7 +9,7 @@ func newUint16Value(val uint16, p *uint16) *uint16Value { *p = val return (*uint16Value)(p) } -func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) } + func (i *uint16Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 16) *i = uint16Value(v) @@ -23,6 +20,8 @@ func (i *uint16Value) Type() string { return "uint16" } +func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + func uint16Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 16) if err != nil { diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go index 294fcaa32..d8024539b 100644 --- a/vendor/github.com/spf13/pflag/uint32.go +++ b/vendor/github.com/spf13/pflag/uint32.go @@ -1,18 +1,15 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" -// -- uint16 value +// -- uint32 value type uint32Value uint32 func newUint32Value(val uint32, p *uint32) *uint32Value { *p = val return (*uint32Value)(p) } -func (i *uint32Value) String() string { return 
fmt.Sprintf("%d", *i) } + func (i *uint32Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 32) *i = uint32Value(v) @@ -23,6 +20,8 @@ func (i *uint32Value) Type() string { return "uint32" } +func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + func uint32Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 32) if err != nil { diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go index c68188505..f62240f2c 100644 --- a/vendor/github.com/spf13/pflag/uint64.go +++ b/vendor/github.com/spf13/pflag/uint64.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint64 Value type uint64Value uint64 @@ -23,7 +20,7 @@ func (i *uint64Value) Type() string { return "uint64" } -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } func uint64Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 64) diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go index 26db418ad..bb0e83c1f 100644 --- a/vendor/github.com/spf13/pflag/uint8.go +++ b/vendor/github.com/spf13/pflag/uint8.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint8 Value type uint8Value uint8 @@ -23,7 +20,7 @@ func (i *uint8Value) Type() string { return "uint8" } -func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } func uint8Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 8) diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md deleted file mode 100644 index a790a52bb..000000000 --- a/vendor/github.com/ugorji/go/codec/README.md +++ /dev/null @@ -1,148 +0,0 @@ -# 
Codec - -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. - -Supported Serialization formats are: - - - msgpack: https://github.com/msgpack/msgpack - - binc: http://github.com/ugorji/binc - - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: - -To install: - - go get github.com/ugorji/go/codec - -This package understands the `unsafe` tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. - -To use it, you must pass the `unsafe` tag during install: - -``` -go install -tags=unsafe github.com/ugorji/go/codec -``` - -Online documentation: http://godoc.org/github.com/ugorji/go/codec -Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer - -The idiomatic Go support is as seen in other encoding packages in -the standard library (ie json, xml, gob, etc). - -Rich Feature Set includes: - - - Simple but extremely powerful and feature-rich API - - Very High Performance. - Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. - - Multiple conversions: - Package coerces types where appropriate - e.g. decode an int in the stream into a float, etc. - - Corner Cases: - Overflows, nil maps/slices, nil values in streams are handled correctly - - Standard field renaming via tags - - Support for omitting empty fields during an encoding - - Encoding from any value and decoding into pointer to any value - (struct, slice, map, primitives, pointers, interface{}, etc) - - Extensions to support efficient encoding/decoding of any named types - - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - - Decoding without a schema (into a interface{}). 
- Includes Options to configure what specific map or slice type to use - when decoding an encoded list or map into a nil interface{} - - Encode a struct as an array, and decode struct from an array in the data stream - - Comprehensive support for anonymous fields - - Fast (no-reflection) encoding/decoding of common maps and slices - - Code-generation for faster performance. - - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming - (for formats which support it e.g. json, cbor) - - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - This mostly applies to maps, where iteration order is non-deterministic. - - NIL in data stream decoded as zero value - - Never silently skip data when decoding. - User decides whether to return an error or silently skip data when keys or indexes - in the data stream do not map to fields in the struct. - - Encode/Decode from/to chan types (for iterative streaming support) - - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosynchracies of codecs e.g. - - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support - msgpack-rpc protocol defined at: - https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - -## Extension Support - -Users can register a function to handle the encoding or decoding of -their custom types. - -There are no restrictions on what the custom type can be. Some examples: - - type BisSet []int - type BitSet64 uint64 - type UUID string - type MyStructWithUnexportedFields struct { a int; b bool; c []int; } - type GifImage struct { ... 
} - -As an illustration, MyStructWithUnexportedFields would normally be -encoded as an empty map because it has no exported fields, while UUID -would be encoded as a string. However, with extension support, you can -encode any of these however you like. - -## RPC - -RPC Client and Server Codecs are implemented, so the codecs can be used -with the standard net/rpc package. - -## Usage - -Typical usage model: - - // create and configure Handle - var ( - bh codec.BincHandle - mh codec.MsgpackHandle - ch codec.CborHandle - ) - - mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) - - // configure extensions - // e.g. for msgpack, define functions and enable Time support for tag 1 - // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) - - // create and use decoder/encoder - var ( - r io.Reader - w io.Writer - b []byte - h = &bh // or mh to use msgpack - ) - - dec = codec.NewDecoder(r, h) - dec = codec.NewDecoderBytes(b, h) - err = dec.Decode(&v) - - enc = codec.NewEncoder(w, h) - enc = codec.NewEncoderBytes(&b, h) - err = enc.Encode(v) - - //RPC Server - go func() { - for { - conn, err := listener.Accept() - rpcCodec := codec.GoRpc.ServerCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) - rpc.ServeCodec(rpcCodec) - } - }() - - //RPC Communication (client side) - conn, err = net.Dial("tcp", "localhost:5555") - rpcCodec := codec.GoRpc.ClientCodec(conn, h) - //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) - client := rpc.NewClientWithCodec(rpcCodec) - diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl deleted file mode 100644 index 04c173fba..000000000 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ /dev/null @@ -1,540 +0,0 @@ -// +build !notfastpath - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. 
- -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl -// ************************************************************ - -package codec - -// Fast path functions try to create a fast path encode or decode implementation -// for common maps and slices. -// -// We define the functions and register then in this single file -// so as not to pollute the encode.go and decode.go, and create a dependency in there. -// This file can be omitted without causing a build failure. -// -// The advantage of fast paths is: -// - Many calls bypass reflection altogether -// -// Currently support -// - slice of all builtin types, -// - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) -// This should provide adequate "typical" implementations. -// -// Note that fast track decode functions must handle values for which an address cannot be obtained. -// For example: -// m2 := map[string]int{} -// p2 := []interface{}{m2} -// // decoding into p2 will bomb if fast track functions do not treat like unaddressable. 
-// - -import ( - "reflect" - "sort" -) - -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - -type fastpathT struct {} - -var fastpathTV fastpathT - -type fastpathE struct { - rtid uintptr - rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) -} - -type fastpathA [{{ .FastpathLen }}]fastpathE - -func (x *fastpathA) index(rtid uintptr) int { - // use binary search to grab the index (adapted from sort/search.go) - h, i, j := 0, 0, {{ .FastpathLen }} // len(x) - for i < j { - h = i + (j-i)/2 - if x[h].rtid < rtid { - i = h + 1 - } else { - j = h - } - } - if i < {{ .FastpathLen }} && x[i].rtid == rtid { - return i - } - return -1 -} - -type fastpathAslice []fastpathE - -func (x fastpathAslice) Len() int { return len(x) } -func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } -func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -var fastpathAV fastpathA - -// due to possible initialization loop error, make fastpath in an init() -func init() { - if !fastpathEnabled { - return - } - i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { - xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() - fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} - i++ - return - } - - {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - {{range .Values}}{{if not .Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} - - sort.Sort(fastpathAslice(fastpathAV[:])) -} - -// -- encode - -// -- -- fast path type switch -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) 
bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) - case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) -{{end}}{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - if f.ti.mbs { - fastpathTV.{{ 
.MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) - } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) - } -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) - for _, v2 := range v { - if cr != nil { cr.sendContainerState(containerArrayElem) } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} -} - -func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - if len(v)%2 == 1 { - e.errorf("mapBySlice requires even slice length, but got %v", len(v)) - return - } - ee.EncodeMapStart(len(v) / 2) - for j, v2 := range v { - if cr != nil { - if j%2 == 0 { - cr.sendContainerState(containerMapKey) - } else { - cr.sendContainerState(containerMapValue) - } - } - {{ encmd .Elem "v2"}} - } - if cr != nil { cr.sendContainerState(containerMapEnd) } -} - -{{end}}{{end}}{{end}} - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} - -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) -} -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) - {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - {{end}}if e.h.Canonical { - {{if eq .MapKey "interface{}"}}{{/* out of band - */}}var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding - e2 := 
NewEncoderBytes(&mksv, e.hh) - v2 := make([]bytesI, len(v)) - var i, l int - var vp *bytesI {{/* put loop variables outside. seems currently needed for better perf */}} - for k2, _ := range v { - l = len(mksv) - e2.MustEncode(k2) - vp = &v2[i] - vp.v = mksv[l:] - vp.i = k2 - i++ - } - sort.Sort(bytesISlice(v2)) - for j := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - e.asis(v2[j].v) - if cr != nil { cr.sendContainerState(containerMapValue) } - e.encode(v[v2[j].i]) - } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) - var i int - for k, _ := range v { - v2[i] = {{ $x }}(k) - i++ - } - sort.Sort({{ sorttype .MapKey false}}(v2)) - for _, k2 := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} - } {{end}} - } else { - for k2, v2 := range v { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{if eq .MapKey "string"}}if asSymbols { - ee.EncodeSymbol(k2) - } else { - ee.EncodeString(c_UTF8, k2) - }{{else}}{{ encmd .MapKey "k2"}}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ encmd .Elem "v2"}} - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} -} - -{{end}}{{end}}{{end}} - -// -- decode - -// -- -- fast path type switch -func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } - switch v := iv.(type) { -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - case []{{ .Elem }}:{{else}} - case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ 
.MapKey }}]{{ .Elem }}:{{end}} - v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) - if changed2 { - *v = v2 - } -{{end}}{{end}} - default: - _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) - return false - } - return true -} - -// -- -- fast path functions -{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -{{/* -Slices can change if they -- did not come from an array -- are addressable (from a ptr) -- are settable (e.g. contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} - vp := rv.Addr().Interface().(*[]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().([]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} - -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { - dd := d.d - {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - slh, containerLenS := d.decSliceHelperStart() - if containerLenS == 0 { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool - if containerLenS > cap(v) { - if canChange { {{/* - // fast-path is for 
"basic" immutable types, so no need to copy them over - // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) - // copy(s, v[:cap(v)]) - // v = s */}} - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]{{ .Elem }}, xlen) - } - } else { - v = make([]{{ .Elem }}, xlen) - } - changed = true - } else { - d.arrayCannotExpand(len(v), containerLenS) - } - x2read = len(v) - } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } {{/* // all checks done. cannot go past len. */}} - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - if xtrunc { {{/* // means canChange=true, changed=true already. */}} - for ; j < containerLenS; j++ { - v = append(v, {{ zerocmd .Elem }}) - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} - if breakFound { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]{{ .Elem }}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} - {{ if eq .Elem "interface{}" }}d.decode(&v[j]) - {{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] - changed = true - } - } - slh.End() - return v, changed -} - -{{end}}{{end}}{{end}} - - -{{range .Values}}{{if not .Primitive}}{{if .MapKey }} -{{/* -Maps can change if they are -- addressable (from a ptr) -- settable (e.g. contained in an interface{}) -*/}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { - *vp = v - } - } else { - v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) - } -} -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { - *vp = v - } -} -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, - d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd := d.d - cr := d.cr - {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - - containerLen := dd.ReadMapStart() - if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) - v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) - changed = true - } - {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} - var mk {{ .MapKey }} - var mv {{ .Elem }} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq 
.MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } - } - } - if cr != nil { cr.sendContainerState(containerMapEnd) } - return v, changed -} - -{{end}}{{end}}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl deleted file mode 100644 index 32df54144..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ /dev/null @@ -1,104 +0,0 @@ -{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} -var {{var "c"}} bool {{/* // changed */}} -_ = {{var "c"}}{{end}} -if {{var "l"}} == 0 { - {{if isSlice }}if {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - } else if len({{var "v"}}) != 0 { - {{var "v"}} = {{var "v"}}[:0] - {{var "c"}} = true - } {{end}} {{if isChan }}if {{var "v"}} == nil { - {{var "v"}} = make({{ .CTyp }}, 0) - {{var "c"}} = true - } {{end}} -} else if {{var "l"}} > 0 
{ - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) - if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } - {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "l"}}] - {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var 
"v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} - if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} - } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { - z.DecSwallow() - } - {{end}} - } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "j"}}] - {{var "c"}} = true - } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} - {{var "c"}} = true - }{{end}} -} -{{var "h"}}.End() -{{if not isArray }}if {{var "c"}} { - *{{ .Varname }} = {{var "v"}} -}{{end}} diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl deleted file mode 100644 index 77400e0a1..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ /dev/null @@ -1,58 +0,0 @@ -{{var "v"}} := *{{ .Varname }} -{{var "l"}} := r.ReadMapStart() -{{var "bh"}} := z.DecBasicHandle() -if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) - {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) - *{{ .Varname }} = {{var "v"}} -} -var {{var "mk"}} {{ .KTyp }} -var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var 
"ms"}}, {{var "mok"}}{{end}} bool -if {{var "bh"}}.MapValueReset { - {{if decElemKindPtr}}{{var "mg"}} = true - {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } - {{else if not decElemKindImmutable}}{{var "mg"}} = true - {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true{{end}} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl deleted file mode 100644 index 31958574f..000000000 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ /dev/null @@ -1,364 +0,0 @@ -// //+build ignore - -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl -// ************************************************************ - -package codec - -import ( - "encoding" - "reflect" -) - -// This file is used to generate helper code for codecgen. -// The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. -// -// To help enforce this, we create an unexported type with exported members. -// The only way to get the type is via the one exported type that we control (somewhat). 
-// -// When static codecs are created for types, they will use this value -// to perform encoding or decoding of primitives or known slice or map types. - -// GenHelperEncoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { - return genHelperEncoder{e:e}, e.e -} - -// GenHelperDecoder is exported so that it can be used externally by codecgen. -// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. -func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { - return genHelperDecoder{d:d}, d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperEncoder struct { - e *Encoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -type genHelperDecoder struct { - d *Decoder - F fastpathT -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBasicHandle() *BasicHandle { - return f.e.h -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncFallback(iv interface{}) { - // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { - bs, fnerr := iv.MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { - bs, fnerr := iv.MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) -} -// FOR USE BY CODECGEN ONLY. 
IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { - bs, fnerr := iv.MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) TimeRtidIfBinc() uintptr { - if _, ok := f.e.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) HasExtensions() bool { - return len(f.e.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v) - if rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.e.h.getExt(rtid); xfFn != nil { - f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - -// ---------------- DECODER FOLLOWS ----------------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBasicHandle() *BasicHandle { - return f.d.h -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinary() bool { - return f.d.be // f.d.hh.isBinaryEncoding() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSwallow() { - f.d.swallow() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecScratchBuffer() []byte { - return f.d.b[:] -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { - // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { - return f.d.decSliceHelperStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { - f.d.structFieldNotFound(index, name) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { - f.d.arrayCannotExpand(sliceLen, streamLen) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) - // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. - fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) - if fnerr != nil { - panic(fnerr) - } -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) TimeRtidIfBinc() uintptr { - if _, ok := f.d.hh.(*BincHandle); ok { - return timeTypId - } - return 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) IsJSONHandle() bool { - return f.d.js -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) HasExtensions() bool { - return len(f.d.h.extHandle) != 0 -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecExt(v interface{}) (r bool) { - rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() - if xfFn := f.d.h.getExt(rtid); xfFn != nil { - f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) - return true - } - return false -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { - return decInferLen(clen, maxlen, unit) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } -} - -{{/* - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncDriver() encDriver { - return f.e.e -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDriver() decDriver { - return f.d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncNil() { - f.e.e.EncodeNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBytes(v []byte) { - f.e.e.EncodeStringBytes(c_RAW, v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncArrayStart(length int) { - f.e.e.EncodeArrayStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEnd() { - f.e.e.EncodeArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEntrySeparator() { - f.e.e.EncodeArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapStart(length int) { - f.e.e.EncodeMapStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEnd() { - f.e.e.EncodeMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEntrySeparator() { - f.e.e.EncodeMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapKVSeparator() { - f.e.e.EncodeMapKVSeparator() -} - -// --------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBytes(v *[]byte) { - *v = f.d.d.DecodeBytes(*v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTryNil() bool { - return f.d.d.TryDecodeAsNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsNil() (b bool) { - return f.d.d.IsContainerType(valueTypeNil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsMap() (b bool) { - return f.d.d.IsContainerType(valueTypeMap) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsArray() (b bool) { - return f.d.d.IsContainerType(valueTypeArray) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) DecCheckBreak() bool { - return f.d.d.CheckBreak() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapStart() int { - return f.d.d.ReadMapStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayStart() int { - return f.d.d.ReadArrayStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEnd() { - f.d.d.ReadMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEnd() { - f.d.d.ReadArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEntrySeparator() { - f.d.d.ReadArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEntrySeparator() { - f.d.d.ReadMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapKVSeparator() { - f.d.d.ReadMapKVSeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { - return f.d.d.DecodeStringAsBytes(bs) -} - - -// -- encode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { - ee := f.e.e - {{ encmd .Primitive "v" }} -} -{{ end }}{{ end }}{{ end }} - -// -- decode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { - dd := f.d.d - *vp = {{ decmd .Primitive }} -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { - dd := f.d.d - v = {{ decmd .Primitive }} - return -} -{{ end }}{{ end }}{{ end }} - - -// -- encode calls (slices/maps) -{{range .Values}}{{if not .Primitive }}{{if .Slice }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) -} -{{ end }}{{ end }} - -// -- decode calls (slices/maps) -{{range .Values}}{{if not .Primitive }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { -{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) - if changed { - *vp = v - } -} -{{ end }}{{ end }} -*/}} diff --git a/vendor/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh deleted file mode 100755 index 909f4bb0f..000000000 --- a/vendor/github.com/ugorji/go/codec/prebuild.sh +++ /dev/null @@ -1,199 +0,0 @@ -#!/bin/bash - -# _needgen is a helper function to tell if we need to generate files for msgp, codecgen. -_needgen() { - local a="$1" - zneedgen=0 - if [[ ! 
-e "$a" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - done - echo 0 -} - -# _build generates fast-path.go and gen-helper.go. -# -# It is needed because there is some dependency between the generated code -# and the other classes. Consequently, we have to totally remove the -# generated files and put stubs in place, before calling "go run" again -# to recreate them. -_build() { - if ! [[ "${zforce}" == "1" || - "1" == $( _needgen "fast-path.generated.go" ) || - "1" == $( _needgen "gen-helper.generated.go" ) || - "1" == $( _needgen "gen.generated.go" ) || - 1 == 0 ]] - then - return 0 - fi - - # echo "Running prebuild" - if [ "${zbak}" == "1" ] - then - # echo "Backing up old generated files" - _zts=`date '+%m%d%Y_%H%M%S'` - _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak - # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak - # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak - else - rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - fi - - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go < 0 - if stopTimeSec > 0: - def myStopRpcServer(): - server.stop() - t = threading.Timer(stopTimeSec, myStopRpcServer) - t.start() - server.start() - -def doRpcClientToPythonSvc(port): - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("Echo123", "A1", "B2", "C3") - print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def 
doRpcClientToGoSvc(port): - # print ">>>> port: ", port, " <<<<<" - address = msgpackrpc.Address('localhost', port) - client = msgpackrpc.Client(address, unpack_encoding='utf-8') - print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]) - print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}) - -def doMain(args): - if len(args) == 2 and args[0] == "testdata": - build_test_data(args[1]) - elif len(args) == 3 and args[0] == "rpc-server": - doRpcServer(int(args[1]), int(args[2])) - elif len(args) == 2 and args[0] == "rpc-client-python-service": - doRpcClientToPythonSvc(int(args[1])) - elif len(args) == 2 and args[0] == "rpc-client-go-service": - doRpcClientToGoSvc(int(args[1])) - else: - print("Usage: test.py " + - "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...") - -if __name__ == "__main__": - doMain(sys.argv[1:]) - diff --git a/vendor/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/ugorji/go/codec/tests.sh deleted file mode 100755 index 00857b620..000000000 --- a/vendor/github.com/ugorji/go/codec/tests.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -# Run all the different permutations of all the tests. -# This helps ensure that nothing gets broken. - -_run() { - # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), - # binc-nosymbols (n), struct2array (s), intern string (e), - # json-indent (d), circular (l) - # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) - # 3. OPTIONS: verbose (v), reset (z), must (m), - # - # Use combinations of mode to get exactly what you want, - # and then pass the variations you need. 
- - ztags="" - zargs="" - local OPTIND - OPTIND=1 - while getopts "_xurtcinsvgzmefdl" flag - do - case "x$flag" in - 'xr') ;; - 'xf') ztags="$ztags notfastpath" ;; - 'xg') ztags="$ztags codecgen" ;; - 'xx') ztags="$ztags x" ;; - 'xu') ztags="$ztags unsafe" ;; - 'xv') zargs="$zargs -tv" ;; - 'xz') zargs="$zargs -tr" ;; - 'xm') zargs="$zargs -tm" ;; - 'xl') zargs="$zargs -tl" ;; - *) ;; - esac - done - # shift $((OPTIND-1)) - printf '............. TAGS: %s .............\n' "$ztags" - # echo ">>>>>>> TAGS: $ztags" - - OPTIND=1 - while getopts "_xurtcinsvgzmefdl" flag - do - case "x$flag" in - 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; - 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; - 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; - 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; - 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; - 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; - 'xd') printf ">>>>>>> INDENT : "; - go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; - go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; - sleep 2 ;; - *) ;; - esac - done - shift $((OPTIND-1)) - - OPTIND=1 -} - -# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" ]]; then - # All: r, x, g, gu - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset - _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) - _run "-x_tcinsed_ml" # external - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe -elif [[ "x$@" = "x-Z" ]]; then - # Regular - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset -elif [[ "x$@" = "x-F" ]]; then - # regular with notfastpath - _run "-_tcinsed_ml_f" # regular - _run "-_tcinsed_ml_zf" # regular with reset -else - _run "$@" -fi diff --git 
a/vendor/github.com/urfave/cli/.travis.yml b/vendor/github.com/urfave/cli/.travis.yml deleted file mode 100644 index c2b5c8de0..000000000 --- a/vendor/github.com/urfave/cli/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -sudo: false - -go: -- 1.1.2 -- 1.2.2 -- 1.3.3 -- 1.4.2 -- 1.5.1 -- tip - -matrix: - allow_failures: - - go: tip - -script: -- go vet ./... -- go test -v ./... diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE index 5515ccfb7..42a597e29 100644 --- a/vendor/github.com/urfave/cli/LICENSE +++ b/vendor/github.com/urfave/cli/LICENSE @@ -1,21 +1,21 @@ -Copyright (C) 2013 Jeremy Saenz -All Rights Reserved. +MIT License -MIT LICENSE +Copyright (c) 2016 Jeremy Saenz & Contributors -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md deleted file mode 100644 index d9371cfee..000000000 --- a/vendor/github.com/urfave/cli/README.md +++ /dev/null @@ -1,434 +0,0 @@ -[![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli) -[![Build Status](https://travis-ci.org/codegangsta/cli.svg?branch=master)](https://travis-ci.org/codegangsta/cli) -[![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli) -[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/jackfan.us.kg-codegangsta-cli) - -# cli.go - -`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. - -## Overview - -Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. - -**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive! 
- -## Installation - -Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html). - -To install `cli.go`, simply run: -``` -$ go get github.com/codegangsta/cli -``` - -Make sure your `PATH` includes to the `$GOPATH/bin` directory so your commands can be easily used: -``` -export PATH=$PATH:$GOPATH/bin -``` - -## Getting Started - -One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`. - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - cli.NewApp().Run(os.Args) -} -``` - -This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Action = func(c *cli.Context) { - println("boom! I say!") - } - - app.Run(os.Args) -} -``` - -Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below. - -## Example - -Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! - -Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "greet" - app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) { - println("Hello friend!") - } - - app.Run(os.Args) -} -``` - -Install our command to the `$GOPATH/bin` directory: - -``` -$ go install -``` - -Finally run our new command: - -``` -$ greet -Hello friend! 
-``` - -`cli.go` also generates neat help text: - -``` -$ greet help -NAME: - greet - fight the loneliness! - -USAGE: - greet [global options] command [command options] [arguments...] - -VERSION: - 0.0.0 - -COMMANDS: - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS - --version Shows version information -``` - -### Arguments - -You can lookup arguments by calling the `Args` function on `cli.Context`. - -``` go -... -app.Action = func(c *cli.Context) { - println("Hello", c.Args()[0]) -} -... -``` - -### Flags - -Setting and querying flags is simple. - -``` go -... -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, -} -app.Action = func(c *cli.Context) { - name := "someone" - if c.NArg() > 0 { - name = c.Args()[0] - } - if c.String("lang") == "spanish" { - println("Hola", name) - } else { - println("Hello", name) - } -} -... -``` - -You can also set a destination variable for a flag, to which the content will be scanned. - -``` go -... -var language string -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - Destination: &language, - }, -} -app.Action = func(c *cli.Context) { - name := "someone" - if c.NArg() > 0 { - name = c.Args()[0] - } - if language == "spanish" { - println("Hola", name) - } else { - println("Hello", name) - } -} -... -``` - -See full list of flags at http://godoc.org/github.com/codegangsta/cli - -#### Alternate Names - -You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - }, -} -``` - -That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. 
- -#### Values from the Environment - -You can also have the default value set from the environment via `EnvVar`. e.g. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "APP_LANG", - }, -} -``` - -The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", - }, -} -``` - -#### Values from alternate input sources (YAML and others) - -There is a separate package altsrc that adds support for getting flag values from other input sources like YAML. - -In order to get values for a flag from an alternate input source the following code would be added to wrap an existing cli.Flag like below: - -``` go - altsrc.NewIntFlag(cli.IntFlag{Name: "test"}) -``` - -Initialization must also occur for these flags. Below is an example initializing getting data from a yaml file below. - -``` go - command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) -``` - -The code above will use the "load" string as a flag name to get the file name of a yaml file from the cli.Context. -It will then use that file name to initialize the yaml input source for any flags that are defined on that command. -As a note the "load" flag used would also have to be defined on the command flags in order for this code snipped to work. - -Currently only YAML files are supported but developers can add support for other input sources by implementing the -altsrc.InputSourceContext for their given sources. 
- -Here is a more complete sample of a command using YAML support: - -``` go - command := &cli.Command{ - Name: "test-cmd", - Aliases: []string{"tc"}, - Usage: "this is for testing", - Description: "testing", - Action: func(c *cli.Context) { - // Action to run - }, - Flags: []cli.Flag{ - NewIntFlag(cli.IntFlag{Name: "test"}), - cli.StringFlag{Name: "load"}}, - } - command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) - err := command.Run(c) -``` - -### Subcommands - -Subcommands can be defined for a more git-like command line app. - -```go -... -app.Commands = []cli.Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - { - Name: "template", - Aliases: []string{"r"}, - Usage: "options for task templates", - Subcommands: []cli.Command{ - { - Name: "add", - Usage: "add a new template", - Action: func(c *cli.Context) { - println("new task template: ", c.Args().First()) - }, - }, - { - Name: "remove", - Usage: "remove an existing template", - Action: func(c *cli.Context) { - println("removed task template: ", c.Args().First()) - }, - }, - }, - }, -} -... -``` - -### Subcommands categories - -For additional organization in apps that have many subcommands, you can -associate a category for each command to group them together in the help -output. - -E.g. - -```go -... - app.Commands = []cli.Command{ - { - Name: "noop", - }, - { - Name: "add", - Category: "template", - }, - { - Name: "remove", - Category: "template", - }, - } -... -``` - -Will include: - -``` -... -COMMANDS: - noop - - Template actions: - add - remove -... 
-``` - -### Bash Completion - -You can enable completion commands by setting the `EnableBashCompletion` -flag on the `App` object. By default, this setting will only auto-complete to -show an app's subcommands, but you can write your own completion methods for -the App or its subcommands. - -```go -... -var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"} -app := cli.NewApp() -app.EnableBashCompletion = true -app.Commands = []cli.Command{ - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - BashComplete: func(c *cli.Context) { - // This will complete if no args are passed - if c.NArg() > 0 { - return - } - for _, t := range tasks { - fmt.Println(t) - } - }, - } -} -... -``` - -#### To Enable - -Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while -setting the `PROG` variable to the name of your program: - -`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` - -#### To Distribute - -Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename -it to the name of the program you wish to add autocomplete support for (or -automatically install it there if you are distributing a package). Don't forget -to source the file to make it active in the current shell. - -``` -sudo cp src/bash_autocomplete /etc/bash_completion.d/ -source /etc/bash_completion.d/ -``` - -Alternatively, you can just document that users should source the generic -`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set -to the name of their program (as above). - -## Contribution Guidelines - -Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. 
If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together. - -If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out. diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go index bd20a2d34..95ffc0b97 100644 --- a/vendor/github.com/urfave/cli/app.go +++ b/vendor/github.com/urfave/cli/app.go @@ -10,6 +10,18 @@ import ( "time" ) +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + + errInvalidActionType = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + // App is the main structure of a cli application. 
It is recommended that // an app be created with the cli.NewApp() function type App struct { @@ -25,6 +37,8 @@ type App struct { ArgsUsage string // Version of the program Version string + // Description of the program + Description string // List of commands to execute Commands []Command // List of flags to parse @@ -35,24 +49,26 @@ type App struct { HideHelp bool // Boolean to hide built-in version flag and the VERSION section of help HideVersion bool - // Populate on app startup, only gettable throught method Categories() + // Populate on app startup, only gettable through method Categories() categories CommandCategories // An action to execute when the bash-completion flag is set - BashComplete func(context *Context) + BashComplete BashCompleteFunc // An action to execute before any subcommands are run, but after the context is ready // If a non-nil error is returned, no subcommands are run - Before func(context *Context) error + Before BeforeFunc // An action to execute after any subcommands are run, but after the subcommand has finished // It is run even if Action() panics - After func(context *Context) error + After AfterFunc + // The action to execute when no subcommands are specified - Action func(context *Context) + // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}` + // *Note*: support for the deprecated `Action` signature will be removed in a future version + Action interface{} + // Execute this function if the proper command cannot be found - CommandNotFound func(context *Context, command string) - // Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages. - // This function is able to replace the original error messages. - // If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted. 
- OnUsageError func(context *Context, err error, isSubcommand bool) error + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc // Compilation date Compiled time.Time // List of all authors who contributed @@ -65,6 +81,12 @@ type App struct { Email string // Writer writer to write output to Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Other custom info + Metadata map[string]interface{} + + didSetup bool } // Tries to find out when this binary was compiled. @@ -77,7 +99,8 @@ func compileTime() time.Time { return info.ModTime() } -// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. func NewApp() *App { return &App{ Name: filepath.Base(os.Args[0]), @@ -92,8 +115,16 @@ func NewApp() *App { } } -// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + if a.Author != "" || a.Email != "" { a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) } @@ -107,13 +138,6 @@ func (a *App) Run(arguments []string) (err error) { } a.Commands = newCmds - a.categories = CommandCategories{} - for _, command := range a.Commands { - a.categories = a.categories.AddCommand(command.Category, command) - } - sort.Sort(a.categories) - - // append help to commands if a.Command(helpCommand.Name) == nil && !a.HideHelp { a.Commands = append(a.Commands, helpCommand) if (HelpFlag != BoolFlag{}) { @@ -121,17 +145,44 @@ func (a *App) Run(arguments []string) (err error) { } } - //append version/help flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - if !a.HideVersion { a.appendFlag(VersionFlag) } + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } + + if a.Writer == nil { + a.Writer = os.Stdout + } +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. 
this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + // parse flags - set := flagSet(a.Name, a.Flags) + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err + } + set.SetOutput(ioutil.Discard) err = set.Parse(arguments[1:]) nerr := normalizeFlags(a.Flags, set) @@ -141,6 +192,7 @@ func (a *App) Run(arguments []string) (err error) { ShowAppHelp(context) return nerr } + context.shellComplete = shellComplete if checkCompletions(context) { return nil @@ -149,12 +201,12 @@ func (a *App) Run(arguments []string) (err error) { if err != nil { if a.OnUsageError != nil { err := a.OnUsageError(context, err, false) - return err - } else { - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowAppHelp(context) + HandleExitCoder(err) return err } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowAppHelp(context) + return err } if !a.HideHelp && checkHelp(context) { @@ -180,10 +232,12 @@ func (a *App) Run(arguments []string) (err error) { } if a.Before != nil { - err = a.Before(context) - if err != nil { - fmt.Fprintf(a.Writer, "%v\n\n", err) + beforeErr := a.Before(context) + if beforeErr != nil { + fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) ShowAppHelp(context) + HandleExitCoder(beforeErr) + err = beforeErr return err } } @@ -197,20 +251,31 @@ func (a *App) Run(arguments []string) (err error) { } } + if a.Action == nil { + a.Action = helpCommand.Action + } + // Run default Action - a.Action(context) - return nil + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err } -// Another entry point to the cli app, takes care of passing arguments and error handling +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned 
+// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. This will cause the application to exit with the given eror +// code in the cli.ExitCoder func (a *App) RunAndExitOnError() { if err := a.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + fmt.Fprintln(a.errWriter(), err) + OsExiter(1) } } -// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags func (a *App) RunAsSubcommand(ctx *Context) (err error) { // append help to commands if len(a.Commands) > 0 { @@ -231,13 +296,12 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } a.Commands = newCmds - // append flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) + // parse flags + set, err := flagSet(a.Name, a.Flags) + if err != nil { + return err } - // parse flags - set := flagSet(a.Name, a.Flags) set.SetOutput(ioutil.Discard) err = set.Parse(ctx.Args().Tail()) nerr := normalizeFlags(a.Flags, set) @@ -261,12 +325,12 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { if err != nil { if a.OnUsageError != nil { err = a.OnUsageError(context, err, true) - return err - } else { - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowSubcommandHelp(context) + HandleExitCoder(err) return err } + fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) + ShowSubcommandHelp(context) + return err } if len(a.Commands) > 0 { @@ -283,6 +347,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { defer func() { afterErr := a.After(context) if afterErr != nil { + HandleExitCoder(err) if err != nil { err = NewMultiError(err, afterErr) } else { @@ -293,8 +358,10 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } if a.Before != nil { - err := a.Before(context) - if err != nil { + beforeErr := a.Before(context) + if beforeErr != nil { 
+ HandleExitCoder(beforeErr) + err = beforeErr return err } } @@ -309,12 +376,13 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } // Run default Action - a.Action(context) + err = HandleAction(a.Action, context) - return nil + HandleExitCoder(err) + return err } -// Returns the named command on App. Returns nil if the command does not exist +// Command returns the named command on App. Returns nil if the command does not exist func (a *App) Command(name string) *Command { for _, c := range a.Commands { if c.HasName(name) { @@ -325,11 +393,46 @@ func (a *App) Command(name string) *Command { return nil } -// Returnes the array containing all the categories with the commands they contain +// Categories returns a slice containing all the categories with the commands they contain func (a *App) Categories() CommandCategories { return a.categories } +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + func (a *App) hasFlag(flag Flag) bool { for _, f := range a.Flags { if flag == f { @@ -340,6 +443,16 @@ func (a *App) hasFlag(flag Flag) bool { return false } +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. 
+ if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + func (a *App) appendFlag(flag Flag) { if !a.hasFlag(flag) { a.Flags = append(a.Flags, flag) @@ -356,8 +469,24 @@ type Author struct { func (a Author) String() string { e := "" if a.Email != "" { - e = "<" + a.Email + "> " + e = " <" + a.Email + ">" } - return fmt.Sprintf("%v %v", a.Name, e) + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! +func HandleAction(action interface{}, context *Context) (err error) { + if a, ok := action.(ActionFunc); ok { + return a(context) + } else if a, ok := action.(func(*Context) error); ok { + return a(context) + } else if a, ok := action.(func(*Context)); ok { // deprecated function signature + a(context) + return nil + } else { + return errInvalidActionType + } } diff --git a/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/urfave/cli/appveyor.yml deleted file mode 100644 index 3ca7afabd..000000000 --- a/vendor/github.com/urfave/cli/appveyor.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -install: - - go version - - go env - -build_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go vet ./... - - go test -v ./... - -test: off - -deploy: off diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go index 7dbf2182f..1a6055023 100644 --- a/vendor/github.com/urfave/cli/category.go +++ b/vendor/github.com/urfave/cli/category.go @@ -1,7 +1,9 @@ package cli +// CommandCategories is a slice of *CommandCategory. type CommandCategories []*CommandCategory +// CommandCategory is a category containing commands. type CommandCategory struct { Name string Commands Commands @@ -19,6 +21,7 @@ func (c CommandCategories) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +// AddCommand adds a command to a category. 
func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { for _, commandCategory := range c { if commandCategory.Name == category { @@ -28,3 +31,14 @@ func (c CommandCategories) AddCommand(category string, command Command) CommandC } return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) } + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go index 31dc9124d..74fd101f4 100644 --- a/vendor/github.com/urfave/cli/cli.go +++ b/vendor/github.com/urfave/cli/cli.go @@ -10,7 +10,7 @@ // app := cli.NewApp() // app.Name = "greet" // app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) { +// app.Action = func(c *cli.Context) error { // println("Greetings") // } // @@ -18,23 +18,4 @@ // } package cli -import ( - "strings" -) - -type MultiError struct { - Errors []error -} - -func NewMultiError(err ...error) MultiError { - return MultiError{Errors: err} -} - -func (m MultiError) Error() string { - errs := make([]string, len(m.Errors)) - for i, err := range m.Errors { - errs[i] = err.Error() - } - - return strings.Join(errs, "\n") -} +//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go index 1a05b5497..2628fbf48 100644 --- a/vendor/github.com/urfave/cli/command.go +++ b/vendor/github.com/urfave/cli/command.go @@ -26,34 +26,42 @@ type Command struct { // The category the command is part of Category string // The function to call when checking for bash command completions - BashComplete func(context *Context) + BashComplete BashCompleteFunc // An action to execute 
before any sub-subcommands are run, but after the context is ready // If a non-nil error is returned, no sub-subcommands are run - Before func(context *Context) error - // An action to execute after any subcommands are run, but before the subcommand has finished + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished // It is run even if Action() panics - After func(context *Context) error + After AfterFunc // The function to call when this command is invoked - Action func(context *Context) - // Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages. - // This function is able to replace the original error messages. - // If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted. - OnUsageError func(context *Context, err error) error + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc // List of child commands Subcommands Commands // List of flags to parse Flags []Flag // Treat all flags as normal arguments if true SkipFlagParsing bool + // Skip argument reordering which attempts to move flags before arguments, + // but only works if all flags appear after all arguments. This behavior was + // removed n version 2 since it only works under specific conditions so we + // backport here by exposing it as an option for compatibility. + SkipArgReorder bool // Boolean to hide built-in help command HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool // Full name of command for help, defaults to full command name, including parent commands. HelpName string commandNamePath []string } -// Returns the full name of the command. +// FullName returns the full name of the command. 
// For subcommands this ensures that parent commands are part of the command path func (c Command) FullName() string { if c.commandNamePath == nil { @@ -62,9 +70,10 @@ func (c Command) FullName() string { return strings.Join(c.commandNamePath, " ") } +// Commands is a slice of Command type Commands []Command -// Invokes the command given the context, parses ctx.Args() to generate command-specific flags +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags func (c Command) Run(ctx *Context) (err error) { if len(c.Subcommands) > 0 { return c.startApp(ctx) @@ -78,14 +87,15 @@ func (c Command) Run(ctx *Context) (err error) { ) } - if ctx.App.EnableBashCompletion { - c.Flags = append(c.Flags, BashCompletionFlag) + set, err := flagSet(c.Name, c.Flags) + if err != nil { + return err } - - set := flagSet(c.Name, c.Flags) set.SetOutput(ioutil.Discard) - if !c.SkipFlagParsing { + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + } else if !c.SkipArgReorder { firstFlagIndex := -1 terminatorIndex := -1 for index, arg := range ctx.Args() { @@ -118,21 +128,7 @@ func (c Command) Run(ctx *Context) (err error) { err = set.Parse(ctx.Args().Tail()) } } else { - if c.SkipFlagParsing { - err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) - } - } - - if err != nil { - if c.OnUsageError != nil { - err := c.OnUsageError(ctx, err) - return err - } else { - fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - return err - } + err = set.Parse(ctx.Args().Tail()) } nerr := normalizeFlags(c.Flags, set) @@ -142,12 +138,24 @@ func (c Command) Run(ctx *Context) (err error) { ShowCommandHelp(ctx, c.Name) return nerr } - context := NewContext(ctx.App, set, ctx) + context := NewContext(ctx.App, set, ctx) if checkCommandCompletions(context, c.Name) { return nil } + if err != nil { + if c.OnUsageError != nil { + err := c.OnUsageError(ctx, err, 
false) + HandleExitCoder(err) + return err + } + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage:", err.Error()) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return err + } + if checkCommandHelp(context, c.Name) { return nil } @@ -156,6 +164,7 @@ func (c Command) Run(ctx *Context) (err error) { defer func() { afterErr := c.After(context) if afterErr != nil { + HandleExitCoder(err) if err != nil { err = NewMultiError(err, afterErr) } else { @@ -166,20 +175,30 @@ func (c Command) Run(ctx *Context) (err error) { } if c.Before != nil { - err := c.Before(context) + err = c.Before(context) if err != nil { fmt.Fprintln(ctx.App.Writer, err) fmt.Fprintln(ctx.App.Writer) ShowCommandHelp(ctx, c.Name) + HandleExitCoder(err) return err } } + if c.Action == nil { + c.Action = helpSubcommand.Action + } + context.Command = c - c.Action(context) - return nil + err = HandleAction(c.Action, context) + + if err != nil { + HandleExitCoder(err) + } + return err } +// Names returns the names including short names and aliases. func (c Command) Names() []string { names := []string{c.Name} @@ -190,7 +209,7 @@ func (c Command) Names() []string { return append(names, c.Aliases...) 
} -// Returns true if Command.Name or Command.ShortName matches given name +// HasName returns true if Command.Name or Command.ShortName matches given name func (c Command) HasName(name string) bool { for _, n := range c.Names() { if n == name { @@ -202,7 +221,7 @@ func (c Command) HasName(name string) bool { func (c Command) startApp(ctx *Context) error { app := NewApp() - + app.Metadata = ctx.App.Metadata // set the name and usage app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) if c.HelpName == "" { @@ -260,3 +279,8 @@ func (c Command) startApp(ctx *Context) error { return app.RunAsSubcommand(ctx) } + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go index b66d278d0..cb89e92a0 100644 --- a/vendor/github.com/urfave/cli/context.go +++ b/vendor/github.com/urfave/cli/context.go @@ -3,9 +3,9 @@ package cli import ( "errors" "flag" - "strconv" + "reflect" "strings" - "time" + "syscall" ) // Context is a type that is passed through to @@ -13,154 +13,119 @@ import ( // can be used to retrieve context-specific Args and // parsed command-line options. type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - setFlags map[string]bool - globalSetFlags map[string]bool - parentContext *Context + App *App + Command Command + shellComplete bool + flagSet *flag.FlagSet + setFlags map[string]bool + parentContext *Context } -// Creates a new context. For use in when invoking an App or Command action. +// NewContext creates a new context. For use in when invoking an App or Command action. 
func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { - return &Context{App: app, flagSet: set, parentContext: parentCtx} -} - -// Looks up the value of a local int flag, returns 0 if no int flag exists -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// Looks up the value of a local bool flag, returns false if no bool flag exists -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} + c := &Context{App: app, flagSet: set, parentContext: parentCtx} -// Looks up the value of a local boolT flag, returns false if no bool flag exists -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} + if parentCtx != nil { + c.shellComplete = parentCtx.shellComplete + } -// Looks up the value of a local string flag, returns "" if no string flag exists -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) + return c } -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() } -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) +// Set sets a context flag to a value. 
+func (c *Context) Set(name, value string) error { + return c.flagSet.Set(name, value) } -// Looks up the value of a local generic flag, returns nil if no generic flag exists -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + return globalContext(c).flagSet.Set(name, value) } -// Looks up the value of a global int flag, returns 0 if no int flag exists -func (c *Context) GlobalInt(name string) int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt(name, fs) - } - return 0 -} +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) GlobalDuration(name string) time.Duration { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupDuration(name, fs) - } - return 0 -} + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) -// Looks up the value of a global bool flag, returns false if no bool flag exists -func (c *Context) GlobalBool(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBool(name, fs) - } - return false -} + c.flagSet.VisitAll(func(f *flag.Flag) { + if _, ok := c.setFlags[f.Name]; ok { + return + } + c.setFlags[f.Name] = false + }) -// Looks up the value of a global string flag, returns "" if no string flag exists -func (c *Context) GlobalString(name string) string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupString(name, fs) - } - return "" -} + // XXX hack to support IsSet for flags with EnvVar + // + // There isn't an easy way to do this with the current implementation since + // whether a flag was set via an environment variable is very difficult to + // 
determine here. Instead, we intend to introduce a backwards incompatible + // change in version 2 to add `IsSet` to the Flag interface to push the + // responsibility closer to where the information required to determine + // whether a flag is set by non-standard means such as environment + // variables is avaliable. + // + // See https://github.com/urfave/cli/issues/294 for additional discussion + flags := c.Command.Flags + if c.Command.Name == "" { // cannot == Command{} since it contains slice types + if c.App != nil { + flags = c.App.Flags + } + } + for _, f := range flags { + eachName(f.GetName(), func(name string) { + if isSet, ok := c.setFlags[name]; isSet || !ok { + return + } -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists -func (c *Context) GlobalStringSlice(name string) []string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupStringSlice(name, fs) - } - return nil -} + val := reflect.ValueOf(f) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists -func (c *Context) GlobalIntSlice(name string) []int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupIntSlice(name, fs) - } - return nil -} + envVarValue := val.FieldByName("EnvVar") + if !envVarValue.IsValid() { + return + } -// Looks up the value of a global generic flag, returns nil if no generic flag exists -func (c *Context) GlobalGeneric(name string) interface{} { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupGeneric(name, fs) + eachName(envVarValue.String(), func(envVar string) { + envVar = strings.TrimSpace(envVar) + if _, ok := syscall.Getenv(envVar); ok { + c.setFlags[name] = true + return + } + }) + }) + } } - return nil -} -// Returns the number of flags set -func (c *Context) NumFlags() int { - return c.flagSet.NFlag() + return c.setFlags[name] } -// Determines if the flag was actually set 
-func (c *Context) IsSet(name string) bool { - if c.setFlags == nil { - c.setFlags = make(map[string]bool) - c.flagSet.Visit(func(f *flag.Flag) { - c.setFlags[f.Name] = true - }) +// GlobalIsSet determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext } - return c.setFlags[name] == true -} -// Determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - if c.globalSetFlags == nil { - c.globalSetFlags = make(map[string]bool) - ctx := c - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext { - ctx.flagSet.Visit(func(f *flag.Flag) { - c.globalSetFlags[f.Name] = true - }) + for ; ctx != nil; ctx = ctx.parentContext { + if ctx.IsSet(name) { + return true } } - return c.globalSetFlags[name] + return false } -// Returns a slice of flag names used in this context. +// FlagNames returns a slice of flag names used in this context. func (c *Context) FlagNames() (names []string) { for _, flag := range c.Command.Flags { name := strings.Split(flag.GetName(), ",")[0] @@ -172,7 +137,7 @@ func (c *Context) FlagNames() (names []string) { return } -// Returns a slice of global flag names used by the app. +// GlobalFlagNames returns a slice of global flag names used by the app. 
func (c *Context) GlobalFlagNames() (names []string) { for _, flag := range c.App.Flags { name := strings.Split(flag.GetName(), ",")[0] @@ -184,25 +149,31 @@ func (c *Context) GlobalFlagNames() (names []string) { return } -// Returns the parent context, if any +// Parent returns the parent context, if any func (c *Context) Parent() *Context { return c.parentContext } +// value returns the value of the flag coressponding to `name` +func (c *Context) value(name string) interface{} { + return c.flagSet.Lookup(name).Value.(flag.Getter).Get() +} + +// Args contains apps console arguments type Args []string -// Returns the command line arguments associated with the context. +// Args returns the command line arguments associated with the context. func (c *Context) Args() Args { args := Args(c.flagSet.Args()) return args } -// Returns the number of the command line arguments. +// NArg returns the number of the command line arguments. func (c *Context) NArg() int { return len(c.Args()) } -// Returns the nth argument, or else a blank string +// Get returns the nth argument, or else a blank string func (a Args) Get(n int) string { if len(a) > n { return a[n] @@ -210,12 +181,12 @@ func (a Args) Get(n int) string { return "" } -// Returns the first argument, or else a blank string +// First returns the first argument, or else a blank string func (a Args) First() string { return a.Get(0) } -// Return the rest of the arguments (not the first one) +// Tail returns the rest of the arguments (not the first one) // or else an empty string slice func (a Args) Tail() []string { if len(a) >= 2 { @@ -224,12 +195,12 @@ func (a Args) Tail() []string { return []string{} } -// Checks if there are any arguments present +// Present checks if there are any arguments present func (a Args) Present() bool { return len(a) != 0 } -// Swaps arguments at the given indexes +// Swap swaps arguments at the given indexes func (a Args) Swap(from, to int) error { if from >= len(a) || to >= len(a) { return 
errors.New("index out of range") @@ -238,6 +209,19 @@ func (a Args) Swap(from, to int) error { return nil } +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { if ctx.parentContext != nil { ctx = ctx.parentContext @@ -250,107 +234,6 @@ func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { return nil } -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - val, err := strconv.Atoi(f.Value.String()) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - val, err := time.ParseDuration(f.Value.String()) - if err == nil { - return val - } - } - - return 0 -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != nil { - return f.Value.String() - } - - return "" -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*StringSlice)).Value() - - } - - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*IntSlice)).Value() - - } - - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - return f.Value - } - return nil -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return val - } - - 
return false -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return true - } - return val - } - - return false -} - func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { switch ff.Value.(type) { case *StringSlice: diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go new file mode 100644 index 000000000..0206ff491 --- /dev/null +++ b/vendor/github.com/urfave/cli/errors.go @@ -0,0 +1,110 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implements the error interface. 
+func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message interface{} +} + +// NewExitError makes a new *ExitError +func NewExitError(message interface{}, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return fmt.Sprintf("%v", ee.message) +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice. 
+func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + for _, merr := range multiErr.Errors { + HandleExitCoder(merr) + } + return + } + + if err.Error() != "" { + if _, ok := err.(ErrorFormatter); ok { + fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(1) +} diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go index e951c2df7..7dd8a2c4a 100644 --- a/vendor/github.com/urfave/cli/flag.go +++ b/vendor/github.com/urfave/cli/flag.go @@ -3,25 +3,29 @@ package cli import ( "flag" "fmt" - "os" + "reflect" "runtime" "strconv" "strings" + "syscall" "time" ) -// This flag enables bash-completion for all commands and subcommands +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", + Name: "generate-bash-completion", + Hidden: true, } -// This flag prints the version for the application +// VersionFlag prints the version for the application var VersionFlag = BoolFlag{ Name: "version, v", Usage: "print the version", } -// This flag prints the help for all commands and subcommands +// HelpFlag prints the help for all commands and subcommands // Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand // unless HideHelp is set to true) var HelpFlag = BoolFlag{ @@ -29,6 +33,25 @@ var HelpFlag = BoolFlag{ Usage: "show help", } +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// FlagsByName is a slice of Flag. 
+type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + return f[i].GetName() < f[j].GetName() +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + // Flag is a common interface related to parsing flags in cli. // For more advanced flag parsing techniques, it is recommended that // this interface be implemented. @@ -39,13 +62,29 @@ type Flag interface { GetName() string } -func flagSet(name string, flags []Flag) *flag.FlagSet { +// errorableFlag is an interface that allows us to return errors during apply +// it allows flags defined in this library to return errors in a fashion backwards compatible +// TODO remove in v2 and modify the existing Flag interface to return errors +type errorableFlag interface { + Flag + + ApplyWithError(*flag.FlagSet) error +} + +func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { set := flag.NewFlagSet(name, flag.ContinueOnError) for _, f := range flags { - f.Apply(set) + //TODO remove in v2 when errorableFlag is removed + if ef, ok := f.(errorableFlag); ok { + if err := ef.ApplyWithError(set); err != nil { + return nil, err + } + } else { + f.Apply(set) + } } - return set + return set, nil } func eachName(longName string, fn func(string)) { @@ -62,41 +101,24 @@ type Generic interface { String() string } -// GenericFlag is the flag type for types implementing Generic -type GenericFlag struct { - Name string - Value Generic - Usage string - EnvVar string -} - -// String returns the string representation of the generic flag to display the -// help text to the user (uses the String() method of the generic flag to show -// the value) -func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage)) -} - -func (f GenericFlag) FormatValueHelp() string { - if f.Value == nil { - return "" - } - s := f.Value.String() - if len(s) == 0 { - return "" - } - 
return fmt.Sprintf("\"%s\"", s) -} - // Apply takes the flagset and calls Set on the generic flag with the value // provided by the user for parsing by the flag +// Ignores parsing errors func (f GenericFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { val := f.Value if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - val.Set(envVal) + if envVal, ok := syscall.Getenv(envVar); ok { + if err := val.Set(envVal); err != nil { + return fmt.Errorf("could not parse %s as value for flag %s: %s", envVal, f.Name, err) + } break } } @@ -105,13 +127,11 @@ func (f GenericFlag) Apply(set *flag.FlagSet) { eachName(f.Name, func(name string) { set.Var(f.Value, name, f.Usage) }) -} -func (f GenericFlag) GetName() string { - return f.Name + return nil } -// StringSlice is an opaque type for []string to satisfy flag.Value +// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter type StringSlice []string // Set appends the string value to the list of values @@ -130,32 +150,29 @@ func (f *StringSlice) Value() []string { return *f } -// StringSlice is a string flag that can be specified multiple times on the -// command-line -type StringSliceFlag struct { - Name string - Value *StringSlice - Usage string - EnvVar string -} - -// String returns the usage -func (f StringSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) +// Get returns the slice of strings set by this flag +func (f *StringSlice) Get() interface{} { + return *f } // Apply 
populates the flag given the flag set and environment +// Ignores errors func (f StringSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { newVal := &StringSlice{} for _, s := range strings.Split(envVal, ",") { s = strings.TrimSpace(s) - newVal.Set(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) + } } f.Value = newVal break @@ -169,13 +186,11 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) { } set.Var(f.Value, name, f.Usage) }) -} -func (f StringSliceFlag) GetName() string { - return f.Name + return nil } -// StringSlice is an opaque type for []int to satisfy flag.Value +// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter type IntSlice []int // Set parses the value into an integer and appends it to the list of values @@ -183,15 +198,14 @@ func (f *IntSlice) Set(value string) error { tmp, err := strconv.Atoi(value) if err != nil { return err - } else { - *f = append(*f, tmp) } + *f = append(*f, tmp) return nil } // String returns a readable representation of this value (for usage defaults) func (f *IntSlice) String() string { - return fmt.Sprintf("%d", *f) + return fmt.Sprintf("%#v", *f) } // Value returns the slice of ints set by this flag @@ -199,34 +213,28 @@ func (f *IntSlice) Value() []int { return *f } -// IntSliceFlag is an int flag that can be specified multiple times on the -// command-line -type IntSliceFlag struct { - Name string - Value *IntSlice - Usage string - EnvVar string -} - -// String returns the usage -func (f IntSliceFlag) String() string { - firstName := 
strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) +// Get returns the slice of ints set by this flag +func (f *IntSlice) Get() interface{} { + return *f } // Apply populates the flag given the flag set and environment +// Ignores errors func (f IntSliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { newVal := &IntSlice{} for _, s := range strings.Split(envVal, ",") { s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) } } f.Value = newVal @@ -241,36 +249,96 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) { } set.Var(f.Value, name, f.Usage) }) -} -func (f IntSliceFlag) GetName() string { - return f.Name + return nil } -// BoolFlag is a switch that defaults to false -type BoolFlag struct { - Name string - Usage string - EnvVar string - Destination *bool +// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f *Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil } // String returns a readable representation of this value (for usage defaults) -func (f BoolFlag) String() string { - return withEnvHint(f.EnvVar, 
fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) +func (f *Int64Slice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Get returns the slice of ints set by this flag +func (f *Int64Slice) Get() interface{} { + return *f } // Apply populates the flag given the flag set and environment +// Ignores errors +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + if err := newVal.Set(s); err != nil { + return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) + return nil +} + +// Apply populates the flag given the flag set and environment +// Ignores errors func (f BoolFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { val := false if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false + break + } + envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, 
err) } + + val = envValBool break } } @@ -283,38 +351,35 @@ func (f BoolFlag) Apply(set *flag.FlagSet) { } set.Bool(name, val, f.Usage) }) -} - -func (f BoolFlag) GetName() string { - return f.Name -} -// BoolTFlag this represents a boolean flag that is true by default, but can -// still be set to false by --some-flag=false -type BoolTFlag struct { - Name string - Usage string - EnvVar string - Destination *bool -} - -// String returns a readable representation of this value (for usage defaults) -func (f BoolTFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) + return nil } // Apply populates the flag given the flag set and environment +// Ignores errors func (f BoolTFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { val := true if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool + if envVal, ok := syscall.Getenv(envVar); ok { + if envVal == "" { + val = false break } + + envValBool, err := strconv.ParseBool(envVal) + if err != nil { + return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) + } + + val = envValBool + break } } } @@ -326,40 +391,22 @@ func (f BoolTFlag) Apply(set *flag.FlagSet) { } set.Bool(name, val, f.Usage) }) -} - -func (f BoolTFlag) GetName() string { - return f.Name -} - -// StringFlag represents a flag that takes as string value -type StringFlag struct { - Name string - Value string - Usage string - EnvVar string - Destination *string -} - -// String returns the usage -func (f StringFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage)) -} 
-func (f StringFlag) FormatValueHelp() string { - s := f.Value - if len(s) == 0 { - return "" - } - return fmt.Sprintf("\"%s\"", s) + return nil } // Apply populates the flag given the flag set and environment +// Ignores errors func (f StringFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { f.Value = envVal break } @@ -373,81 +420,164 @@ func (f StringFlag) Apply(set *flag.FlagSet) { } set.String(name, f.Value, f.Usage) }) -} -func (f StringFlag) GetName() string { - return f.Name + return nil } -// IntFlag is a flag that takes an integer -// Errors if the value provided cannot be parsed -type IntFlag struct { - Name string - Value int - Usage string - EnvVar string - Destination *int +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f IntFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) } -// String returns the usage -func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) +// ApplyWithError populates the flag given the flag set and environment +func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) + } + f.Value = int(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) 
+ return + } + set.Int(name, f.Value, f.Usage) + }) + + return nil } // Apply populates the flag given the flag set and environment -func (f IntFlag) Apply(set *flag.FlagSet) { +// Ignores errors +func (f Int64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = int(envValInt) - break + if err != nil { + return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) } + + f.Value = envValInt + break } } } eachName(f.Name, func(name string) { if f.Destination != nil { - set.IntVar(f.Destination, name, f.Value, f.Usage) + set.Int64Var(f.Destination, name, f.Value, f.Usage) return } - set.Int(name, f.Value, f.Usage) + set.Int64(name, f.Value, f.Usage) }) + + return nil } -func (f IntFlag) GetName() string { - return f.Name +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f UintFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) } -// DurationFlag is a flag that takes a duration specified in Go's duration -// format: https://golang.org/pkg/time/#ParseDuration -type DurationFlag struct { - Name string - Value time.Duration - Usage string - EnvVar string - Destination *time.Duration +// ApplyWithError populates the flag given the flag set and environment +func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not 
parse %s as uint value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) + + return nil } -// String returns a readable representation of this value (for usage defaults) -func (f DurationFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) +// Apply populates the flag given the flag set and environment +// Ignores errors +func (f Uint64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal, ok := syscall.Getenv(envVar); ok { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err != nil { + return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) + } + + f.Value = uint64(envValInt) + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) + + return nil } // Apply populates the flag given the flag set and environment +// Ignores errors func (f DurationFlag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { envValDuration, err := time.ParseDuration(envVal) - if err == nil { - f.Value = 
envValDuration - break + if err != nil { + return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) } + + f.Value = envValDuration + break } } } @@ -459,37 +589,29 @@ func (f DurationFlag) Apply(set *flag.FlagSet) { } set.Duration(name, f.Value, f.Usage) }) -} - -func (f DurationFlag) GetName() string { - return f.Name -} - -// Float64Flag is a flag that takes an float value -// Errors if the value provided cannot be parsed -type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string - Destination *float64 -} -// String returns the usage -func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return nil } // Apply populates the flag given the flag set and environment +// Ignores errors func (f Float64Flag) Apply(set *flag.FlagSet) { + f.ApplyWithError(set) +} + +// ApplyWithError populates the flag given the flag set and environment +func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { if f.EnvVar != "" { for _, envVar := range strings.Split(f.EnvVar, ",") { envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { + if envVal, ok := syscall.Getenv(envVar); ok { envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) + if err != nil { + return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) } + + f.Value = float64(envValFloat) + break } } } @@ -501,10 +623,18 @@ func (f Float64Flag) Apply(set *flag.FlagSet) { } set.Float64(name, f.Value, f.Usage) }) + + return nil } -func (f Float64Flag) GetName() string { - return f.Name +func visibleFlags(fl []Flag) []Flag { + visible := []Flag{} + for _, flag := range fl { + if !flagValue(flag).FieldByName("Hidden").Bool() { + visible = append(visible, flag) + } + } + return visible } func prefixFor(name string) (prefix string) { @@ -517,16 +647,37 @@ func 
prefixFor(name string) (prefix string) { return } -func prefixedNames(fullName string) (prefixed string) { +// Returns the placeholder, if any, and the unquoted usage string. +func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string parts := strings.Split(fullName, ",") for i, name := range parts { name = strings.Trim(name, " ") prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } if i < len(parts)-1 { prefixed += ", " } } - return + return prefixed } func withEnvHint(envVar, str string) string { @@ -544,3 +695,105 @@ func withEnvHint(envVar, str string) string { } return str + envText } + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f.(type) { + case IntSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag))) + case Int64SliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag))) + case StringSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag))) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + val := fv.FieldByName("Value") + + if val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) + } + } + 
+ if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return withEnvHint(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyInt64SliceFlag(f Int64SliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) +} diff --git a/vendor/github.com/urfave/cli/flag_generated.go b/vendor/github.com/urfave/cli/flag_generated.go new file mode 100644 index 000000000..491b61956 --- 
/dev/null +++ b/vendor/github.com/urfave/cli/flag_generated.go @@ -0,0 +1,627 @@ +package cli + +import ( + "flag" + "strconv" + "time" +) + +// WARNING: This file is generated! + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolFlag) GetName() string { + return f.Name +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// GlobalBool looks up the value of a global BoolFlag, returns +// false if not found +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// BoolTFlag is a flag with type bool that is true by default +type BoolTFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolTFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolTFlag) GetName() string { + return f.Name +} + +// BoolT looks up the value of a local BoolTFlag, returns +// false if not found +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// GlobalBoolT looks up the value of a global BoolTFlag, returns +// false if not found +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != 
nil { + return lookupBoolT(name, fs) + } + return false +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value time.Duration + Destination *time.Duration +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f DurationFlag) GetName() string { + return f.Name +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// GlobalDuration looks up the value of a global DurationFlag, returns +// 0 if not found +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value float64 + Destination *float64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Float64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Float64Flag) GetName() string { + return f.Name +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c 
*Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// GlobalFloat64 looks up the value of a global Float64Flag, returns +// 0 if not found +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value Generic +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f GenericFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f GenericFlag) GetName() string { + return f.Name +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// GlobalGeneric looks up the value of a global GenericFlag, returns +// nil if not found +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int64 + Destination *int64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f 
Int64Flag) GetName() string { + return f.Name +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + return lookupInt64(name, c.flagSet) +} + +// GlobalInt64 looks up the value of a global Int64Flag, returns +// 0 if not found +func (c *Context) GlobalInt64(name string) int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int + Destination *int +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntFlag) GetName() string { + return f.Name +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// GlobalInt looks up the value of a global IntFlag, returns +// 0 if not found +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *IntSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntSliceFlag) String() string { + return 
FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntSliceFlag) GetName() string { + return f.Name +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// GlobalIntSlice looks up the value of a global IntSliceFlag, returns +// nil if not found +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *Int64Slice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64SliceFlag) GetName() string { + return f.Name +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns +// nil if not found +func (c *Context) GlobalInt64Slice(name string) []int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// StringFlag is a flag with type string +type StringFlag struct { + Name 
string + Usage string + EnvVar string + Hidden bool + Value string + Destination *string +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringFlag) GetName() string { + return f.Name +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// GlobalString looks up the value of a global StringFlag, returns +// "" if not found +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *StringSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringSliceFlag) GetName() string { + return f.Name +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// GlobalStringSlice looks up the value of a global StringSliceFlag, returns +// nil if not found +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + 
parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint64 + Destination *uint64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Uint64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Uint64Flag) GetName() string { + return f.Name +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + return lookupUint64(name, c.flagSet) +} + +// GlobalUint64 looks up the value of a global Uint64Flag, returns +// 0 if not found +func (c *Context) GlobalUint64(name string) uint64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint + Destination *uint +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f UintFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f UintFlag) GetName() string { + return f.Name +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + return lookupUint(name, c.flagSet) +} + +// GlobalUint looks up the value of a global UintFlag, returns +// 0 if not found +func (c *Context) GlobalUint(name string) uint { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + 
return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go new file mode 100644 index 000000000..cba5e6cb0 --- /dev/null +++ b/vendor/github.com/urfave/cli/funcs.go @@ -0,0 +1,28 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. 
+type FlagStringFunc func(Flag) string diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go index adf157df1..c8c1aee05 100644 --- a/vendor/github.com/urfave/cli/help.go +++ b/vendor/github.com/urfave/cli/help.go @@ -3,73 +3,78 @@ package cli import ( "fmt" "io" + "os" "strings" "text/tabwriter" "text/template" ) -// The text template for the Default help topic. +// AppHelpTemplate is the text template for the Default help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} + {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} - {{if .Version}}{{if not .HideVersion}} + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + VERSION: - {{.Version}} - {{end}}{{end}}{{if len .Authors}} -AUTHOR(S): - {{range .Authors}}{{ . 
}}{{end}} - {{end}}{{if .Commands}} -COMMANDS:{{range .Categories}}{{if .Name}} - {{.Name}}{{ ":" }}{{end}}{{range .Commands}} - {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}{{end}} -{{end}}{{end}}{{if .Flags}} + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if len .Authors}} + +AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} + GLOBAL OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}}{{if .Copyright }} + {{range $index, $option := .VisibleFlags}}{{if $index}} + {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} + COPYRIGHT: - {{.Copyright}} - {{end}} + {{.Copyright}}{{end}} ` -// The text template for the command help topic. +// CommandHelpTemplate is the text template for the command help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var CommandHelpTemplate = `NAME: {{.HelpName}} - {{.Usage}} USAGE: - {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}} + {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}} CATEGORY: {{.Category}}{{end}}{{if .Description}} DESCRIPTION: - {{.Description}}{{end}}{{if .Flags}} + {{.Description}}{{end}}{{if .VisibleFlags}} OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{ end }} + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} ` -// The text template for the subcommand help topic. +// SubcommandHelpTemplate is the text template for the subcommand help topic. // cli.go uses text/template to render templates. 
You can // render custom help text by setting this variable. var SubcommandHelpTemplate = `NAME: {{.HelpName}} - {{.Usage}} USAGE: - {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} -COMMANDS:{{range .Categories}}{{if .Name}} - {{.Name}}{{ ":" }}{{end}}{{range .Commands}} - {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}{{end}} -{{end}}{{if .Flags}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{if .VisibleFlags}} OPTIONS: - {{range .Flags}}{{.}} + {{range .VisibleFlags}}{{.}} {{end}}{{end}} ` @@ -78,13 +83,14 @@ var helpCommand = Command{ Aliases: []string{"h"}, Usage: "Shows a list of commands or help for one command", ArgsUsage: "[command]", - Action: func(c *Context) { + Action: func(c *Context) error { args := c.Args() if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowAppHelp(c) + return ShowCommandHelp(c, args.First()) } + + ShowAppHelp(c) + return nil }, } @@ -93,65 +99,74 @@ var helpSubcommand = Command{ Aliases: []string{"h"}, Usage: "Shows a list of commands or help for one command", ArgsUsage: "[command]", - Action: func(c *Context) { + Action: func(c *Context) error { args := c.Args() if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowSubcommandHelp(c) + return ShowCommandHelp(c, args.First()) } + + return ShowSubcommandHelp(c) }, } // Prints help for the App or Command type helpPrinter func(w io.Writer, templ string, data interface{}) +// HelpPrinter is a function that writes the help output. If not set a default +// is used. 
The function signature is: +// func(w io.Writer, templ string, data interface{}) var HelpPrinter helpPrinter = printHelp -// Prints version for the App +// VersionPrinter prints the version for the App var VersionPrinter = printVersion -func ShowAppHelp(c *Context) { +// ShowAppHelp is an action that displays the help. +func ShowAppHelp(c *Context) error { HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) + return nil } -// Prints the list of subcommands as the default app completion method +// DefaultAppComplete prints the list of subcommands as the default app completion method func DefaultAppComplete(c *Context) { for _, command := range c.App.Commands { + if command.Hidden { + continue + } for _, name := range command.Names() { fmt.Fprintln(c.App.Writer, name) } } } -// Prints help for the given command -func ShowCommandHelp(ctx *Context, command string) { +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { // show the subcommand help for a command with subcommands if command == "" { HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return + return nil } for _, c := range ctx.App.Commands { if c.HasName(command) { HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) - return + return nil } } - if ctx.App.CommandNotFound != nil { - ctx.App.CommandNotFound(ctx, command) - } else { - fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) } + + ctx.App.CommandNotFound(ctx, command) + return nil } -// Prints help for the given subcommand -func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) } -// Prints the version number of the App +// ShowVersion prints the version number of the App func ShowVersion(c 
*Context) { VersionPrinter(c) } @@ -160,7 +175,7 @@ func printVersion(c *Context) { fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) } -// Prints the lists of commands within a given context +// ShowCompletions prints the lists of commands within a given context func ShowCompletions(c *Context) { a := c.App if a != nil && a.BashComplete != nil { @@ -168,7 +183,7 @@ func ShowCompletions(c *Context) { } } -// Prints the custom completions for a given command +// ShowCommandCompletions prints the custom completions for a given command func ShowCommandCompletions(ctx *Context, command string) { c := ctx.App.Command(command) if c != nil && c.BashComplete != nil { @@ -181,12 +196,15 @@ func printHelp(out io.Writer, templ string, data interface{}) { "join": strings.Join, } - w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0) + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) err := t.Execute(w, data) if err != nil { // If the writer is closed, t.Execute will fail, and there's nothing - // we can do to recover. We could send this to os.Stderr if we need. + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } return } w.Flush() @@ -226,7 +244,7 @@ func checkCommandHelp(c *Context, name string) bool { } func checkSubcommandHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { + if c.Bool("h") || c.Bool("help") { ShowSubcommandHelp(c) return true } @@ -234,20 +252,43 @@ func checkSubcommandHelp(c *Context) bool { return false } +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--"+BashCompletionFlag.Name { + return false, arguments + } + + return true, arguments[:pos] +} + func checkCompletions(c *Context) bool { - if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { - ShowCompletions(c) - return true + if !c.shellComplete { + return false } - return false + if args := c.Args(); args.Present() { + name := args.First() + if cmd := c.App.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(c) + return true } func checkCommandCompletions(c *Context, name string) bool { - if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { - ShowCommandCompletions(c, name) - return true + if !c.shellComplete { + return false } - return false + ShowCommandCompletions(c, name) + return true } diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md deleted file mode 100644 index dbe4d5082..000000000 --- a/vendor/github.com/xeipuuv/gojsonpointer/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# gojsonpointer -An implementation of JSON Pointer - Go language - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If 
the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md deleted file mode 100644 index 9ab6e1eb1..000000000 --- a/vendor/github.com/xeipuuv/gojsonreference/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# gojsonreference -An implementation of JSON Reference - Go language - -## Dependencies -https://github.com/xeipuuv/gojsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore deleted file mode 100644 index c1e0636fd..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.sw[nop] diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml deleted file mode 100644 index 9cc01e8ab..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 -before_install: - - go get github.com/sigu-399/gojsonreference - - go get github.com/sigu-399/gojsonpointer - - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md deleted file mode 100644 index 187da61e1..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/README.md +++ /dev/null @@ -1,236 +0,0 @@ -[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) - -# gojsonschema - -## Description - -An implementation of JSON Schema, based on IETF's draft v4 - Go language - -References : - -* http://json-schema.org -* http://json-schema.org/latest/json-schema-core.html -* 
http://json-schema.org/latest/json-schema-validation.html - -## Installation - -``` -go get github.com/xeipuuv/gojsonschema -``` - -Dependencies : -* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) -* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) -* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) - -## Usage - -### Example - -```go - -package main - -import ( - "fmt" - "github.com/xeipuuv/gojsonschema" -) - -func main() { - - schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") - documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") - - result, err := gojsonschema.Validate(schemaLoader, documentLoader) - if err != nil { - panic(err.Error()) - } - - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. see errors :\n") - for _, desc := range result.Errors() { - fmt.Printf("- %s\n", desc) - } - } - -} - - -``` - -#### Loaders - -There are various ways to load your JSON data. -In order to load your schemas and documents, -first declare an appropriate loader : - -* Web / HTTP, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") -``` - -* Local file, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") -``` - -References use the URI scheme, the prefix (file://) and a full path to the file are required. - -* JSON strings : - -```go -loader := gojsonschema.NewStringLoader(`{"type": "string"}`) -``` - -* Custom Go types : - -```go -m := map[string]interface{}{"type": "string"} -loader := gojsonschema.NewGoLoader(m) -``` - -And - -```go -type Root struct { - Users []User `json:"users"` -} - -type User struct { - Name string `json:"name"` -} - -... 
- -data := Root{} -data.Users = append(data.Users, User{"John"}) -data.Users = append(data.Users, User{"Sophia"}) -data.Users = append(data.Users, User{"Bill"}) - -loader := gojsonschema.NewGoLoader(data) -``` - -#### Validation - -Once the loaders are set, validation is easy : - -```go -result, err := gojsonschema.Validate(schemaLoader, documentLoader) -``` - -Alternatively, you might want to load a schema only once and process to multiple validations : - -```go -schema, err := gojsonschema.NewSchema(schemaLoader) -... -result1, err := schema.Validate(documentLoader1) -... -result2, err := schema.Validate(documentLoader2) -... -// etc ... -``` - -To check the result : - -```go - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. see errors :\n") - for _, err := range result.Errors() { - // Err implements the ResultError interface - fmt.Printf("- %s\n", err) - } - } -``` - -## Working with Errors - -The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it -```go -gojsonschema.Locale = YourCustomLocale{} -``` - -However, each error contains additional contextual information. - -**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check. 
See below - -Note: An error of RequiredType has an err.Type() return value of "required" - - "required": RequiredError - "invalid_type": InvalidTypeError - "number_any_of": NumberAnyOfError - "number_one_of": NumberOneOfError - "number_all_of": NumberAllOfError - "number_not": NumberNotError - "missing_dependency": MissingDependencyError - "internal": InternalError - "enum": EnumError - "array_no_additional_items": ArrayNoAdditionalItemsError - "array_min_items": ArrayMinItemsError - "array_max_items": ArrayMaxItemsError - "unique": ItemsMustBeUniqueError - "array_min_properties": ArrayMinPropertiesError - "array_max_properties": ArrayMaxPropertiesError - "additional_property_not_allowed": AdditionalPropertyNotAllowedError - "invalid_property_pattern": InvalidPropertyPatternError - "string_gte": StringLengthGTEError - "string_lte": StringLengthLTEError - "pattern": DoesNotMatchPatternError - "multiple_of": MultipleOfError - "number_gte": NumberGTEError - "number_gt": NumberGTError - "number_lte": NumberLTEError - "number_lt": NumberLTError - -**err.Value()**: *interface{}* Returns the value given - -**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName - -**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix. - -**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation. - -**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. 
Every error always contains a "field" key that holds the value of *err.Field()* - -Note in most cases, the err.Details() will be used to generate replacement strings in your locales. and not used directly i.e. -``` -%field% must be greater than or equal to %min% -``` - -## Formats -JSON Schema allows for optional "format" property to validate strings against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this: -````json -{"type": "string", "format": "email"} -```` -Available formats: date-time, hostname, email, ipv4, ipv6, uri. - -For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this: - -```go -// Define the format checker -type RoleFormatChecker struct {} - -// Ensure it meets the gojsonschema.FormatChecker interface -func (f RoleFormatChecker) IsFormat(input string) bool { - return strings.HasPrefix("ROLE_", input) -} - -// Add it to the library -gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{}) -```` - -Now to use in your json schema: -````json -{"type": "string", "format": "role"} -```` - -## Uses - -gojsonschema uses the following test suite : - -https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go index 5146cbbab..f22fa653f 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/errors.go +++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go @@ -1,10 +1,12 @@ package gojsonschema import ( - "fmt" - "strings" + "bytes" + "text/template" ) +var errorTemplates *template.Template + type ( // RequiredError. 
ErrorDetails: property string RequiredError struct { @@ -230,13 +232,32 @@ func newError(err ResultError, context *jsonContext, value interface{}, locale l err.SetDescription(formatErrorDescription(d, details)) } -// formatErrorDescription takes a string in this format: %field% is required -// and converts it to a string with replacements. The fields come from -// the ErrorDetails struct and vary for each type of error. +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. func formatErrorDescription(s string, details ErrorDetails) string { - for name, val := range details { - s = strings.Replace(s, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1) + + var tpl *template.Template + var descrAsBuffer bytes.Buffer + var err error + + if errorTemplates == nil { + errorTemplates = template.New("all-errors") + } + + tpl = errorTemplates.Lookup(s) + if tpl == nil { + tpl = errorTemplates.New(s) + tpl, err = tpl.Parse(s) + if err != nil { + return err.Error() + } + } + + err = tpl.Execute(&descrAsBuffer, details) + if err != nil { + return err.Error() } - return s + return descrAsBuffer.String() } diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go index 8be421070..0c1f92e8b 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go @@ -5,6 +5,7 @@ import ( "net/url" "reflect" "regexp" + "strings" "time" ) @@ -59,6 +60,9 @@ type ( // UUIDFormatChecker validates a UUID is in the correct format UUIDFormatChecker struct{} + + // RegexFormatChecker validates a regex is in the correct format + RegexFormatChecker struct{} ) var ( @@ -73,6 +77,7 @@ var ( "ipv6": IPV6FormatChecker{}, "uri": URIFormatChecker{}, "uuid": UUIDFormatChecker{}, + "regex": 
UUIDFormatChecker{}, }, } @@ -80,7 +85,7 @@ var ( rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$") // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname - rxHostname = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`) + rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") ) @@ -132,13 +137,13 @@ func (f EmailFormatChecker) IsFormat(input string) bool { // Credit: https://github.com/asaskevich/govalidator func (f IPV4FormatChecker) IsFormat(input string) bool { ip := net.ParseIP(input) - return ip 
!= nil && ip.To4() != nil + return ip != nil && strings.Contains(input, ".") } // Credit: https://github.com/asaskevich/govalidator func (f IPV6FormatChecker) IsFormat(input string) bool { ip := net.ParseIP(input) - return ip != nil && ip.To4() == nil + return ip != nil && strings.Contains(input, ":") } func (f DateTimeFormatChecker) IsFormat(input string) bool { @@ -169,9 +174,21 @@ func (f URIFormatChecker) IsFormat(input string) bool { } func (f HostnameFormatChecker) IsFormat(input string) bool { - return rxHostname.MatchString(input) + return rxHostname.MatchString(input) && len(input) < 256 } func (f UUIDFormatChecker) IsFormat(input string) bool { return rxUUID.MatchString(input) } + +// IsFormat implements FormatChecker interface. +func (f RegexFormatChecker) IsFormat(input string) bool { + if input == "" { + return true + } + _, err := regexp.Compile(input) + if err != nil { + return false + } + return true +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go index a9458a8e5..0e865eb0e 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -33,6 +33,7 @@ import ( "io" "io/ioutil" "net/http" + "os" "path/filepath" "runtime" "strings" @@ -40,34 +41,90 @@ import ( "github.com/xeipuuv/gojsonreference" ) +var osFS = osFileSystem(os.Open) + // JSON loader interface type JSONLoader interface { - jsonSource() interface{} - loadJSON() (interface{}, error) - loadSchema() (*Schema, error) + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +type JSONLoaderFactory interface { + New(source string) JSONLoader +} + +type DefaultJSONLoaderFactory struct { +} + +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return 
&jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) } // JSON Reference loader // references are used to load JSONs from files and HTTP type jsonReferenceLoader struct { + fs http.FileSystem source string } -func (l *jsonReferenceLoader) jsonSource() interface{} { +func (l *jsonReferenceLoader) JsonSource() interface{} { return l.source } +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. func NewReferenceLoader(source string) *jsonReferenceLoader { - return &jsonReferenceLoader{source: source} + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } } -func (l *jsonReferenceLoader) loadJSON() (interface{}, error) { +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { var err error - reference, err := gojsonreference.NewJsonReference(l.jsonSource().(string)) + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) if err != nil { return nil, err } @@ -79,7 +136,7 @@ func (l *jsonReferenceLoader) loadJSON() (interface{}, error) { if reference.HasFileScheme { - filename := strings.Replace(refToUrl.String(), "file://", "", -1) + filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1) if runtime.GOOS == "windows" { // on Windows, a file URL may have an extra leading slash, use slashes // instead of backslashes, and have spaces escaped @@ -87,7 +144,6 @@ func (l *jsonReferenceLoader) loadJSON() (interface{}, error) { filename = filename[1:] } filename = filepath.FromSlash(filename) - filename = strings.Replace(filename, "%20", " ", -1) } document, err = l.loadFromFile(filename) @@ -108,33 +164,6 @@ func (l *jsonReferenceLoader) loadJSON() (interface{}, error) { } -func (l *jsonReferenceLoader) loadSchema() (*Schema, error) { - - var err error - - d := Schema{} - d.pool = newSchemaPool() - d.referencePool = newSchemaReferencePool() - - d.documentReference, err = gojsonreference.NewJsonReference(l.jsonSource().(string)) - if err != nil { - return nil, err - } - - spd, err := d.pool.GetDocument(d.documentReference) - if err != nil { - return nil, err - } - - err = d.parse(spd.Document) - if err != nil { - return nil, err - } - - return &d, nil - -} - func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { resp, err := http.Get(address) @@ -157,8 +186,13 @@ func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) } func (l *jsonReferenceLoader) loadFromFile(path string) 
(interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() - bodyBuff, err := ioutil.ReadFile(path) + bodyBuff, err := ioutil.ReadAll(f) if err != nil { return nil, err } @@ -173,45 +207,52 @@ type jsonStringLoader struct { source string } -func (l *jsonStringLoader) jsonSource() interface{} { +func (l *jsonStringLoader) JsonSource() interface{} { return l.source } +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + func NewStringLoader(source string) *jsonStringLoader { return &jsonStringLoader{source: source} } -func (l *jsonStringLoader) loadJSON() (interface{}, error) { +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { - return decodeJsonUsingNumber(strings.NewReader(l.jsonSource().(string))) + return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string))) } -func (l *jsonStringLoader) loadSchema() (*Schema, error) { +// JSON bytes loader - var err error +type jsonBytesLoader struct { + source []byte +} - document, err := l.loadJSON() - if err != nil { - return nil, err - } +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} - d := Schema{} - d.pool = newSchemaPool() - d.referencePool = newSchemaReferencePool() - d.documentReference, err = gojsonreference.NewJsonReference("#") - d.pool.SetStandaloneDocument(document) - if err != nil { - return nil, err - } +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} - err = d.parse(document) - if err != nil { - return nil, err - } +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} - return &d, nil +func NewBytesLoader(source []byte) *jsonBytesLoader { + return &jsonBytesLoader{source: source} +} +func (l 
*jsonBytesLoader) LoadJSON() (interface{}, error) { + return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) } // JSON Go (types) loader @@ -221,19 +262,27 @@ type jsonGoLoader struct { source interface{} } -func (l *jsonGoLoader) jsonSource() interface{} { +func (l *jsonGoLoader) JsonSource() interface{} { return l.source } +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + func NewGoLoader(source interface{}) *jsonGoLoader { return &jsonGoLoader{source: source} } -func (l *jsonGoLoader) loadJSON() (interface{}, error) { +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { // convert it to a compliant JSON first to avoid types "mismatches" - jsonBytes, err := json.Marshal(l.jsonSource()) + jsonBytes, err := json.Marshal(l.JsonSource()) if err != nil { return nil, err } @@ -242,33 +291,6 @@ func (l *jsonGoLoader) loadJSON() (interface{}, error) { } -func (l *jsonGoLoader) loadSchema() (*Schema, error) { - - var err error - - document, err := l.loadJSON() - if err != nil { - return nil, err - } - - d := Schema{} - d.pool = newSchemaPool() - d.referencePool = newSchemaReferencePool() - d.documentReference, err = gojsonreference.NewJsonReference("#") - d.pool.SetStandaloneDocument(document) - if err != nil { - return nil, err - } - - err = d.parse(document) - if err != nil { - return nil, err - } - - return &d, nil - -} - func decodeJsonUsingNumber(r io.Reader) (interface{}, error) { var document interface{} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go index de05d60df..f5698f033 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/locales.go +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -37,6 +37,7 @@ type ( MissingDependency() string Internal() string Enum() string + 
ArrayNotEnoughItems() string ArrayNoAdditionalItems() string ArrayMinItems() string ArrayMaxItems() string @@ -83,11 +84,11 @@ type ( ) func (l DefaultLocale) Required() string { - return `%property% is required` + return `{{.property}} is required` } func (l DefaultLocale) InvalidType() string { - return `Invalid type. Expected: %expected%, given: %given%` + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` } func (l DefaultLocale) NumberAnyOf() string { @@ -107,157 +108,161 @@ func (l DefaultLocale) NumberNot() string { } func (l DefaultLocale) MissingDependency() string { - return `Has a dependency on %dependency%` + return `Has a dependency on {{.dependency}}` } func (l DefaultLocale) Internal() string { - return `Internal Error %error%` + return `Internal Error {{.error}}` } func (l DefaultLocale) Enum() string { - return `%field% must be one of the following: %allowed%` + return `{{.field}} must be one of the following: {{.allowed}}` } func (l DefaultLocale) ArrayNoAdditionalItems() string { return `No additional items allowed on array` } +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + func (l DefaultLocale) ArrayMinItems() string { - return `Array must have at least %min% items` + return `Array must have at least {{.min}} items` } func (l DefaultLocale) ArrayMaxItems() string { - return `Array must have at most %max% items` + return `Array must have at most {{.max}} items` } func (l DefaultLocale) Unique() string { - return `%type% items must be unique` + return `{{.type}} items must be unique` } func (l DefaultLocale) ArrayMinProperties() string { - return `Must have at least %min% properties` + return `Must have at least {{.min}} properties` } func (l DefaultLocale) ArrayMaxProperties() string { - return `Must have at most %max% properties` + return `Must have at most {{.max}} properties` } func (l DefaultLocale) AdditionalPropertyNotAllowed() string { - 
return `Additional property %property% is not allowed` + return `Additional property {{.property}} is not allowed` } func (l DefaultLocale) InvalidPropertyPattern() string { - return `Property "%property%" does not match pattern %pattern%` + return `Property "{{.property}}" does not match pattern {{.pattern}}` } func (l DefaultLocale) StringGTE() string { - return `String length must be greater than or equal to %min%` + return `String length must be greater than or equal to {{.min}}` } func (l DefaultLocale) StringLTE() string { - return `String length must be less than or equal to %max%` + return `String length must be less than or equal to {{.max}}` } func (l DefaultLocale) DoesNotMatchPattern() string { - return `Does not match pattern '%pattern%'` + return `Does not match pattern '{{.pattern}}'` } func (l DefaultLocale) DoesNotMatchFormat() string { - return `Does not match format '%format%'` + return `Does not match format '{{.format}}'` } func (l DefaultLocale) MultipleOf() string { - return `Must be a multiple of %multiple%` + return `Must be a multiple of {{.multiple}}` } func (l DefaultLocale) NumberGTE() string { - return `Must be greater than or equal to %min%` + return `Must be greater than or equal to {{.min}}` } func (l DefaultLocale) NumberGT() string { - return `Must be greater than %min%` + return `Must be greater than {{.min}}` } func (l DefaultLocale) NumberLTE() string { - return `Must be less than or equal to %max%` + return `Must be less than or equal to {{.max}}` } func (l DefaultLocale) NumberLT() string { - return `Must be less than %max%` + return `Must be less than {{.max}}` } // Schema validators func (l DefaultLocale) RegexPattern() string { - return `Invalid regex pattern '%pattern%'` + return `Invalid regex pattern '{{.pattern}}'` } func (l DefaultLocale) GreaterThanZero() string { - return `%number% must be strictly greater than 0` + return `{{.number}} must be strictly greater than 0` } func (l DefaultLocale) MustBeOfA() string { - 
return `%x% must be of a %y%` + return `{{.x}} must be of a {{.y}}` } func (l DefaultLocale) MustBeOfAn() string { - return `%x% must be of an %y%` + return `{{.x}} must be of an {{.y}}` } func (l DefaultLocale) CannotBeUsedWithout() string { - return `%x% cannot be used without %y%` + return `{{.x}} cannot be used without {{.y}}` } func (l DefaultLocale) CannotBeGT() string { - return `%x% cannot be greater than %y%` + return `{{.x}} cannot be greater than {{.y}}` } func (l DefaultLocale) MustBeOfType() string { - return `%key% must be of type %type%` + return `{{.key}} must be of type {{.type}}` } func (l DefaultLocale) MustBeValidRegex() string { - return `%key% must be a valid regex` + return `{{.key}} must be a valid regex` } func (l DefaultLocale) MustBeValidFormat() string { - return `%key% must be a valid format %given%` + return `{{.key}} must be a valid format {{.given}}` } func (l DefaultLocale) MustBeGTEZero() string { - return `%key% must be greater than or equal to 0` + return `{{.key}} must be greater than or equal to 0` } func (l DefaultLocale) KeyCannotBeGreaterThan() string { - return `%key% cannot be greater than %y%` + return `{{.key}} cannot be greater than {{.y}}` } func (l DefaultLocale) KeyItemsMustBeOfType() string { - return `%key% items must be %type%` + return `{{.key}} items must be {{.type}}` } func (l DefaultLocale) KeyItemsMustBeUnique() string { - return `%key% items must be unique` + return `{{.key}} items must be unique` } func (l DefaultLocale) ReferenceMustBeCanonical() string { - return `Reference %reference% must be canonical` + return `Reference {{.reference}} must be canonical` } func (l DefaultLocale) NotAValidType() string { - return `%type% is not a valid type -- ` + return `{{.type}} is not a valid type -- ` } func (l DefaultLocale) Duplicated() string { - return `%type% type is duplicated` + return `{{.type}} type is duplicated` } func (l DefaultLocale) httpBadStatus() string { - return `Could not read schema from HTTP, 
response status is %status%` + return `Could not read schema from HTTP, response status is {{.status}}` } // Replacement options: field, description, context, value func (l DefaultLocale) ErrorFormat() string { - return `%field%: %description%` + return `{{.field}}: {{.description}}` } const ( diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go index 4e2bfaef1..6ad56ae86 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/result.go +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -48,6 +48,7 @@ type ( Value() interface{} SetDetails(ErrorDetails) Details() ErrorDetails + String() string } // ResultErrorFields holds the fields for each ResultError implementation. @@ -126,7 +127,7 @@ func (v ResultErrorFields) String() string { valueString := fmt.Sprintf("%v", v.value) // marshal the go value value to json - if v.Value == nil { + if v.value == nil { valueString = TYPE_NULL } else { if vs, err := marshalToJsonString(v.value); err == nil { diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go index 577c7863f..cf3cbc7d5 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/schema.go +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -42,7 +42,39 @@ var ( ) func NewSchema(l JSONLoader) (*Schema, error) { - return l.loadSchema() + ref, err := l.JsonReference() + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = newSchemaPool(l.LoaderFactory()) + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + } else { + // Load JSON directly + doc, err = l.LoadJSON() + if err != nil { + return nil, err + } + d.pool.SetStandaloneDocument(doc) + } + + err = d.parse(doc) + if err != nil { + return 
nil, err + } + + return &d, nil } type Schema struct { @@ -116,14 +148,27 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) } if k, ok := m[KEY_REF].(string); ok { - if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + if jsonReference.HasFullUrl { + currentSchema.ref = &jsonReference + } else { + inheritedReference, err := currentSchema.ref.Inherits(jsonReference) + if err != nil { + return err + } + + currentSchema.ref = inheritedReference + } + if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok { currentSchema.refSchema = sch } else { - - var err error - err = d.parseReference(documentNode, currentSchema, k) + err := d.parseReference(documentNode, currentSchema, k) if err != nil { return err } @@ -755,30 +800,10 @@ func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) return nil } -func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) (e error) { - - var err error - - jsonReference, err := gojsonreference.NewJsonReference(reference) - if err != nil { - return err - } - - standaloneDocument := d.pool.GetStandaloneDocument() - - if jsonReference.HasFullUrl { - currentSchema.ref = &jsonReference - } else { - inheritedReference, err := currentSchema.ref.Inherits(jsonReference) - if err != nil { - return err - } - currentSchema.ref = inheritedReference - } - - jsonPointer := currentSchema.ref.GetPointer() - +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error { var refdDocumentNode interface{} + jsonPointer := currentSchema.ref.GetPointer() + standaloneDocument := d.pool.GetStandaloneDocument() if standaloneDocument != nil { @@ -789,8 +814,6 @@ func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSche } } else { - - var err error dsp, err := 
d.pool.GetDocument(*currentSchema.ref) if err != nil { return err @@ -812,11 +835,10 @@ func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSche // returns the loaded referenced subSchema for the caller to update its current subSchema newSchemaDocument := refdDocumentNode.(map[string]interface{}) - newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} d.referencePool.Add(currentSchema.ref.String()+reference, newSchema) - err = d.parseSchema(newSchemaDocument, newSchema) + err := d.parseSchema(newSchemaDocument, newSchema) if err != nil { return err } diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go index 79fbb60cb..f2ad641af 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -39,13 +39,15 @@ type schemaPoolDocument struct { type schemaPool struct { schemaPoolDocuments map[string]*schemaPoolDocument standaloneDocument interface{} + jsonLoaderFactory JSONLoaderFactory } -func newSchemaPool() *schemaPool { +func newSchemaPool(f JSONLoaderFactory) *schemaPool { p := &schemaPool{} p.schemaPoolDocuments = make(map[string]*schemaPoolDocument) p.standaloneDocument = nil + p.jsonLoaderFactory = f return p } @@ -93,8 +95,8 @@ func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*sche return spd, nil } - jsonReferenceLoader := NewReferenceLoader(reference.String()) - document, err := jsonReferenceLoader.loadJSON() + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() if err != nil { return nil, err } diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go index b249b7e55..9ddbb5fc1 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go +++ 
b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -214,7 +214,7 @@ func (s *subSchema) PatternPropertiesString() string { } patternPropertiesKeySlice := []string{} - for pk, _ := range s.patternProperties { + for pk := range s.patternProperties { patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`) } diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go index cfeb628eb..26cf75ebf 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/utils.go +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -34,7 +34,12 @@ import ( ) func isKind(what interface{}, kind reflect.Kind) bool { - return reflect.ValueOf(what).Kind() == kind + target := what + if isJsonNumber(what) { + // JSON Numbers are strings! + target = *mustBeNumber(what) + } + return reflect.ValueOf(target).Kind() == kind } func existsMapKey(m map[string]interface{}, k string) bool { @@ -77,13 +82,14 @@ func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, jsonNumber := what.(json.Number) - _, errFloat64 := jsonNumber.Float64() - _, errInt64 := jsonNumber.Int64() + f64, errFloat64 := jsonNumber.Float64() + s64 := strconv.FormatFloat(f64, 'f', -1, 64) + _, errInt64 := strconv.ParseInt(s64, 10, 64) isValidFloat64 = errFloat64 == nil isValidInt64 = errInt64 == nil - _, errInt32 := strconv.ParseInt(jsonNumber.String(), 10, 32) + _, errInt32 := strconv.ParseInt(s64, 10, 32) isValidInt32 = isValidInt64 && errInt32 == nil return diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go index 23bd52a38..5b2230db1 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/validation.go +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -55,7 +55,7 @@ func (v *Schema) Validate(l JSONLoader) (*Result, error) { // load document - root, err := l.loadJSON() + root, err := l.LoadJSON() if err != nil { return nil, err } @@ 
-412,7 +412,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface internalLog(" %v", value) } - nbItems := len(value) + nbValues := len(value) // TODO explain if currentSubSchema.itemsChildrenIsSingleSchema { @@ -425,15 +425,18 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { nbItems := len(currentSubSchema.itemsChildren) - nbValues := len(value) - if nbItems == nbValues { - for i := 0; i != nbItems; i++ { - subContext := newJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } else if nbItems < nbValues { + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have less schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. 
+ switch currentSubSchema.additionalItems.(type) { case bool: if !currentSubSchema.additionalItems.(bool) { @@ -453,7 +456,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface // minItems & maxItems if currentSubSchema.minItems != nil { - if nbItems < int(*currentSubSchema.minItems) { + if nbValues < int(*currentSubSchema.minItems) { result.addError( new(ArrayMinItemsError), context, @@ -463,7 +466,7 @@ func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface } } if currentSubSchema.maxItems != nil { - if nbItems > int(*currentSubSchema.maxItems) { + if nbValues > int(*currentSubSchema.maxItems) { result.addError( new(ArrayMaxItemsError), context, @@ -776,22 +779,22 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{ if currentSubSchema.exclusiveMaximum { if float64Value >= *currentSubSchema.maximum { result.addError( - new(NumberLTEError), + new(NumberLTError), context, resultErrorFormatJsonNumber(number), ErrorDetails{ - "min": resultErrorFormatNumber(*currentSubSchema.maximum), + "max": resultErrorFormatNumber(*currentSubSchema.maximum), }, ) } } else { if float64Value > *currentSubSchema.maximum { result.addError( - new(NumberLTError), + new(NumberLTEError), context, resultErrorFormatJsonNumber(number), ErrorDetails{ - "min": resultErrorFormatNumber(*currentSubSchema.maximum), + "max": resultErrorFormatNumber(*currentSubSchema.maximum), }, ) } @@ -803,22 +806,22 @@ func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{ if currentSubSchema.exclusiveMinimum { if float64Value <= *currentSubSchema.minimum { result.addError( - new(NumberGTEError), + new(NumberGTError), context, resultErrorFormatJsonNumber(number), ErrorDetails{ - "max": resultErrorFormatNumber(*currentSubSchema.minimum), + "min": resultErrorFormatNumber(*currentSubSchema.minimum), }, ) } } else { if float64Value < *currentSubSchema.minimum { result.addError( - new(NumberGTError), + 
new(NumberGTEError), context, resultErrorFormatJsonNumber(number), ErrorDetails{ - "max": resultErrorFormatNumber(*currentSubSchema.minimum), + "min": resultErrorFormatNumber(*currentSubSchema.minimum), }, ) } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index ea1a7cd53..134654cf7 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -34,7 +34,7 @@ // // See http://blog.golang.org/context for example code for a server that uses // Contexts. -package context +package context // import "golang.org/x/net/context" import "time" diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go index f31d88273..aa288de64 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -5,7 +5,7 @@ // +build go1.7 // Package ctxhttp provides helper functions for performing context-aware HTTP requests. 
-package ctxhttp +package ctxhttp // import "golang.org/x/net/context/ctxhttp" import ( "io" diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go index 7564b2032..926870cc2 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -4,7 +4,7 @@ // +build !go1.7 -package ctxhttp +package ctxhttp // import "golang.org/x/net/context/ctxhttp" import ( "io" diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore deleted file mode 100644 index 190f12234..000000000 --- a/vendor/golang.org/x/net/http2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc52579..000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f7..000000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa37..000000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. 
A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go index 3f90b7300..1119f3448 100644 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package timeseries implements a time series structure for stats collection. -package timeseries +package timeseries // import "golang.org/x/net/internal/timeseries" import ( "fmt" diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go index 877dfb32b..d860fccf9 100644 --- a/vendor/golang.org/x/net/trace/trace.go +++ b/vendor/golang.org/x/net/trace/trace.go @@ -60,7 +60,7 @@ The /debug/events HTTP endpoint organizes the event logs by family and by time since the last error. The expanded view displays recent log entries and the log's call stack. */ -package trace +package trace // import "golang.org/x/net/trace" import ( "bytes" diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml deleted file mode 100644 index a035125c3..000000000 --- a/vendor/golang.org/x/oauth2/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - -install: - - export GOPATH="$HOME/gopath" - - mkdir -p "$GOPATH/src/golang.org/x" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d golang.org/x/oauth2/... - -script: - - go test -v golang.org/x/oauth2/... 
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b12d..000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. 
-# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 0d5141733..000000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means its no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. 
- - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") - } - diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go index 4a554cb9b..8962c49d1 100644 --- a/vendor/golang.org/x/oauth2/client_appengine.go +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -1,8 +1,8 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine appenginevm +// +build appengine // App Engine hooks. diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index 65dc34731..dc993efb5 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,6 +14,9 @@ import ( "golang.org/x/oauth2" ) +// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. +var appengineVM bool + // Set at init time by appengine_hook.go. If nil, we're not on App Engine. 
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go index 2f9b15432..4f42c8b34 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -1,8 +1,8 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine appenginevm +// +build appengine package google diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go new file mode 100644 index 000000000..633611cc3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineVM = true + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 78f808985..565d731c4 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -1,4 +1,4 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -14,10 +14,10 @@ import ( "path/filepath" "runtime" + "cloud.google.com/go/compute/metadata" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" ) // DefaultClient returns an HTTP Client that uses the @@ -50,7 +50,8 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. // 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. // (In this final case any provided scopes are ignored.) // // For more details, see: @@ -84,7 +85,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc } // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil { + if appengineTokenFunc != nil && !appengineVM { return AppEngineTokenSource(ctx, scope...), nil } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 2077d9866..a48d5bf3e 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -12,7 +12,7 @@ // https://developers.google.com/accounts/docs/OAuth2 // and // https://developers.google.com/accounts/docs/application-default-credentials. 
-package google +package google // import "golang.org/x/oauth2/google" import ( "encoding/json" @@ -21,9 +21,9 @@ import ( "strings" "time" + "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" ) // Endpoint is Google's OAuth 2.0 endpoint. @@ -37,9 +37,10 @@ const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" // ConfigFromJSON uses a Google Developers Console client_credentials.json // file to construct a config. -// client_credentials.json can be downloadable from https://console.developers.google.com, -// under "APIs & Auth" > "Credentials". Download the Web application credentials in the -// JSON format and provide the contents of the file as jsonKey. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { type cred struct { ClientID string `json:"client_id"` @@ -81,22 +82,29 @@ func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { // JWTConfigFromJSON uses a Google Developers service account JSON key file to read // the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" page under "APIs & Auth" for your -// project at https://console.developers.google.com to download a JSON key file. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. 
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { var key struct { - Email string `json:"client_email"` - PrivateKey string `json:"private_key"` + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + PrivateKeyID string `json:"private_key_id"` + TokenURL string `json:"token_uri"` } if err := json.Unmarshal(jsonKey, &key); err != nil { return nil, err } - return &jwt.Config{ - Email: key.Email, - PrivateKey: []byte(key.PrivateKey), - Scopes: scope, - TokenURL: JWTTokenURL, - }, nil + config := &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + PrivateKeyID: key.PrivateKeyID, + Scopes: scope, + TokenURL: key.TokenURL, + } + if config.TokenURL == "" { + config.TokenURL = JWTTokenURL + } + return config, nil } // ComputeTokenSource returns a token source that fetches access tokens diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b0fdb3a88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go index 01ba0ecb0..d29a3bb9b 100644 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -1,4 +1,4 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index dc8ebfc4f..fbe1028d6 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. 
All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index ea6716c98..18328a0dc 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -91,24 +91,36 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { var brokenAuthHeaderProviders = []string{ "https://accounts.google.com/", - "https://www.googleapis.com/", - "https://github.com/", - "https://api.instagram.com/", - "https://www.douban.com/", "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", "https://api.soundcloud.com/", - "https://www.linkedin.com/", "https://api.twitch.tv/", - "https://oauth.vk.com/", - "https://api.odnoklassniki.ru/", + "https://app.box.com/", "https://connect.stripe.com/", - "https://api.pushbullet.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", - "https://www.strava.com/oauth/", - "https://app.box.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", +} + +func 
RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) } // providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL @@ -134,23 +146,23 @@ func providerAuthHeaderWorks(tokenURL string) bool { return true } -func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { hc, err := ContextClient(ctx) if err != nil { return nil, err } - v.Set("client_id", ClientID) - bustedAuth := !providerAuthHeaderWorks(TokenURL) - if bustedAuth && ClientSecret != "" { - v.Set("client_secret", ClientSecret) + v.Set("client_id", clientID) + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth && clientSecret != "" { + v.Set("client_secret", clientSecret) } - req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") if !bustedAuth { - req.SetBasicAuth(ClientID, ClientSecret) + req.SetBasicAuth(clientID, clientSecret) } r, err := hc.Do(req) if err != nil { diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 521e7b49e..f1f173e34 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -33,6 +33,11 @@ func RegisterContextClientFunc(fn ContextClientFunc) { } func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } for _, fn := range contextClientFuncs { c, err := fn(ctx) if err != nil { @@ -42,9 +47,6 @@ func ContextClient(ctx context.Context) (*http.Client, error) { return c, nil } } - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } return http.DefaultClient, nil } diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 396b3fac8..683d2d271 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -1,10 +1,18 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package jws provides encoding and decoding utilities for -// signed JWS messages. -package jws +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. +package jws // import "golang.org/x/oauth2/jws" import ( "bytes" @@ -27,8 +35,8 @@ type ClaimSet struct { Iss string `json:"iss"` // email address of the client_id of the application making the access token request Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). 
- Exp int64 `json:"exp"` // the expiration time of the assertion - Iat int64 `json:"iat"` // the time the assertion was issued. + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) Typ string `json:"typ,omitempty"` // token type (Optional). // Email for which the application is requesting delegated access (Optional). @@ -41,23 +49,22 @@ type ClaimSet struct { // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 // This array is marshalled using custom code (see (c *ClaimSet) encode()). PrivateClaims map[string]interface{} `json:"-"` - - exp time.Time - iat time.Time } func (c *ClaimSet) encode() (string, error) { - if c.exp.IsZero() || c.iat.IsZero() { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - c.iat = now - c.exp = now.Add(time.Hour) + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) } - - c.Exp = c.exp.Unix() - c.Iat = c.iat.Unix() b, err := json.Marshal(c) if err != nil { @@ -65,7 +72,7 @@ func (c *ClaimSet) encode() (string, error) { } if len(c.PrivateClaims) == 0 { - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Marshal private claim set and then append it to b. @@ -83,7 +90,7 @@ func (c *ClaimSet) encode() (string, error) { } b[len(b)-1] = ',' // Replace closing curly brace with a comma. 
b = append(b, prv[1:]...) // Append private claims. - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Header represents the header for the signed JWS payloads. @@ -93,6 +100,9 @@ type Header struct { // Represents the token type. Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` } func (h *Header) encode() (string, error) { @@ -100,7 +110,7 @@ func (h *Header) encode() (string, error) { if err != nil { return "", err } - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Decode decodes a claim set from a JWS payload. @@ -111,7 +121,7 @@ func Decode(payload string) (*ClaimSet, error) { // TODO(jbd): Provide more context about the error. return nil, errors.New("jws: invalid token received") } - decoded, err := base64Decode(s[1]) + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) if err != nil { return nil, err } @@ -120,8 +130,11 @@ func Decode(payload string) (*ClaimSet, error) { return c, err } -// Encode encodes a signed JWS with provided header and claim set. -func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. 
+func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -131,30 +144,39 @@ func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, err return "", err } ss := fmt.Sprintf("%s.%s", head, cs) - h := sha256.New() - h.Write([]byte(ss)) - b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) + sig, err := sg([]byte(ss)) if err != nil { return "", err } - sig := base64Encode(b) - return fmt.Sprintf("%s.%s", ss, sig), nil + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil } -// base64Encode returns and Base64url encoded version of the input string with any -// trailing "=" stripped. -func base64Encode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) } -// base64Decode decodes the Base64url encoded string -func base64Decode(s string) ([]byte, error) { - // add back missing padding - switch len(s) % 4 { - case 2: - s += "==" - case 3: - s += "=" +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") } - return base64.URLEncoding.DecodeString(s) + + signedContent := parts[0] + "." 
+ parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) } diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index 205d23ed4..f4b9523e6 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -46,6 +46,10 @@ type Config struct { // PrivateKey []byte + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + // Subject is the optional user to impersonate. Subject string @@ -54,6 +58,9 @@ type Config struct { // TokenURL is the endpoint required to complete the 2-legged JWT flow. TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration } // TokenSource returns a JWT TokenSource using the configuration @@ -95,6 +102,9 @@ func (js jwtSource) Token() (*oauth2.Token, error) { // to be compatible with legacy OAuth 2.0 providers. claimSet.Prn = subject } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } payload, err := jws.Encode(defaultHeader, claimSet, pk) if err != nil { return nil, err diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index dfcf238d2..7b06bfe1e 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -1,11 +1,11 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// Package oauth2 provides support for making // OAuth2 authorized and authenticated HTTP requests. // It can additionally grant authorization with Bearer JWT. -package oauth2 +package oauth2 // import "golang.org/x/oauth2" import ( "bytes" @@ -21,10 +21,26 @@ import ( // NoContext is the default context you should supply if not using // your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. var NoContext = context.TODO() +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). type Config struct { // ClientID is the application's ID. 
ClientID string @@ -283,7 +299,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { c, err := internal.ContextClient(ctx) if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} + return &http.Client{Transport: internal.ErrorTransport{Err: err}} } return c } diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index ebbdddbdc..7a3167f15 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,6 +7,7 @@ package oauth2 import ( "net/http" "net/url" + "strconv" "strings" "time" @@ -92,14 +93,28 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. func (t *Token) Extra(key string) interface{} { - if vals, ok := t.raw.(url.Values); ok { - // TODO(jbd): Cast numeric values to int64 or float64. - return vals.Get(key) - } if raw, ok := t.raw.(map[string]interface{}); ok { return raw[key] } - return nil + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v } // expired reports whether the token is expired. 
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 90db08833..92ac7e253 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore deleted file mode 100644 index e48271590..000000000 --- a/vendor/golang.org/x/sys/unix/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_obj/ diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh deleted file mode 100755 index de95a4bbc..000000000 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# The unix package provides access to the raw system call -# interface of the underlying operating system. Porting Go to -# a new architecture/operating system combination requires -# some manual effort, though there are tools that automate -# much of the process. The auto-generated files have names -# beginning with z. -# -# This script runs or (given -n) prints suggested commands to generate z files -# for the current system. Running those commands is not automatic. -# This script is documentation more than anything else. -# -# * asm_${GOOS}_${GOARCH}.s -# -# This hand-written assembly file implements system call dispatch. 
-# There are three entry points: -# -# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); -# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr); -# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr); -# -# The first and second are the standard ones; they differ only in -# how many arguments can be passed to the kernel. -# The third is for low-level use by the ForkExec wrapper; -# unlike the first two, it does not call into the scheduler to -# let it know that a system call is running. -# -# * syscall_${GOOS}.go -# -# This hand-written Go file implements system calls that need -# special handling and lists "//sys" comments giving prototypes -# for ones that can be auto-generated. Mksyscall reads those -# comments to generate the stubs. -# -# * syscall_${GOOS}_${GOARCH}.go -# -# Same as syscall_${GOOS}.go except that it contains code specific -# to ${GOOS} on one particular architecture. -# -# * types_${GOOS}.c -# -# This hand-written C file includes standard C headers and then -# creates typedef or enum names beginning with a dollar sign -# (use of $ in variable names is a gcc extension). The hardest -# part about preparing this file is figuring out which headers to -# include and which symbols need to be #defined to get the -# actual data structures that pass through to the kernel system calls. -# Some C libraries present alternate versions for binary compatibility -# and translate them on the way in and out of system calls, but -# there is almost always a #define that can get the real ones. -# See types_darwin.c and types_linux.c for examples. -# -# * zerror_${GOOS}_${GOARCH}.go -# -# This machine-generated file defines the system's error numbers, -# error strings, and signal numbers. The generator is "mkerrors.sh". -# Usually no arguments are needed, but mkerrors.sh will pass its -# arguments on to godefs. -# -# * zsyscall_${GOOS}_${GOARCH}.go -# -# Generated by mksyscall.pl; see syscall_${GOOS}.go above. 
-# -# * zsysnum_${GOOS}_${GOARCH}.go -# -# Generated by mksysnum_${GOOS}. -# -# * ztypes_${GOOS}_${GOARCH}.go -# -# Generated by godefs; see types_${GOOS}.c above. - -GOOSARCH="${GOOS}_${GOARCH}" - -# defaults -mksyscall="./mksyscall.pl" -mkerrors="./mkerrors.sh" -zerrors="zerrors_$GOOSARCH.go" -mksysctl="" -zsysctl="zsysctl_$GOOSARCH.go" -mksysnum= -mktypes= -run="sh" - -case "$1" in --syscalls) - for i in zsyscall*go - do - sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i - rm _$i - done - exit 0 - ;; --n) - run="cat" - shift -esac - -case "$#" in -0) - ;; -*) - echo 'usage: mkall.sh [-n]' 1>&2 - exit 2 -esac - -GOOSARCH_in=syscall_$GOOSARCH.go -case "$GOOSARCH" in -_* | *_ | _) - echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 - exit 1 - ;; -darwin_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_amd64) - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_arm) - mkerrors="$mkerrors" - mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -darwin_arm64) - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -dragonfly_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -dragonfly" - mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -dragonfly_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -dragonfly" - mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | 
./mksysnum_dragonfly.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_amd64) - mkerrors="$mkerrors -m64" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -freebsd_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -arm" - mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl" - # Let the type of C char be singed for making the bare syscall - # API consistent across over platforms. - mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -linux_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" - mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_amd64) - unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1) - if [ "$unistd_h" = "" ]; then - echo >&2 cannot find unistd_64.h - exit 1 - fi - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_arm) - mkerrors="$mkerrors" - mksyscall="./mksyscall.pl -l32 -arm" - mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_arm64) - unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1) - if [ "$unistd_h" = "" ]; then - echo >&2 cannot find unistd_64.h - exit 1 - fi - mksysnum="./mksysnum_linux.pl $unistd_h" - # Let the type of C char be singed for making the bare syscall - # API 
consistent across over platforms. - mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" - ;; -linux_ppc64) - GOOSARCH_in=syscall_linux_ppc64x.go - unistd_h=/usr/include/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -linux_ppc64le) - GOOSARCH_in=syscall_linux_ppc64x.go - unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h - mkerrors="$mkerrors -m64" - mksysnum="./mksysnum_linux.pl $unistd_h" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -netbsd" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -netbsd_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -netbsd" - mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -openbsd_386) - mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32 -openbsd" - mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -openbsd_amd64) - mkerrors="$mkerrors -m64" - mksyscall="./mksyscall.pl -openbsd" - mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" - mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -solaris_amd64) - mksyscall="./mksyscall_solaris.pl" - mkerrors="$mkerrors -m64" - mksysnum= - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; -*) - echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 - exit 1 - ;; -esac - -( - if [ -n "$mkerrors" ]; then echo "$mkerrors 
|gofmt >$zerrors"; fi - case "$GOOS" in - *) - syscall_goos="syscall_$GOOS.go" - case "$GOOS" in - darwin | dragonfly | freebsd | netbsd | openbsd) - syscall_goos="syscall_bsd.go $syscall_goos" - ;; - esac - if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi - ;; - esac - if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi - if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi - if [ -n "$mktypes" ]; then - echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go"; - echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go"; - fi -) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh deleted file mode 100755 index c40d788c4..000000000 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# Generate Go code listing errors and other #defined constant -# values (ENAMETOOLONG etc.), by asking the preprocessor -# about the definitions. - -unset LANG -export LC_ALL=C -export LC_CTYPE=C - -if test -z "$GOARCH" -o -z "$GOOS"; then - echo 1>&2 "GOARCH or GOOS not defined in environment" - exit 1 -fi - -CC=${CC:-cc} - -if [[ "$GOOS" -eq "solaris" ]]; then - # Assumes GNU versions of utilities in PATH. 
- export PATH=/usr/gnu/bin:$PATH -fi - -uname=$(uname) - -includes_Darwin=' -#define _DARWIN_C_SOURCE -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - -includes_DragonFly=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - -includes_FreeBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if __FreeBSD__ >= 10 -#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10 -#undef SIOCAIFADDR -#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data -#undef SIOCSIFPHYADDR -#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data -#endif -' - -includes_Linux=' -#define _LARGEFILE_SOURCE -#define _LARGEFILE64_SOURCE -#ifndef __LP64__ -#define _FILE_OFFSET_BITS 64 -#endif -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - -#ifndef PTRACE_GETREGS -#define PTRACE_GETREGS 0xc -#endif - -#ifndef PTRACE_SETREGS -#define PTRACE_SETREGS 0xd -#endif -' - -includes_NetBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// Needed since refers to it... 
-#define schedppq 1 -' - -includes_OpenBSD=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// We keep some constants not supported in OpenBSD 5.5 and beyond for -// the promise of compatibility. -#define EMUL_ENABLED 0x1 -#define EMUL_NATIVE 0x2 -#define IPV6_FAITH 0x1d -#define IPV6_OPTIONS 0x1 -#define IPV6_RTHDR_STRICT 0x1 -#define IPV6_SOCKOPT_RESERVED1 0x3 -#define SIOCGIFGENERIC 0xc020693a -#define SIOCSIFGENERIC 0x80206939 -#define WALTSIG 0x4 -' - -includes_SunOS=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' - - -includes=' -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -' -ccflags="$@" - -# Write go tool cgo -godefs input. -( - echo package unix - echo - echo '/*' - indirect="includes_$(uname)" - echo "${!indirect} $includes" - echo '*/' - echo 'import "C"' - echo 'import "syscall"' - echo - echo 'const (' - - # The gcc command line prints all the #defines - # it encounters while processing the input - echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags | - awk ' - $1 != "#define" || $2 ~ /\(/ || $3 == "" {next} - - $2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers - $2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next} - $2 ~ /^(SCM_SRCRT)$/ {next} - $2 ~ /^(MAP_FAILED)$/ {next} - $2 ~ /^ELF_.*$/ {next}# contains ELF_ARCH, etc. 
- - $2 ~ /^EXTATTR_NAMESPACE_NAMES/ || - $2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next} - - $2 !~ /^ETH_/ && - $2 !~ /^EPROC_/ && - $2 !~ /^EQUIV_/ && - $2 !~ /^EXPR_/ && - $2 ~ /^E[A-Z0-9_]+$/ || - $2 ~ /^B[0-9_]+$/ || - $2 == "BOTHER" || - $2 ~ /^CI?BAUD(EX)?$/ || - $2 == "IBSHIFT" || - $2 ~ /^V[A-Z0-9]+$/ || - $2 ~ /^CS[A-Z0-9]/ || - $2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ || - $2 ~ /^IGN/ || - $2 ~ /^IX(ON|ANY|OFF)$/ || - $2 ~ /^IN(LCR|PCK)$/ || - $2 ~ /(^FLU?SH)|(FLU?SH$)/ || - $2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ || - $2 == "BRKINT" || - $2 == "HUPCL" || - $2 == "PENDIN" || - $2 == "TOSTOP" || - $2 == "XCASE" || - $2 == "ALTWERASE" || - $2 == "NOKERNINFO" || - $2 ~ /^PAR/ || - $2 ~ /^SIG[^_]/ || - $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || - $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || - $2 ~ /^O?XTABS$/ || - $2 ~ /^TC[IO](ON|OFF)$/ || - $2 ~ /^IN_/ || - $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || - $2 == "ICMPV6_FILTER" || - $2 == "SOMAXCONN" || - $2 == "NAME_MAX" || - $2 == "IFNAMSIZ" || - $2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ || - $2 ~ /^SYSCTL_VERS/ || - $2 ~ /^(MS|MNT)_/ || - $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ || - $2 ~ /^LINUX_REBOOT_CMD_/ || - $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || - $2 !~ "NLA_TYPE_MASK" && - $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ || - $2 ~ /^SIOC/ || - $2 ~ /^TIOC/ || - $2 ~ /^TCGET/ || - $2 ~ /^TCSET/ || - $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || - $2 !~ "RTF_BITS" && - $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || - $2 ~ /^BIOC/ || - $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || - $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ || - $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || - $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL)$/ && - $2 ~ /^(BPF|DLT)_/ || - $2 ~ /^CLOCK_/ || - $2 !~ "WMESGLEN" && - $2 ~ /^W[A-Z0-9]+$/ 
{printf("\t%s = C.%s\n", $2, $2)} - $2 ~ /^__WCOREFLAG$/ {next} - $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} - - {next} - ' | sort - - echo ')' -) >_const.go - -# Pull out the error names for later. -errors=$( - echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' | - sort -) - -# Pull out the signal names for later. -signals=$( - echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | - sort -) - -# Again, writing regexps to a file. -echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' | - sort >_error.grep -echo '#include ' | $CC -x c - -E -dM $ccflags | - awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' | - sort >_signal.grep - -echo '// mkerrors.sh' "$@" -echo '// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT' -echo -echo "// +build ${GOARCH},${GOOS}" -echo -go tool cgo -godefs -- "$@" _const.go >_error.out -cat _error.out | grep -vf _error.grep | grep -vf _signal.grep -echo -echo '// Errors' -echo 'const (' -cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/' -echo ')' - -echo -echo '// Signals' -echo 'const (' -cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/' -echo ')' - -# Run C program to print error and syscall strings. -( - echo -E " -#include -#include -#include -#include -#include -#include - -#define nelem(x) (sizeof(x)/sizeof((x)[0])) - -enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below - -int errors[] = { -" - for i in $errors - do - echo -E ' '$i, - done - - echo -E " -}; - -int signals[] = { -" - for i in $signals - do - echo -E ' '$i, - done - - # Use -E because on some systems bash builtin interprets \n itself. 
- echo -E ' -}; - -static int -intcmp(const void *a, const void *b) -{ - return *(int*)a - *(int*)b; -} - -int -main(void) -{ - int i, j, e; - char buf[1024], *p; - - printf("\n\n// Error table\n"); - printf("var errors = [...]string {\n"); - qsort(errors, nelem(errors), sizeof errors[0], intcmp); - for(i=0; i 0 && errors[i-1] == e) - continue; - strcpy(buf, strerror(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - printf("\t%d: \"%s\",\n", e, buf); - } - printf("}\n\n"); - - printf("\n\n// Signal table\n"); - printf("var signals = [...]string {\n"); - qsort(signals, nelem(signals), sizeof signals[0], intcmp); - for(i=0; i 0 && signals[i-1] == e) - continue; - strcpy(buf, strsignal(e)); - // lowercase first letter: Bad -> bad, but STREAM -> STREAM. - if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) - buf[0] += a - A; - // cut trailing : number. - p = strrchr(buf, ":"[0]); - if(p) - *p = '\0'; - printf("\t%d: \"%s\",\n", e, buf); - } - printf("}\n\n"); - - return 0; -} - -' -) >_errors.c - -$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl deleted file mode 100755 index b1e7766da..000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.pl +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This program reads a file containing function prototypes -# (like syscall_darwin.go) and generates system call bodies. -# The prototypes are marked by lines beginning with "//sys" -# and read like func declarations if //sys is replaced by func, but: -# * The parameter lists must give a name for each argument. 
-# This includes return parameters. -# * The parameter lists must give a type for each argument: -# the (x, y, z int) shorthand is not allowed. -# * If the return parameter is an error number, it must be named errno. - -# A line beginning with //sysnb is like //sys, except that the -# goroutine will not be suspended during the execution of the system -# call. This must only be used for system calls which can never -# block, as otherwise the system call could cause all goroutines to -# hang. - -use strict; - -my $cmdline = "mksyscall.pl " . join(' ', @ARGV); -my $errors = 0; -my $_32bit = ""; -my $plan9 = 0; -my $openbsd = 0; -my $netbsd = 0; -my $dragonfly = 0; -my $arm = 0; # 64-bit value should use (even, odd)-pair - -if($ARGV[0] eq "-b32") { - $_32bit = "big-endian"; - shift; -} elsif($ARGV[0] eq "-l32") { - $_32bit = "little-endian"; - shift; -} -if($ARGV[0] eq "-plan9") { - $plan9 = 1; - shift; -} -if($ARGV[0] eq "-openbsd") { - $openbsd = 1; - shift; -} -if($ARGV[0] eq "-netbsd") { - $netbsd = 1; - shift; -} -if($ARGV[0] eq "-dragonfly") { - $dragonfly = 1; - shift; -} -if($ARGV[0] eq "-arm") { - $arm = 1; - shift; -} - -if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n"; - exit 1; -} - -if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") { - print STDERR "GOARCH or GOOS not defined in environment\n"; - exit 1; -} - -sub parseparamlist($) { - my ($list) = @_; - $list =~ s/^\s*//; - $list =~ s/\s*$//; - if($list eq "") { - return (); - } - return split(/\s*,\s*/, $list); -} - -sub parseparam($) { - my ($p) = @_; - if($p !~ /^(\S*) (\S*)$/) { - print STDERR "$ARGV:$.: malformed parameter: $p\n"; - $errors = 1; - return ("xx", "int"); - } - return ($1, $2); -} - -my $text = ""; -while(<>) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, errno error) - # Split into name, in 
params, out params. - if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($func, $in, $out, $sysname) = ($2, $3, $4, $5); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # Try in vain to keep people from editing this file. - # The theory is that they jump into the middle of the file - # without reading the header. - $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - - # Go function header. - my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. - my @args = (); - my @uses = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n *byte\n"; - $text .= "\t_p$n, _ = BytePtrFromString($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass dummy pointer in that case. - # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- $text .= "\tvar _p$n unsafe.Pointer\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}"; - $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}"; - $text .= "\n"; - push @args, "uintptr(_p$n)", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && ($openbsd || $netbsd)) { - push @args, "0"; - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $dragonfly) { - if ($func !~ /^extp(read|write)/i) { - push @args, "0"; - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } elsif($_32bit eq "little-endian") { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } else { - push @args, "uintptr($name)"; - } - } elsif($type eq "int64" && $_32bit ne "") { - if(@args % 2 && $arm) { - # arm abi specifies 64-bit argument uses - # (even, odd) pair - push @args, "0" - } - if($_32bit eq "big-endian") { - push @args, "uintptr($name>>32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name>>32)"; - } - } else { - push @args, "uintptr($name)"; - } - } - - # Determine which form to use; pad args with zeros. - my $asm = "Syscall"; - if ($nonblock) { - $asm = "RawSyscall"; - } - if(@args <= 3) { - while(@args < 3) { - push @args, "0"; - } - } elsif(@args <= 6) { - $asm .= "6"; - while(@args < 6) { - push @args, "0"; - } - } elsif(@args <= 9) { - $asm .= "9"; - while(@args < 9) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # System call number. - if($sysname eq "") { - $sysname = "SYS_$func"; - $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar - $sysname =~ y/a-z/A-Z/; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm($sysname, $args)"; - - # Assign return values. 
- my $body = ""; - my @ret = ("_", "_", "_"); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err" && !$plan9) { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } elsif($name eq "err" && $plan9) { - $ret[0] = "r0"; - $ret[2] = "e1"; - next; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1" || $plan9) { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - foreach my $use (@uses) { - $text .= "\t$use\n"; - } - $text .= $body; - - if ($plan9 && $ret[2] eq "e1") { - $text .= "\tif int32(r0) == -1 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } elsif ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = errnoErr(e1)\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n\n"; -} - -chomp $text; -chomp $text; - -if($errors) { - exit 1; -} - -print <) { - chomp; - s/\s+/ /g; - s/^\s+//; - s/\s+$//; - $package = $1 if !$package && /^package (\S+)$/; - my $nonblock = /^\/\/sysnb /; - next if !/^\/\/sys / && !$nonblock; - - # Line must be of the form - # func Open(path string, mode int, perm int) (fd int, err error) - # Split into name, in params, out params. - if(!/^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$/) { - print STDERR "$ARGV:$.: malformed //sys declaration\n"; - $errors = 1; - next; - } - my ($nb, $func, $in, $out, $modname, $sysname) = ($1, $2, $3, $4, $5, $6); - - # Split argument lists on comma. - my @in = parseparamlist($in); - my @out = parseparamlist($out); - - # So file name. - if($modname eq "") { - $modname = "libc"; - } - - # System call name. - if($sysname eq "") { - $sysname = "$func"; - } - - # System call pointer variable name. - my $sysvarname = "proc$sysname"; - - my $strconvfunc = "BytePtrFromString"; - my $strconvtype = "*byte"; - - $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. - - # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n"; - # Link symbol to proc address variable. - $linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n"; - # Library proc address variable. - push @vars, $sysvarname; - - # Go function header. - $out = join(', ', @out); - if($out ne "") { - $out = " ($out)"; - } - if($text ne "") { - $text .= "\n" - } - $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out; - - # Check if err return available - my $errvar = ""; - foreach my $p (@out) { - my ($name, $type) = parseparam($p); - if($type eq "error") { - $errvar = $name; - last; - } - } - - # Prepare arguments to Syscall. 
- my @args = (); - my @uses = (); - my $n = 0; - foreach my $p (@in) { - my ($name, $type) = parseparam($p); - if($type =~ /^\*/) { - push @args, "uintptr(unsafe.Pointer($name))"; - } elsif($type eq "string" && $errvar ne "") { - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, $errvar = $strconvfunc($name)\n"; - $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type eq "string") { - print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n"; - $text .= "\tvar _p$n $strconvtype\n"; - $text .= "\t_p$n, _ = $strconvfunc($name)\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))"; - push @uses, "use(unsafe.Pointer(_p$n))"; - $n++; - } elsif($type =~ /^\[\](.*)/) { - # Convert slice into pointer, length. - # Have to be careful not to take address of &a[0] if len == 0: - # pass nil in that case. - $text .= "\tvar _p$n *$1\n"; - $text .= "\tif len($name) > 0 {\n\t\t_p$n = \&$name\[0]\n\t}\n"; - push @args, "uintptr(unsafe.Pointer(_p$n))", "uintptr(len($name))"; - $n++; - } elsif($type eq "int64" && $_32bit ne "") { - if($_32bit eq "big-endian") { - push @args, "uintptr($name >> 32)", "uintptr($name)"; - } else { - push @args, "uintptr($name)", "uintptr($name >> 32)"; - } - } elsif($type eq "bool") { - $text .= "\tvar _p$n uint32\n"; - $text .= "\tif $name {\n\t\t_p$n = 1\n\t} else {\n\t\t_p$n = 0\n\t}\n"; - push @args, "uintptr(_p$n)"; - $n++; - } else { - push @args, "uintptr($name)"; - } - } - my $nargs = @args; - - # Determine which form to use; pad args with zeros. - my $asm = "sysvicall6"; - if ($nonblock) { - $asm = "rawSysvicall6"; - } - if(@args <= 6) { - while(@args < 6) { - push @args, "0"; - } - } else { - print STDERR "$ARGV:$.: too many arguments to system call\n"; - } - - # Actual call. - my $args = join(', ', @args); - my $call = "$asm(uintptr(unsafe.Pointer(&$sysvarname)), $nargs, $args)"; - - # Assign return values. 
- my $body = ""; - my $failexpr = ""; - my @ret = ("_", "_", "_"); - my @pout= (); - my $do_errno = 0; - for(my $i=0; $i<@out; $i++) { - my $p = $out[$i]; - my ($name, $type) = parseparam($p); - my $reg = ""; - if($name eq "err") { - $reg = "e1"; - $ret[2] = $reg; - $do_errno = 1; - } else { - $reg = sprintf("r%d", $i); - $ret[$i] = $reg; - } - if($type eq "bool") { - $reg = "$reg != 0"; - } - if($type eq "int64" && $_32bit ne "") { - # 64-bit number in r1:r0 or r0:r1. - if($i+2 > @out) { - print STDERR "$ARGV:$.: not enough registers for int64 return\n"; - } - if($_32bit eq "big-endian") { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1); - } else { - $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i); - } - $ret[$i] = sprintf("r%d", $i); - $ret[$i+1] = sprintf("r%d", $i+1); - } - if($reg ne "e1") { - $body .= "\t$name = $type($reg)\n"; - } - } - if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") { - $text .= "\t$call\n"; - } else { - $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n"; - } - foreach my $use (@uses) { - $text .= "\t$use\n"; - } - $text .= $body; - - if ($do_errno) { - $text .= "\tif e1 != 0 {\n"; - $text .= "\t\terr = e1\n"; - $text .= "\t}\n"; - } - $text .= "\treturn\n"; - $text .= "}\n"; -} - -if($errors) { - exit 1; -} - -print < "net.inet", - "net.inet.ipproto" => "net.inet", - "net.inet6.ipv6proto" => "net.inet6", - "net.inet6.ipv6" => "net.inet6.ip6", - "net.inet.icmpv6" => "net.inet6.icmp6", - "net.inet6.divert6" => "net.inet6.divert", - "net.inet6.tcp6" => "net.inet.tcp", - "net.inet6.udp6" => "net.inet.udp", - "mpls" => "net.mpls", - "swpenc" => "vm.swapencrypt" -); - -# Node mappings -my %node_map = ( - "net.inet.ip.ifq" => "net.ifq", - "net.inet.pfsync" => "net.pfsync", - "net.mpls.ifq" => "net.ifq" -); - -my $ctlname; -my %mib = (); -my %sysctl = (); -my $node; - -sub debug() { - print STDERR "$_[0]\n" if $debug; -} - -# Walk the MIB and build a sysctl name to OID mapping. 
-sub build_sysctl() { - my ($node, $name, $oid) = @_; - my %node = %{$node}; - my @oid = @{$oid}; - - foreach my $key (sort keys %node) { - my @node = @{$node{$key}}; - my $nodename = $name.($name ne '' ? '.' : '').$key; - my @nodeoid = (@oid, $node[0]); - if ($node[1] eq 'CTLTYPE_NODE') { - if (exists $node_map{$nodename}) { - $node = \%mib; - $ctlname = $node_map{$nodename}; - foreach my $part (split /\./, $ctlname) { - $node = \%{@{$$node{$part}}[2]}; - } - } else { - $node = $node[2]; - } - &build_sysctl($node, $nodename, \@nodeoid); - } elsif ($node[1] ne '') { - $sysctl{$nodename} = \@nodeoid; - } - } -} - -foreach my $ctl (@ctls) { - $ctls{$ctl} = $ctl; -} - -# Build MIB -foreach my $header (@headers) { - &debug("Processing $header..."); - open HEADER, "/usr/include/$header" || - print STDERR "Failed to open $header\n"; - while (

) { - if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ || - $_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ || - $_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) { - if ($1 eq 'CTL_NAMES') { - # Top level. - $node = \%mib; - } else { - # Node. - my $nodename = lc($2); - if ($header =~ /^netinet\//) { - $ctlname = "net.inet.$nodename"; - } elsif ($header =~ /^netinet6\//) { - $ctlname = "net.inet6.$nodename"; - } elsif ($header =~ /^net\//) { - $ctlname = "net.$nodename"; - } else { - $ctlname = "$nodename"; - $ctlname =~ s/^(fs|net|kern)_/$1\./; - } - if (exists $ctl_map{$ctlname}) { - $ctlname = $ctl_map{$ctlname}; - } - if (not exists $ctls{$ctlname}) { - &debug("Ignoring $ctlname..."); - next; - } - - # Walk down from the top of the MIB. - $node = \%mib; - foreach my $part (split /\./, $ctlname) { - if (not exists $$node{$part}) { - &debug("Missing node $part"); - $$node{$part} = [ 0, '', {} ]; - } - $node = \%{@{$$node{$part}}[2]}; - } - } - - # Populate current node with entries. - my $i = -1; - while (defined($_) && $_ !~ /^}/) { - $_ =
; - $i++ if $_ =~ /{.*}/; - next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/; - $$node{$1} = [ $i, $2, {} ]; - } - } - } - close HEADER; -} - -&build_sysctl(\%mib, "", []); - -print <){ - if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ - my $name = $1; - my $num = $2; - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;" - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print <){ - if(/^([0-9]+)\s+\S+\s+STD\s+({ \S+\s+(\w+).*)$/){ - my $num = $1; - my $proto = $2; - my $name = "SYS_$3"; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. - if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - if($name =~ /^SYS_CAP_+/ || $name =~ /^SYS___CAP_+/){ - next - } - - print " $name = $num; // $proto\n"; - - # We keep Capsicum syscall numbers for FreeBSD - # 9-STABLE here because we are not sure whether they - # are mature and stable. 
- if($num == 513){ - print " SYS_CAP_NEW = 514 // { int cap_new(int fd, uint64_t rights); }\n"; - print " SYS_CAP_GETRIGHTS = 515 // { int cap_getrights(int fd, \\\n"; - print " SYS_CAP_ENTER = 516 // { int cap_enter(void); }\n"; - print " SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); }\n"; - } - } -} - -print < 999){ - # ignore deprecated syscalls that are no longer implemented - # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/include/uapi/asm-generic/unistd.h?id=refs/heads/master#n716 - return; - } - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;\n"; -} - -my $prev; -open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc"; -while(){ - if(/^#define __NR_syscalls\s+/) { - # ignore redefinitions of __NR_syscalls - } - elsif(/^#define __NR_(\w+)\s+([0-9]+)/){ - $prev = $2; - fmt($1, $2); - } - elsif(/^#define __NR3264_(\w+)\s+([0-9]+)/){ - $prev = $2; - fmt($1, $2); - } - elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){ - fmt($1, $prev+$2) - } -} - -print <){ - if($line =~ /^(.*)\\$/) { - # Handle continuation - $line = $1; - $_ =~ s/^\s+//; - $line .= $_; - } else { - # New line - $line = $_; - } - next if $line =~ /\\$/; - if($line =~ /^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$/) { - my $num = $1; - my $proto = $6; - my $compat = $8; - my $name = "$7_$9"; - - $name = "$7_$11" if $11 ne ''; - $name =~ y/a-z/A-Z/; - - if($compat eq '' || $compat eq '30' || $compat eq '50') { - print " $name = $num; // $proto\n"; - } - } -} - -print <){ - if(/^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$/){ - my $num = $1; - my $proto = $3; - my $name = $4; - $name =~ y/a-z/A-Z/; - - # There are multiple entries for enosys and nosys, so comment them out. 
- if($name =~ /^SYS_E?NOSYS$/){ - $name = "// $name"; - } - if($name eq 'SYS_SYS_EXIT'){ - $name = 'SYS_EXIT'; - } - - print " $name = $num; // $proto\n"; - } -} - -print < 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. + const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. 
+ dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. + log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. 
+ appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). 
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) + return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. 
+ if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. 
+ timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. 
+ const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
+ c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go new file mode 100644 index 000000000..1c072e9db --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -0,0 +1,133 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build appengine + +package internal + +import ( + "errors" + "net/http" + "time" + + "appengine" + "appengine_internal" + basepb "appengine_internal/base" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var contextKey = "holds an appengine.Context" + +func fromContext(ctx netcontext.Context) appengine.Context { + c, _ := ctx.Value(&contextKey).(appengine.Context) + return c +} + +// This is only for classic App Engine adapters. +func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { + return fromContext(ctx) +} + +func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + + s := &basepb.StringProto{} + c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) + if ns := s.GetValue(); ns != "" { + ctx = NamespacedContext(ctx, ns) + } + + return ctx +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + if req, ok := c.Request().(*http.Request); ok { + return req.Header + } + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + c := appengine.NewContext(req) + return withContext(parent, c) +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. 
+ if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + var opts *appengine_internal.CallOptions + if d, ok := ctx.Deadline(); ok { + opts = &appengine_internal.CallOptions{ + Timeout: d.Sub(time.Now()), + } + } + + err := c.Call(service, method, in, out, opts) + switch v := err.(type) { + case *appengine_internal.APIError: + return &APIError{ + Service: v.Service, + Detail: v.Detail, + Code: v.Code, + } + case *appengine_internal.CallError: + return &CallError{ + Detail: v.Detail, + Code: v.Code, + Timeout: v.Timeout, + } + } + return err +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + panic("handleHTTP called; this should be impossible") +} + +func logf(c appengine.Context, level int64, format string, args ...interface{}) { + var fn func(format string, args ...interface{}) + switch level { + case 0: + fn = c.Debugf + case 1: + fn = c.Infof + case 2: + fn = c.Warningf + case 3: + fn = c.Errorf + case 4: + fn = c.Criticalf + default: + // This shouldn't happen. + fn = c.Criticalf + } + fn(format, args...) +} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 000000000..ec5383e66 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,101 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. + var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. + f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. 
+ ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + logf(fromContext(ctx), level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + n := &namespacedContext{ + namespace: namespace, + } + return withNamespace(WithCallOverride(ctx, n.call), namespace) +} + +type namespacedContext struct { + namespace string +} + +func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + // Apply any namespace mods. + if mod, ok := NamespaceMods[service]; ok { + mod(in, n.namespace) + } + return Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go new file mode 100644 index 000000000..11df8c07b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id.go @@ -0,0 +1,28 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "strings" +) + +func parseFullAppID(appid string) (partition, domain, displayID string) { + if i := strings.Index(appid, "~"); i != -1 { + partition, appid = appid[:i], appid[i+1:] + } + if i := strings.Index(appid, ":"); i != -1 { + domain, appid = appid[:i], appid[i+1:] + } + return partition, domain, appid +} + +// appID returns "appid" or "domain.com:appid". +func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 000000000..87d9701b8 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,296 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto +// DO NOT EDIT! + +/* +Package app_identity is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +It has these top-level messages: + AppIdentityServiceError + SignForAppRequest + SignForAppResponse + GetPublicCertificateForAppRequest + PublicCertificate + GetPublicCertificateForAppResponse + GetServiceAccountNameRequest + GetServiceAccountNameResponse + GetAccessTokenRequest + GetAccessTokenResponse + GetDefaultGcsBucketNameRequest + GetDefaultGcsBucketNameResponse +*/ +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} + +type AppIdentityServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m 
*AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" 
json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} + +func (m *GetAccessTokenRequest) GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func 
(*GetAccessTokenResponse) ProtoMessage() {} + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 000000000..36a195650 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/base/api_base.proto +// DO NOT EDIT! + +/* +Package base is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/base/api_base.proto + +It has these top-level messages: + StringProto + Integer32Proto + Integer64Proto + BoolProto + DoubleProto + BytesProto + VoidProto +*/ +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} + +func (m *Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } 
+func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go new file mode 100644 index 000000000..8613cb731 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go @@ -0,0 +1,2778 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/datastore/datastore_v3.proto + +It has these top-level messages: + Action + PropertyValue + Property + Path + Reference + User + EntityProto + CompositeProperty + Index + CompositeIndex + IndexPostfix + IndexPosition + Snapshot + InternalHeader + Transaction + Query + CompiledQuery + CompiledCursor + Cursor + Error + Cost + GetRequest + GetResponse + PutRequest + PutResponse + TouchRequest + TouchResponse + DeleteRequest + DeleteResponse + NextRequest + QueryResult + AllocateIdsRequest + AllocateIdsResponse + CompositeIndices + AddActionsRequest + AddActionsResponse + BeginTransactionRequest + CommitResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: 
"ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} +var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, 
"Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} + +type EntityProto_Kind int32 + +const ( + EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + "GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State 
= 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} + +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + 
*p = x + return p +} +func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} + +type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() 
*Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + "CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, 
"Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = PutRequest_AutoIdPolicy(value) + return nil +} + +type Action struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = 
PropertyValue{} } +func (m *PropertyValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} + +func (m *PropertyValue) GetInt64Value() int64 { + if m != nil && m.Int64Value != nil { + return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string 
`protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + 
return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption 
`protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + +func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} + +func (m 
*Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) ProtoMessage() {} + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string 
`protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityProto) Reset() { *m = EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} + +func (m *EntityProto) GetKey() 
*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) 
ProtoMessage() {} + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + +type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} + +const 
Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = 
IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func (*InternalHeader) ProtoMessage() {} + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction 
struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` + SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` + Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 
`protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` + RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false 
+const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return 
Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return 
Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"` + DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m 
= CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` + EndPostfixValue 
[]string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" 
json:"value_prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position 
`protobuf:"group,2,opt,name=Position" json:"position,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor) ProtoMessage() {} + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" 
json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) ProtoMessage() {} + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) 
GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool 
`protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + 
return Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return 
Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return 
*m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil 
{ + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + 
return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && 
m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m 
*AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} + +type BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" 
json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go new file mode 100644 index 000000000..d538701ab --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -0,0 +1,14 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import netcontext "golang.org/x/net/context" + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. 
+ +func AppID(c netcontext.Context) string { + return appID(FullyQualifiedAppID(c)) +} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go new file mode 100644 index 000000000..e6b9227c5 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -0,0 +1,27 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine" + + netcontext "golang.org/x/net/context" +) + +func DefaultVersionHostname(ctx netcontext.Context) string { + return appengine.DefaultVersionHostname(fromContext(ctx)) +} + +func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } +func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } + +func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go new file mode 100644 index 000000000..ebe68b785 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -0,0 +1,97 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build !appengine + +package internal + +import ( + "net/http" + "os" + + netcontext "golang.org/x/net/context" +) + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +const ( + hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" + hRequestLogId = "X-AppEngine-Request-Log-Id" + hDatacenter = "X-AppEngine-Datacenter" +) + +func ctxHeaders(ctx netcontext.Context) http.Header { + return fromContext(ctx).Request().Header +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDefaultVersionHostname) +} + +func RequestID(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hRequestLogId) +} + +func Datacenter(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDatacenter) +} + +func ServerSoftware() string { + // TODO(dsymonds): Remove fallback when we've verified this. + if s := os.Getenv("SERVER_SOFTWARE"); s != "" { + return s + } + return "Google App Engine/1.x.x" +} + +// TODO(dsymonds): Remove the metadata fetches. + +func ModuleName(_ netcontext.Context) string { + if s := os.Getenv("GAE_MODULE_NAME"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_name")) +} + +func VersionID(_ netcontext.Context) string { + if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) +} + +func InstanceID() string { + if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_instance")) +} + +func partitionlessAppID() string { + // gae_project has everything except the partition prefix. 
+ appID := os.Getenv("GAE_LONG_APP_ID") + if appID == "" { + appID = string(mustGetMetadata("instance/attributes/gae_project")) + } + return appID +} + +func fullyQualifiedAppID(_ netcontext.Context) string { + appID := partitionlessAppID() + + part := os.Getenv("GAE_PARTITION") + if part == "" { + part = string(mustGetMetadata("instance/attributes/gae_partition")) + } + + if part != "" { + appID = part + "~" + appID + } + return appID +} + +func IsDevAppServer() bool { + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" +} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 000000000..66e8d7686 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,144 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. +// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. +func RegisterErrorCodeMap(service string, m map[int32]string) { + errorCodeMaps[service] = m +} + +type timeoutCodeKey struct { + service string + code int32 +} + +// timeoutCodes is the set of service+code pairs that represent timeouts. 
+var timeoutCodes = make(map[timeoutCodeKey]bool) + +func RegisterTimeoutErrorCode(service string, code int32) { + timeoutCodes[timeoutCodeKey{service, code}] = true +} + +// APIError is the type returned by appengine.Context's Call method +// when an API call fails in an API-specific way. This may be, for instance, +// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. +type APIError struct { + Service string + Detail string + Code int32 // API-specific error code +} + +func (e *APIError) Error() string { + if e.Code == 0 { + if e.Detail == "" { + return "APIError " + } + return e.Detail + } + s := fmt.Sprintf("API error %d", e.Code) + if m, ok := errorCodeMaps[e.Service]; ok { + s += " (" + e.Service + ": " + m[e.Code] + ")" + } else { + // Shouldn't happen, but provide a bit more detail if it does. + s = e.Service + " " + s + } + if e.Detail != "" { + s += ": " + e.Detail + } + return s +} + +func (e *APIError) IsTimeout() bool { + return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] +} + +// CallError is the type returned by appengine.Context's Call method when an +// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. +type CallError struct { + Detail string + Code int32 + // TODO: Remove this if we get a distinguishable error code. 
+ Timeout bool +} + +func (e *CallError) Error() string { + var msg string + switch remotepb.RpcError_ErrorCode(e.Code) { + case remotepb.RpcError_UNKNOWN: + return e.Detail + case remotepb.RpcError_OVER_QUOTA: + msg = "Over quota" + case remotepb.RpcError_CAPABILITY_DISABLED: + msg = "Capability disabled" + case remotepb.RpcError_CANCELLED: + msg = "Canceled" + default: + msg = fmt.Sprintf("Call error %d", e.Code) + } + s := msg + ": " + e.Detail + if e.Timeout { + s += " (timeout)" + } + return s +} + +func (e *CallError) IsTimeout() bool { + return e.Timeout +} + +func Main() { + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} + +// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. +// The function should be prepared to be called on the same message more than once; it should only modify the +// RPC request the first time. +var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go new file mode 100644 index 000000000..20c595be3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go @@ -0,0 +1,899 @@ +// Code generated by protoc-gen-go. 
+// source: google.golang.org/appengine/internal/log/log_service.proto +// DO NOT EDIT! + +/* +Package log is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/log/log_service.proto + +It has these top-level messages: + LogServiceError + UserAppLogLine + UserAppLogGroup + FlushRequest + SetStatusRequest + LogOffset + LogLine + RequestLog + LogModuleVersion + LogReadRequest + LogReadResponse + LogUsageRecord + LogUsageRequest + LogUsageResponse +*/ +package log + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} + +type LogServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) 
ProtoMessage() {} + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} + +func (m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = 
SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string 
`protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` + PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" 
json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` + WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + 
return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return *m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil && m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m 
*RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + 
+func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" 
json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` + IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func (*LogReadRequest) ProtoMessage() {} + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m 
*LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return "" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` + XXX_unrecognized []byte `json:"-"` 
+} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && 
m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) 
GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 000000000..9cc1f71d1 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,61 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? 
+func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + log.Fatalf("Metadata fetch failed: %v", err) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. + req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 000000000..a0145ed31 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/modules/modules_service.proto +// DO NOT EDIT! + +/* +Package modules is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/modules/modules_service.proto + +It has these top-level messages: + ModulesServiceError + GetModulesRequest + GetModulesResponse + GetVersionsRequest + GetVersionsResponse + GetDefaultVersionRequest + GetDefaultVersionResponse + GetNumInstancesRequest + GetNumInstancesResponse + SetNumInstancesRequest + SetNumInstancesResponse + StartModuleRequest + StartModuleResponse + StopModuleRequest + StopModuleResponse + GetHostnameRequest + GetHostnameResponse +*/ +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} + +type ModulesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} + +type GetModulesRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return 
proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionsRequest) ProtoMessage() {} + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + 
return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances 
*int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() 
{} + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 000000000..3b94cf0c6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. 
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 000000000..526bd39e6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto +// DO NOT EDIT! + +/* +Package remote_api is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/remote_api/remote_api.proto + +It has these top-level messages: + Request + ApplicationError + RpcError + Response +*/ +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} + +type Request struct { + ServiceName *string 
`protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) 
ProtoMessage() {} + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 000000000..28a6d1812 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,107 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { + if transactionFromContext(c) != nil { + return errors.New("nested transactions are not supported") + } + + // Begin the transaction. 
+ t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return err + } + t.finished = true + + // Commit the transaction. + res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { + return ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return ErrConcurrentTransaction + } + } + return err +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 000000000..af463fbb2 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +// DO NOT EDIT! + +/* +Package urlfetch is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +It has these top-level messages: + URLFetchServiceError + URLFetchRequest + URLFetchResponse +*/ +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + 
"RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + 
*x = URLFetchRequest_RequestMethod(value) + return nil +} + +type URLFetchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} + +const Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m 
*URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) ProtoMessage() {} + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` + ApiCpuMilliseconds 
*int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return 
Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 000000000..21860ca08 --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. 
+func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 000000000..05642a992 --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 100644 index 000000000..6ffe1e6d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. 
+package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. +var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. +// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. 
+var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. +func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. 
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. + switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. 
+ if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/google.golang.org/cloud/internal/cloud.go deleted file mode 100644 index 59428803d..000000000 --- a/vendor/google.golang.org/cloud/internal/cloud.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. 
-package internal - -import ( - "fmt" - "net/http" - "sync" - - "golang.org/x/net/context" -) - -type contextKey struct{} - -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - if c == nil { - panic("nil *http.Client passed to WithContext") - } - if projID == "" { - panic("empty project ID passed to WithContext") - } - return context.WithValue(parent, contextKey{}, &cloudContext{ - ProjectID: projID, - HTTPClient: c, - }) -} - -const userAgent = "gcloud-golang/0.1" - -type cloudContext struct { - ProjectID string - HTTPClient *http.Client - - mu sync.Mutex // guards svc - svc map[string]interface{} // e.g. "storage" => *rawStorage.Service -} - -// Service returns the result of the fill function if it's never been -// called before for the given name (which is assumed to be an API -// service name, like "datastore"). If it has already been cached, the fill -// func is not run. -// It's safe for concurrent use by multiple goroutines. -func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { - return cc(ctx).service(name, fill) -} - -func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { - c.mu.Lock() - defer c.mu.Unlock() - - if c.svc == nil { - c.svc = make(map[string]interface{}) - } else if v, ok := c.svc[name]; ok { - return v - } - v := fill(c.HTTPClient) - c.svc[name] = v - return v -} - -// Transport is an http.RoundTripper that appends -// Google Cloud client's user-agent to the original -// request's user-agent header. -type Transport struct { - // Base is the actual http.RoundTripper - // requests will use. It must not be nil. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. 
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -func ProjID(ctx context.Context) string { - return cc(ctx).ProjectID -} - -func HTTPClient(ctx context.Context) *http.Client { - return cc(ctx).HTTPClient -} - -// cc returns the internal *cloudContext (cc) state for a context.Context. -// It panics if the user did it wrong. -func cc(ctx context.Context) *cloudContext { - if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { - return c - } - panic("invalid context.Context type; it should be created with cloud.NewContext") -} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index d7108cd64..000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go - -go: - - 1.5.4 - - 1.6.3 - -go_import_path: google.golang.org/grpc - -before_install: - - go get golang.org/x/tools/cmd/goimports - - go get github.com/golang/lint/golint - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover - -script: - - '! gofmt -s -d -l . 2>&1 | read' - - '! goimports -l . | read' - - '! golint ./... | grep -vE "(_string|\.pb)\.go:"' - - '! go tool vet -all . 
2>&1 | grep -vE "constant [0-9]+ not a string in call to Errorf"' - - make test testrace diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md deleted file mode 100644 index 36cd6f758..000000000 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ /dev/null @@ -1,46 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to grpc! Here are some guidelines -and information about how to do so. - -## Sending patches - -### Getting started - -1. Check out the code: - - $ go get google.golang.org/grpc - $ cd $GOPATH/src/google.golang.org/grpc - -1. Create a fork of the grpc-go repository. -1. Add your fork as a remote: - - $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git - -1. Make changes, commit them. -1. Run the test suite: - - $ make test - -1. Push your changes to your fork: - - $ git push fork ... - -1. Open a pull request. - -## Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -## Filing Issues -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 03bb01f0b..000000000 --- a/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -all: test testrace - -deps: - go get -d -v google.golang.org/grpc/... - -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... 
- -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -build: deps - go build google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go get -u -v github.com/golang/protobuf/protoc-gen-go - # use $$dir as the root for all proto files in the same directory - for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ - protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ - done - -test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... - -testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... - -clean: - go clean -i google.golang.org/grpc/... - -coverage: testdeps - ./coverage.sh --coveralls - -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - coverage diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index 90e9453d5..000000000 --- a/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,32 +0,0 @@ -#gRPC-Go - -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) - -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. - -Installation ------------- - -To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: - -``` -$ go get google.golang.org/grpc -``` - -Prerequisites -------------- - -This requires Go 1.5 or later . - -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. 
If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - -Documentation -------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). - -Status ------- -Beta release - diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index 419e21461..e217a2077 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -38,6 +38,7 @@ import ( "sync" "golang.org/x/net/context" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/naming" ) @@ -52,6 +53,14 @@ type Address struct { Metadata interface{} } +// BalancerConfig specifies the configurations for Balancer. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials +} + // BalancerGetOptions configures a Get call. // This is the EXPERIMENTAL API and may be changed or extended in the future. type BalancerGetOptions struct { @@ -66,11 +75,11 @@ type Balancer interface { // Start does the initialization work to bootstrap a Balancer. For example, // this function may start the name resolution and watch the updates. It will // be called when dialing. - Start(target string) error + Start(target string, config BalancerConfig) error // Up informs the Balancer that gRPC has a connection to the server at // addr. It returns down which is called once the connection to addr gets // lost or closed. 
- // TODO: It is not clear how to construct and take advantage the meaningful error + // TODO: It is not clear how to construct and take advantage of the meaningful error // parameter for down. Need realistic demands to guide. Up(addr Address) (down func(error)) // Get gets the address of a server for the RPC corresponding to ctx. @@ -205,7 +214,12 @@ func (rr *roundRobin) watchAddrUpdates() error { return nil } -func (rr *roundRobin) Start(target string) error { +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } if rr.r == nil { // If there is no name resolver installed, it is not needed to // do name resolution. In this case, target is added into rr.addrs diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index fea07998d..788b3d928 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -96,7 +96,7 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd } outBuf, err := encode(codec, args, compressor, cbuf) if err != nil { - return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) + return nil, Errorf(codes.Internal, "grpc: %v", err) } err = t.Write(stream, outBuf, opts) // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method @@ -112,7 +112,14 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd // Invoke sends the RPC request on the wire and returns after response is received. // Invoke is called by generated code. Also users can call Invoke directly when it // is really needed in their use cases. 
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { c := defaultCallInfo for _, o := range opts { if err := o.before(&c); err != nil { diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 27e74e6f2..11dce44fd 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -83,15 +83,17 @@ var ( // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - copts transport.ConnectOptions + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + codec Codec + cp Compressor + dc Decompressor + bs backoffStrategy + balancer Balancer + block bool + insecure bool + timeout time.Duration + copts transport.ConnectOptions } // DialOption configures how we set up the connection. @@ -215,19 +217,48 @@ func WithUserAgent(s string) DialOption { } } +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return func(o *dialOptions) { + o.unaryInt = f + } +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. 
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return func(o *dialOptions) { + o.streamInt = f + } +} + // Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } -// DialContext creates a client connection to the given target -// using the supplied context. -func DialContext(ctx context.Context, target string, opts ...DialOption) (*ClientConn, error) { +// DialContext creates a client connection to the given target. ctx can be used to +// cancel or expire the pending connecting. Once this function returns, the +// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close +// to terminate all the pending operations after this function returns. +// This is the EXPERIMENTAL API. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[Address]*addrConn), } - cc.ctx, cc.cancel = context.WithCancel(ctx) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + for _, opt := range opts { opt(&cc.dopts) } @@ -239,31 +270,47 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (*Clien if cc.dopts.bs == nil { cc.dopts.bs = DefaultBackoffConfig } - - var ( - ok bool - addrs []Address - ) - if cc.dopts.balancer == nil { - // Connect to target directly if balancer is nil. 
- addrs = append(addrs, Address{Addr: target}) + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName } else { - if err := cc.dopts.balancer.Start(target); err != nil { - return nil, err + colonPos := strings.LastIndex(target, ":") + if colonPos == -1 { + colonPos = len(target) } - ch := cc.dopts.balancer.Notify() - if ch == nil { - // There is no name resolver installed. + cc.authority = target[:colonPos] + } + var ok bool + waitC := make(chan error, 1) + go func() { + var addrs []Address + if cc.dopts.balancer == nil { + // Connect to target directly if balancer is nil. addrs = append(addrs, Address{Addr: target}) } else { - addrs, ok = <-ch - if !ok || len(addrs) == 0 { - return nil, errNoAddr + var credsClone credentials.TransportCredentials + if creds != nil { + credsClone = creds.Clone() + } + config := BalancerConfig{ + DialCreds: credsClone, + } + if err := cc.dopts.balancer.Start(target, config); err != nil { + waitC <- err + return + } + ch := cc.dopts.balancer.Notify() + if ch == nil { + // There is no name resolver installed. + addrs = append(addrs, Address{Addr: target}) + } else { + addrs, ok = <-ch + if !ok || len(addrs) == 0 { + waitC <- errNoAddr + return + } } } - } - waitC := make(chan error, 1) - go func() { for _, a := range addrs { if err := cc.resetAddrConn(a, false, nil); err != nil { waitC <- err @@ -277,16 +324,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (*Clien timeoutCh = time.After(cc.dopts.timeout) } select { + case <-ctx.Done(): + return nil, ctx.Err() case err := <-waitC: if err != nil { - cc.Close() return nil, err } - case <-cc.ctx.Done(): - cc.Close() - return nil, cc.ctx.Err() case <-timeoutCh: - cc.Close() return nil, ErrClientConnTimeout } // If balancer is nil or balancer.Notify() is nil, ok will be false here. 
@@ -294,11 +338,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (*Clien if ok { go cc.lbWatcher() } - colonPos := strings.LastIndex(target, ":") - if colonPos == -1 { - colonPos = len(target) - } - cc.authority = target[:colonPos] return cc, nil } @@ -652,7 +691,7 @@ func (ac *addrConn) resetTransport(closeTransport bool) error { if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { return err } - grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr) + grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr) ac.mu.Lock() if ac.state == Shutdown { // ac.tearDown(...) has been invoked. diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100755 index b00948884..000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 37c5b860b..e14b464ac 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -33,7 +33,7 @@ // Package codes defines the canonical error codes used by gRPC. 
It is // consistent across various languages. -package codes +package codes // import "google.golang.org/grpc/codes" // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100755 index 120235374..000000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) 
-show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 001f134d7..5555ef024 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -35,11 +35,12 @@ // which encapsulate all the state needed by a client to authenticate with a // server and make various assertions, e.g., about the client's identity, role, // or whether it is authorized to make a particular call. -package credentials +package credentials // import "google.golang.org/grpc/credentials" import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "io/ioutil" "net" @@ -71,7 +72,7 @@ type PerRPCCredentials interface { } // ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, etc. +// security protocol, security protocol version in use, server name, etc. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. ProtocolVersion string @@ -79,6 +80,8 @@ type ProtocolInfo struct { SecurityProtocol string // SecurityVersion is the security protocol version. SecurityVersion string + // ServerName is the user-configured server name. + ServerName string } // AuthInfo defines the common interface for the auth information the users are interested in. @@ -86,6 +89,12 @@ type AuthInfo interface { AuthType() string } +var ( + // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC + // and the caller should not close rawConn. 
+ ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") +) + // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportCredentials interface { @@ -100,6 +109,12 @@ type TransportCredentials interface { ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error } // TLSInfo contains the auth information for a TLS authenticated connection. @@ -123,19 +138,10 @@ func (c tlsCreds) Info() ProtocolInfo { return ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", + ServerName: c.config.ServerName, } } -// GetRequestMetadata returns nil, nil since TLS credentials does not have -// metadata. 
-func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return nil, nil -} - -func (c *tlsCreds) RequireTransportSecurity() bool { - return true -} - func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := cloneTLSConfig(c.config) @@ -172,6 +178,15 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) return conn, TLSInfo{conn.ConnectionState()}, nil } +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{cloneTLSConfig(c)} @@ -180,12 +195,16 @@ func NewTLS(c *tls.Config) TransportCredentials { } // NewClientTLSFromCert constructs a TLS from the input certificate for client. -func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } // NewClientTLSFromFile constructs a TLS from the input certificate file for client. -func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) { +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. 
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err @@ -194,7 +213,7 @@ func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, er if !cp.AppendCertsFromPEM(b) { return nil, fmt.Errorf("credentials: failed to append certificates") } - return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } // NewServerTLSFromCert constructs a TLS from the input certificate for server. diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index b4c0e740e..a35f21885 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -3,4 +3,4 @@ Package grpc implements an RPC system called gRPC. See www.grpc.io for more information about gRPC. */ -package grpc +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 2cc09be48..3b2933079 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -34,7 +34,7 @@ /* Package grpclog defines logging for grpc. */ -package grpclog +package grpclog // import "google.golang.org/grpc/grpclog" import ( "log" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 588f59e5a..8d932efed 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -37,6 +37,22 @@ import ( "golang.org/x/net/context" ) +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. 
invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is the EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is the EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + // UnaryServerInfo consists of various information about a unary RPC on // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index b32001568..3c0ca7a36 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -32,7 +32,7 @@ */ // Package metadata define the structure of the metadata supported by gRPC library. -package metadata +package metadata // import "google.golang.org/grpc/metadata" import ( "encoding/base64" @@ -117,10 +117,17 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { + return Join(md) +} + +// Join joins any number of MDs into a single MD. +// The order of values for each key is determined by the order in which +// the MDs containing those values are presented to Join. 
+func Join(mds ...MD) MD { out := MD{} - for k, v := range md { - for _, i := range v { - out[k] = append(out[k], i) + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) } } return out diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 35ac9cc7b..6b60095d5 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -303,10 +303,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er case compressionNone: case compressionMade: if dc == nil || recvCompress != dc.Type() { - return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: - return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf) + return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf) } return nil } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 1ed8aac9e..debbd79ae 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -324,7 +324,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. The service goroutines // read gRPC requests and then call the registered handlers to reply to them. -// Service returns when lis.Accept fails. lis will be closed when +// Serve returns when lis.Accept fails. lis will be closed when // this method returns. 
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() @@ -367,7 +367,10 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() + // If serverHandShake returns ErrConnDispatched, keep rawConn open. + if err != credentials.ErrConnDispatched { + rawConn.Close() + } return } @@ -544,7 +547,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } if err == io.ErrUnexpectedEOF { - err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) } if err != nil { switch err := err.(type) { @@ -566,8 +569,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { switch err := err.(type) { - case transport.StreamError: - if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { + case *rpcError: + if err := t.WriteStatus(stream, err.code, err.desc); err != nil { grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) } default: @@ -870,25 +873,28 @@ func SendHeader(ctx context.Context, md metadata.MD) error { } stream, ok := transport.StreamFromContext(ctx) if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } t := stream.ServerTransport() if t == nil { grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) } - return t.WriteHeader(stream, md) + if err := t.WriteHeader(stream, md); err != nil { + return toRPCErr(err) + } + return nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. 
-// It may be called at most once from a unary RPC handler. The ctx is the RPC -// handler's Context or one derived from it. +// When called more than once, all the provided metadata will be merged. +// The ctx is the RPC handler's Context or one derived from it. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } stream, ok := transport.StreamFromContext(ctx) if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) + return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetTrailer(md) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 51df3f01d..68d777b50 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -97,7 +97,14 @@ type ClientStream interface { // NewClientStream creates a new Stream for the client side. This is called // by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { var ( t transport.ClientTransport s *transport.Stream @@ -296,7 +303,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } }() if err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: %v", err) + return Errorf(codes.Internal, "grpc: %v", err) } return cs.t.Write(cs.s, out, &transport.Options{Last: false}) } @@ -407,8 +414,8 @@ type ServerStream interface { // after SendProto. 
It fails if called multiple times or if // called after SendProto. SendHeader(metadata.MD) error - // SetTrailer sets the trailer metadata which will be sent with the - // RPC status. + // SetTrailer sets the trailer metadata which will be sent with the RPC status. + // When called more than once, all the provided metadata will be merged. SetTrailer(metadata.MD) Stream } @@ -468,10 +475,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() if err != nil { - err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) + err = Errorf(codes.Internal, "grpc: %v", err) return err } - return ss.t.Write(ss.s, out, &transport.Options{Last: false}) + if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + return nil } func (ss *serverStream) RecvMsg(m interface{}) (err error) { @@ -489,5 +499,14 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - return recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize) + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize); err != nil { + if err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + return nil } diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 30e21ac0f..114e34906 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -85,7 +85,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) + return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) } st.timeoutSet = true st.timeout = to @@ -393,5 +393,5 @@ func mapRecvMsgError(err error) 
error { } } } - return ConnectionError{Desc: err.Error()} + return connectionErrorf(true, err, err.Error()) } diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 5819cb8a4..3c185541a 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -114,14 +114,42 @@ func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Contex return dialContext(ctx, "tcp", addr) } +func isTemporary(err error) bool { + switch err { + case io.EOF: + // Connection closures may be resolved upon retry, and are thus + // treated as temporary. + return true + case context.DeadlineExceeded: + // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this + // special case is not needed. Until then, we need to keep this + // clause. + return true + } + + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return false +} + // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) { scheme := "http" - conn, connErr := dial(opts.Dialer, ctx, addr) - if connErr != nil { - return nil, ConnectionErrorf(true, connErr, "transport: %v", connErr) + conn, err := dial(opts.Dialer, ctx, addr) + if err != nil { + return nil, connectionErrorf(true, err, "transport: %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { @@ -132,12 +160,13 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl var authInfo credentials.AuthInfo if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, connErr = creds.ClientHandshake(ctx, addr, conn) - } - if connErr != nil { - // Credentials handshake error is not a temporary error (unless the error - // was the connection closing). - return nil, ConnectionErrorf(connErr == io.EOF, connErr, "transport: %v", connErr) + conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn) + if err != nil { + // Credentials handshake errors are typically considered permanent + // to avoid retrying on e.g. bad certificates. 
+ temp := isTemporary(err) + return nil, connectionErrorf(temp, err, "transport: %v", err) + } } ua := primaryUA if opts.UserAgent != "" { @@ -176,11 +205,11 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl n, err := t.conn.Write(clientPreface) if err != nil { t.Close() - return nil, ConnectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } if n != len(clientPreface) { t.Close() - return nil, ConnectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } if initialWindowSize != defaultWindowSize { err = t.framer.writeSettings(true, http2.Setting{ @@ -192,13 +221,13 @@ func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ Cl } if err != nil { t.Close() - return nil, ConnectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { t.Close() - return nil, ConnectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } } go t.controller() @@ -223,8 +252,10 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { s.windowHandler = func(n int) { t.updateWindow(s, uint32(n)) } - // Make a stream be able to cancel the pending operations by itself. - s.ctx, s.cancel = context.WithCancel(ctx) + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. 
+ s.ctx = ctx s.dec = &recvBufferReader{ ctx: s.ctx, goAway: s.goAway, @@ -236,16 +267,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // NewStream creates a stream and register it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - // Record the timeout value on the context. - var timeout time.Duration - if dl, ok := ctx.Deadline(); ok { - timeout = dl.Sub(time.Now()) - } - select { - case <-ctx.Done(): - return nil, ContextErr(ctx.Err()) - default: - } pr := &peer.Peer{ Addr: t.conn.RemoteAddr(), } @@ -266,12 +287,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { - return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) + return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) } audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) + return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err) } for k, v := range data { authData[k] = v @@ -352,9 +373,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if callHdr.SendCompress != "" { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) } - if timeout > 0 { + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + timeout := dl.Sub(time.Now()) t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } + for k, v := range authData { // Capital header names are illegal in HTTP/2. 
k = strings.ToLower(k) @@ -408,7 +432,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } if err != nil { t.notifyError(err) - return nil, ConnectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } } t.writableChan <- 0 @@ -454,7 +478,7 @@ func (t *http2Client) CloseStream(s *Stream, err error) { } s.state = streamDone s.mu.Unlock() - if _, ok := err.(StreamError); ok { + if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded { t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) } } @@ -622,7 +646,7 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { // invoked. if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { t.notifyError(err) - return ConnectionErrorf(true, err, "transport: %v", err) + return connectionErrorf(true, err, "transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() @@ -670,7 +694,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { func (t *http2Client) handleData(f *http2.DataFrame) { size := len(f.Data()) if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(ConnectionErrorf(true, err, "%v", err)) + t.notifyError(connectionErrorf(true, err, "%v", err)) return } // Select the right stream to dispatch. @@ -776,7 +800,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if t.state == reachable || t.state == draining { if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { t.mu.Unlock() - t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) + t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) return } select { @@ -785,7 +809,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // t.goAway has been closed (i.e.,multiple GoAways). 
if id < f.LastStreamID { t.mu.Unlock() - t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) + t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) return } t.prevGoAwayID = id @@ -823,6 +847,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { state.processHeaderField(hf) } if state.err != nil { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() s.write(recvMsg{err: state.err}) // Something wrong. Stops reading even when there is remaining. return @@ -900,7 +930,7 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) } continue } else { diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 16010d55f..f753c4f1e 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -111,12 +111,12 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI Val: uint32(initialWindowSize)}) } if err := framer.writeSettings(true, settings...); err != nil { - return nil, ConnectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. 
if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf(true, err, "transport: %v", err) + return nil, connectionErrorf(true, err, "transport: %v", err) } } var buf bytes.Buffer @@ -448,7 +448,7 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e } if err != nil { t.Close() - return ConnectionErrorf(true, err, "transport: %v", err) + return connectionErrorf(true, err, "transport: %v", err) } } return nil @@ -544,7 +544,7 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { s.mu.Lock() if s.state == streamDone { s.mu.Unlock() - return StreamErrorf(codes.Unknown, "the stream has been done") + return streamErrorf(codes.Unknown, "the stream has been done") } if !s.headerOk { writeHeaderFrame = true @@ -568,7 +568,7 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } if err := t.framer.writeHeaders(false, p); err != nil { t.Close() - return ConnectionErrorf(true, err, "transport: %v", err) + return connectionErrorf(true, err, "transport: %v", err) } t.writableChan <- 0 } @@ -642,7 +642,7 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { t.Close() - return ConnectionErrorf(true, err, "transport: %v", err) + return connectionErrorf(true, err, "transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 3e16e4df4..a3c68d4ca 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -53,7 +53,7 @@ import ( const ( // The primary user agent - primaryUA = "grpc-go/0.11" + primaryUA = "grpc-go/1.0" // http2MaxFrameLen specifies the max length of a HTTP2 frame. 
http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -162,7 +162,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { switch f.Name { case "content-type": if !validContentType(f.Value) { - d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) return } case "grpc-encoding": @@ -170,7 +170,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { - d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) return } d.statusCode = codes.Code(code) @@ -181,7 +181,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) { var err error d.timeout, err = decodeTimeout(f.Value) if err != nil { - d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) return } case ":path": @@ -253,6 +253,9 @@ func div(d, r time.Duration) int64 { // TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. 
func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } if d := div(t, time.Nanosecond); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "n" } @@ -349,7 +352,7 @@ func decodeGrpcMessageUnchecked(msg string) string { for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { - parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8) + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { buf.WriteByte(c) } else { diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index f0885da23..3d6b6a6d5 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -35,11 +35,10 @@ Package transport defines and implements message oriented communication channel to complete various transactions (e.g., an RPC). */ -package transport +package transport // import "google.golang.org/grpc/transport" import ( "bytes" - "errors" "fmt" "io" "net" @@ -169,7 +168,8 @@ type Stream struct { // nil for client side Stream. st ServerTransport // ctx is the associated context of the stream. - ctx context.Context + ctx context.Context + // cancel is always nil for client side Stream. cancel context.CancelFunc // done is closed when the final status arrives. done chan struct{} @@ -286,19 +286,12 @@ func (s *Stream) StatusDesc() string { return s.statusDesc } -// ErrIllegalTrailerSet indicates that the trailer has already been set or it -// is too late to do so. -var ErrIllegalTrailerSet = errors.New("transport: trailer has been set") - // SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can only be called at most once. Server side only. +// by the server. This can be called multiple times. Server side only. 
func (s *Stream) SetTrailer(md metadata.MD) error { s.mu.Lock() defer s.mu.Unlock() - if s.trailer != nil { - return ErrIllegalTrailerSet - } - s.trailer = md.Copy() + s.trailer = metadata.Join(s.trailer, md) return nil } @@ -476,16 +469,16 @@ type ServerTransport interface { Drain() } -// StreamErrorf creates an StreamError with the specified error code and description. -func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError { +// streamErrorf creates an StreamError with the specified error code and description. +func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { return StreamError{ Code: c, Desc: fmt.Sprintf(format, a...), } } -// ConnectionErrorf creates an ConnectionError with the specified error description. -func ConnectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, @@ -522,10 +515,10 @@ func (e ConnectionError) Origin() error { var ( // ErrConnClosing indicates that the transport is closing. - ErrConnClosing = ConnectionError{Desc: "transport is closing", temp: true} + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") // ErrStreamDrain indicates that the stream is rejected by the server because // the server stops accepting new RPCs. - ErrStreamDrain = StreamErrorf(codes.Unavailable, "the server stops accepting new RPCs") + ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs") ) // StreamError is an error that only affects one stream within a connection. 
@@ -542,9 +535,9 @@ func (e StreamError) Error() string { func ContextErr(err error) StreamError { switch err { case context.DeadlineExceeded: - return StreamErrorf(codes.DeadlineExceeded, "%v", err) + return streamErrorf(codes.DeadlineExceeded, "%v", err) case context.Canceled: - return StreamErrorf(codes.Canceled, "%v", err) + return streamErrorf(codes.Canceled, "%v", err) } panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) } diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go index d17ad945d..3b4afedf1 100644 --- a/vendor/gopkg.in/inf.v0/dec.go +++ b/vendor/gopkg.in/inf.v0/dec.go @@ -20,7 +20,7 @@ // + combined operations such as AddRound/MulAdd etc // + exchanging data in decimal32/64/128 formats // -package inf +package inf // import "gopkg.in/inf.v0" // TODO: // - avoid excessive deep copying (quo and rounders) diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 004172a2e..000000000 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4 - - 1.5 - - 1.6 - - tip - -go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index 7b8bd8670..000000000 --- a/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/k8s.io/client-go/1.4/LICENSE b/vendor/k8s.io/client-go/1.4/LICENSE new file mode 100644 index 000000000..00b240110 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2014 The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/OWNERS b/vendor/k8s.io/client-go/1.4/pkg/api/OWNERS deleted file mode 100644 index d28472e0f..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/api/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -assignees: - - bgrant0607 - - erictune - - lavalamp - - smarterclayton - - thockin diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/node_example.json b/vendor/k8s.io/client-go/1.4/pkg/api/node_example.json deleted file mode 100644 index 260183484..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/api/node_example.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "e2e-test-wojtekt-minion-etd6", - "selfLink": "/api/v1/nodes/e2e-test-wojtekt-minion-etd6", - "uid": "a7e89222-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "379", - "creationTimestamp": "2015-04-22T11:49:39Z" - }, - "spec": { - "externalID": "15488322946290398375" - }, - "status": { - "capacity": { - "cpu": "1", - "memory": "1745152Ki" - }, - "conditions": [ - { - "type": "Ready", - "status": "True", - "lastHeartbeatTime": "2015-04-22T11:58:17Z", - "lastTransitionTime": "2015-04-22T11:49:52Z", - "reason": "kubelet is posting ready status" - } - ], - "addresses": [ - { - "type": "ExternalIP", - "address": "104.197.49.213" - }, - { - "type": "LegacyHostIP", - "address": "104.197.20.11" - } - ], - "nodeInfo": { - "machineID": "", - "systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577", - "bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95", - "kernelVersion": "3.16.0-0.bpo.4-amd64", - "osImage": "Debian GNU/Linux 7 (wheezy)", - "containerRuntimeVersion": "docker://1.5.0", - "kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty", - "kubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty" - } - } -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/replication_controller_example.json b/vendor/k8s.io/client-go/1.4/pkg/api/replication_controller_example.json deleted file mode 100644 index 70eef1cff..000000000 --- 
a/vendor/k8s.io/client-go/1.4/pkg/api/replication_controller_example.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "elasticsearch-logging-controller", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller", - "uid": "aa76f162-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "98", - "creationTimestamp": "2015-04-22T11:49:43Z", - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "replicas": 1, - "selector": { - "name": "elasticsearch-logging" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "volumes": [ - { - "name": "es-persistent-storage", - "hostPath": null, - "emptyDir": { - "medium": "" - }, - "gcePersistentDisk": null, - "awsElasticBlockStore": null, - "gitRepo": null, - "secret": null, - "nfs": null, - "iscsi": null, - "glusterfs": null, - "quobyte": null - } - ], - "containers": [ - { - "name": "elasticsearch-logging", - "image": "gcr.io/google_containers/elasticsearch:1.0", - "ports": [ - { - "name": "db", - "containerPort": 9200, - "protocol": "TCP" - }, - { - "name": "transport", - "containerPort": 9300, - "protocol": "TCP" - } - ], - "resources": {}, - "volumeMounts": [ - { - "name": "es-persistent-storage", - "mountPath": "/data" - } - ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {} - } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst" - } - } - }, - "status": { - "replicas": 1 - } -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/resource/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/api/resource/generated.proto deleted file mode 100644 index bdc091d98..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/api/resource/generated.proto +++ 
/dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.resource; - -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "resource"; - -// Quantity is a fixed-point representation of a number. -// It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. -// -// The serialization format is: -// -// ::= -// (Note that may be empty, from the "" case in .) -// ::= 0 | 1 | ... | 9 -// ::= | -// ::= | . | . | . -// ::= "+" | "-" -// ::= | -// ::= | | -// ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) -// ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) -// ::= "e" | "E" -// -// No matter which of the three exponent forms is used, no quantity may represent -// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal -// places. Numbers larger or more precise will be capped or rounded up. -// (E.g.: 0.1m will rounded up to 1m.) -// This may be extended in the future if we require larger or smaller quantities. 
-// -// When a Quantity is parsed from a string, it will remember the type of suffix -// it had, and will use the same type again when it is serialized. -// -// Before serializing, Quantity will be put in "canonical form". -// This means that Exponent/suffix will be adjusted up or down (with a -// corresponding increase or decrease in Mantissa) such that: -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. -// The sign will be omitted unless the number is negative. -// -// Examples: -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" -// -// NOTE: We reserve the right to amend this canonical format, perhaps to -// allow 1.5 to be canonical. -// TODO: Remove above disclaimer after all bikeshedding about format is over, -// or after March 2015. -// -// Note that the quantity will NEVER be internally represented by a -// floating point number. That is the whole point of this exercise. -// -// Non-canonical values will still parse as long as they are well formed, -// but will be re-emitted in their canonical form. (So always use canonical -// form, or don't diff.) -// -// This format is intended to make it difficult to use these numbers without -// writing some sort of special handling code in the hopes that that will -// cause implementors to also use a fixed point implementation. -// -// +protobuf=true -// +protobuf.embed=string -// +protobuf.options.marshal=false -// +protobuf.options.(gogoproto.goproto_stringer)=false -message Quantity { - optional string string = 1; -} - diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto deleted file mode 100644 index bd72ad341..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto +++ /dev/null @@ -1,378 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.unversioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "unversioned"; - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -message APIGroup { - // name is the name of the group. - optional string name = 1; - - // versions are the versions supported in this group. - repeated GroupVersionForDiscovery versions = 2; - - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - optional GroupVersionForDiscovery preferredVersion = 3; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
- // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -message APIGroupList { - // groups is a list of APIGroup. - repeated APIGroup groups = 1; -} - -// APIResource specifies the name of a resource and whether it is namespaced. -message APIResource { - // name is the name of the resource. - optional string name = 1; - - // namespaced indicates if a resource is namespaced or not. - optional bool namespaced = 2; - - // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - optional string kind = 3; -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -message APIResourceList { - // groupVersion is the group and version this APIResourceList is for. - optional string groupVersion = 1; - - // resources contains the name of the resources and if they are namespaced. - repeated APIResource resources = 2; -} - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message APIVersions { - // versions are the api versions that are available. - repeated string versions = 1; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. 
- // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; -} - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -message Duration { - optional int64 duration = 1; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify.` - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupKind { - optional string group = 1; - - optional string kind = 2; -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupResource { - optional string group = 1; - - optional string resource = 2; -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersion { - optional string group = 1; - - optional string version = 2; -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensibility. 
-message GroupVersionForDiscovery { - // groupVersion specifies the API group and version in the form "group/version" - optional string groupVersion = 1; - - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - optional string version = 2; -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionKind { - optional string group = 1; - - optional string version = 2; - - optional string kind = 3; -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionResource { - optional string group = 1; - - optional string version = 2; - - optional string resource = 3; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. 
-message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -message ListMeta { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - optional string selfLink = 1; - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 2; -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -message RootPaths { - // paths are the paths available at root. - repeated string paths = 1; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. 
- optional string serverAddress = 2; -} - -// Status is a return value for calls that don't return other objects. -message Status { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional ListMeta metadata = 1; - - // Status of the operation. - // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional string status = 2; - - // A human-readable description of the status of this operation. - optional string message = 3; - - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - optional string reason = 4; - - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - optional StatusDetails details = 5; - - // Suggested HTTP return code for this status, 0 if not set. - optional int32 code = 6; -} - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -message StatusCause { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - optional string reason = 1; - - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - optional string message = 2; - - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. 
- // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - optional string field = 3; -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -message StatusDetails { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - optional string name = 1; - - // The group attribute of the resource associated with the status StatusReason. - optional string group = 2; - - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 3; - - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. - repeated StatusCause causes = 4; - - // If specified, the time in seconds before the operation should be retried. - optional int32 retryAfterSeconds = 5; -} - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -// +protobuf.options.(gogoproto.goproto_stringer)=false -message Time { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. 
- optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// Timestamp is a struct that is equivalent to Time, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Time. Do not use in Go structs. -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -message TypeMeta { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#resources - optional string apiVersion = 2; -} - diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto deleted file mode 100644 index 06da5fcc2..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto +++ /dev/null @@ -1,3088 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.v1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -message AWSElasticBlockStoreVolumeSource { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional string volumeID = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - optional int32 partition = 3; - - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional bool readOnly = 4; -} - -// Affinity is a group of affinity scheduling rules. -message Affinity { - // Describes node affinity scheduling rules for the pod. - optional NodeAffinity nodeAffinity = 1; - - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - optional PodAffinity podAffinity = 2; - - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
- optional PodAntiAffinity podAntiAffinity = 3; -} - -// AttachedVolume describes a volume attached to a node -message AttachedVolume { - // Name of the attached volume - optional string name = 1; - - // DevicePath represents the device path where the volume should be avilable - optional string devicePath = 2; -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -message AvoidPods { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - repeated PreferAvoidPodsEntry preferAvoidPods = 1; -} - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -message AzureDiskVolumeSource { - // The Name of the data disk in the blob storage - optional string diskName = 1; - - // The URI the data disk in the blob storage - optional string diskURI = 2; - - // Host Caching mode: None, Read Only, Read Write. - optional string cachingMode = 3; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - optional string fsType = 4; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 5; -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -message AzureFileVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key - optional string secretName = 1; - - // Share Name - optional string shareName = 2; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 3; -} - -// Binding ties one object to another. 
-// For example, a pod is bound to a node by a scheduler. -message Binding { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The target object that you want to bind to the standard object. - optional ObjectReference target = 2; -} - -// Adds and removes POSIX capabilities from running containers. -message Capabilities { - // Added capabilities - repeated string add = 1; - - // Removed capabilities - repeated string drop = 2; -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -message CephFSVolumeSource { - // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - repeated string monitors = 1; - - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - optional string path = 2; - - // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional string user = 3; - - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional string secretFile = 4; - - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional LocalObjectReference secretRef = 5; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. 
- // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional bool readOnly = 6; -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional string volumeID = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional string fsType = 2; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional bool readOnly = 3; -} - -// Information about the condition of a component. -message ComponentCondition { - // Type of condition for a component. - // Valid value: "Healthy" - optional string type = 1; - - // Status of the condition for a component. - // Valid values for "Healthy": "True", "False", or "Unknown". - optional string status = 2; - - // Message about the condition for a component. - // For example, information about a health check. - optional string message = 3; - - // Condition error code for a component. - // For example, a health check error code. - optional string error = 4; -} - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -message ComponentStatus { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // List of component conditions observed - repeated ComponentCondition conditions = 2; -} - -// Status of all the conditions for the component as a list of ComponentStatus objects. -message ComponentStatusList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ComponentStatus objects. - repeated ComponentStatus items = 2; -} - -// ConfigMap holds configuration data for pods to consume. -message ConfigMap { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Data contains the configuration data. - // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - map data = 2; -} - -// Selects a key from a ConfigMap. -message ConfigMapKeySelector { - // The ConfigMap to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key to select. - optional string key = 2; -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -message ConfigMapList { - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ConfigMaps. - repeated ConfigMap items = 2; -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. 
-message ConfigMapVolumeSource { - optional LocalObjectReference localObjectReference = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - repeated KeyToPath items = 2; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 defaultMode = 3; -} - -// A single application container that you want to run within a pod. -message Container { - // Name of the container specified as a DNS_LABEL. - // Each container in a pod must have a unique name (DNS_LABEL). - // Cannot be updated. - optional string name = 1; - - // Docker image name. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md - optional string image = 2; - - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands - repeated string command = 3; - - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands - repeated string args = 4; - - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - optional string workingDir = 5; - - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Cannot be updated. - repeated ContainerPort ports = 6; - - // List of environment variables to set in the container. - // Cannot be updated. - repeated EnvVar env = 7; - - // Compute Resources required by this container. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources - optional ResourceRequirements resources = 8; - - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - repeated VolumeMount volumeMounts = 9; - - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. 
- // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional Probe livenessProbe = 10; - - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional Probe readinessProbe = 11; - - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - optional Lifecycle lifecycle = 12; - - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Defaults to /dev/termination-log. - // Cannot be updated. - optional string terminationMessagePath = 13; - - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#updating-images - optional string imagePullPolicy = 14; - - // Security options the pod should run with. - // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md - optional SecurityContext securityContext = 15; - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - optional bool stdin = 16; - - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - optional bool stdinOnce = 17; - - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - optional bool tty = 18; -} - -// Describe a container image -message ContainerImage { - // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - repeated string names = 1; - - // The size of the image in bytes. - optional int64 sizeBytes = 2; -} - -// ContainerPort represents a network port in a single container. -message ContainerPort { - // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - // named port in a pod must have a unique name. Name for the port that can be - // referred to by services. - optional string name = 1; - - // Number of port to expose on the host. - // If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // Most containers do not need this. - optional int32 hostPort = 2; - - // Number of port to expose on the pod's IP address. - // This must be a valid port number, 0 < x < 65536. - optional int32 containerPort = 3; - - // Protocol for port. Must be UDP or TCP. - // Defaults to "TCP". - optional string protocol = 4; - - // What host IP to bind the external port to. - optional string hostIP = 5; -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. 
-message ContainerState { - // Details about a waiting container - optional ContainerStateWaiting waiting = 1; - - // Details about a running container - optional ContainerStateRunning running = 2; - - // Details about a terminated container - optional ContainerStateTerminated terminated = 3; -} - -// ContainerStateRunning is a running state of a container. -message ContainerStateRunning { - // Time at which the container was last (re-)started - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 1; -} - -// ContainerStateTerminated is a terminated state of a container. -message ContainerStateTerminated { - // Exit status from the last termination of the container - optional int32 exitCode = 1; - - // Signal from the last termination of the container - optional int32 signal = 2; - - // (brief) reason from the last termination of the container - optional string reason = 3; - - // Message regarding the last termination of the container - optional string message = 4; - - // Time at which previous execution of the container started - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 5; - - // Time at which the container last terminated - optional k8s.io.kubernetes.pkg.api.unversioned.Time finishedAt = 6; - - // Container's ID in the format 'docker://' - optional string containerID = 7; -} - -// ContainerStateWaiting is a waiting state of a container. -message ContainerStateWaiting { - // (brief) reason the container is not yet running. - optional string reason = 1; - - // Message regarding why the container is not yet running. - optional string message = 2; -} - -// ContainerStatus contains details for the current status of this container. -message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. - // Cannot be updated. - optional string name = 1; - - // Details about the container's current condition. 
- optional ContainerState state = 2; - - // Details about the container's last termination condition. - optional ContainerState lastState = 3; - - // Specifies whether the container has passed its readiness probe. - optional bool ready = 4; - - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - optional int32 restartCount = 5; - - // The image the container is running. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md - // TODO(dchen1107): Which image the container is running with? - optional string image = 6; - - // ImageID of the container's image. - optional string imageID = 7; - - // Container's ID in the format 'docker://'. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#container-information - optional string containerID = 8; -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -message DaemonEndpoint { - // Port number of the given endpoint. - optional int32 Port = 1; -} - -// DeleteOptions may be provided when deleting an API object -message DeleteOptions { - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - optional int64 gracePeriodSeconds = 1; - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - optional Preconditions preconditions = 2; - - // Should the dependent objects be orphaned. 
If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - optional bool orphanDependents = 3; -} - -// DownwardAPIVolumeFile represents information to create the file containing the pod field -message DownwardAPIVolumeFile { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - optional string path = 1; - - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - optional ObjectFieldSelector fieldRef = 2; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 3; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 mode = 4; -} - -// DownwardAPIVolumeSource represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -message DownwardAPIVolumeSource { - // Items is a list of downward API volume file - repeated DownwardAPIVolumeFile items = 1; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 defaultMode = 2; -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. 
-message EmptyDirVolumeSource { - // What type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // Must be an empty string (default) or Memory. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir - optional string medium = 1; -} - -// EndpointAddress is a tuple that describes single IP address. -message EndpointAddress { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. - optional string ip = 1; - - // The Hostname of this endpoint - optional string hostname = 3; - - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - optional string nodeName = 4; - - // Reference to object providing the endpoint. - optional ObjectReference targetRef = 2; -} - -// EndpointPort is a tuple that describes a single port. -message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). - // Must be a DNS_LABEL. - // Optional only if one port is defined. - optional string name = 1; - - // The port number of the endpoint. - optional int32 port = 2; - - // The IP protocol for this port. - // Must be UDP or TCP. - // Default is TCP. - optional string protocol = 3; -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. 
-// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -message EndpointSubset { - // IP addresses which offer the related ports that are marked as ready. These endpoints - // should be considered safe for load balancers and clients to utilize. - repeated EndpointAddress addresses = 1; - - // IP addresses which offer the related ports but are not currently marked as ready - // because they have not yet finished starting, have recently failed a readiness check, - // or have recently failed a liveness check. - repeated EndpointAddress notReadyAddresses = 2; - - // Port numbers available on the related IP addresses. - repeated EndpointPort ports = 3; -} - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -message Endpoints { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The set of all endpoints is the union of all subsets. Addresses are placed into - // subsets according to the IPs they share. A single address with multiple ports, - // some of which are ready and some of which are not (because they come from - // different containers) will result in the address being displayed in different - // subsets for the different ports. No address will appear in both Addresses and - // NotReadyAddresses in the same subset. - // Sets of addresses and ports that comprise a service. 
- repeated EndpointSubset subsets = 2; -} - -// EndpointsList is a list of endpoints. -message EndpointsList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of endpoints. - repeated Endpoints items = 2; -} - -// EnvVar represents an environment variable present in a Container. -message EnvVar { - // Name of the environment variable. Must be a C_IDENTIFIER. - optional string name = 1; - - // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // Defaults to "". - optional string value = 2; - - // Source for the environment variable's value. Cannot be used if value is not empty. - optional EnvVarSource valueFrom = 3; -} - -// EnvVarSource represents a source for the value of an EnvVar. -message EnvVarSource { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.podIP. - optional ObjectFieldSelector fieldRef = 1; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 2; - - // Selects a key of a ConfigMap. - optional ConfigMapKeySelector configMapKeyRef = 3; - - // Selects a key of a secret in the pod's namespace - optional SecretKeySelector secretKeyRef = 4; -} - -// Event is a report of an event somewhere in the cluster. 
-// TODO: Decide whether to store these separately or with the object they apply to. -message Event { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The object that this event is about. - optional ObjectReference involvedObject = 2; - - // This should be a short, machine understandable string that gives the reason - // for the transition into the object's current status. - // TODO: provide exact specification for format. - optional string reason = 3; - - // A human-readable description of the status of this operation. - // TODO: decide on maximum length. - optional string message = 4; - - // The component reporting this event. Should be a short machine understandable string. - optional EventSource source = 5; - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - optional k8s.io.kubernetes.pkg.api.unversioned.Time firstTimestamp = 6; - - // The time at which the most recent occurrence of this event was recorded. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTimestamp = 7; - - // The number of times this event has occurred. - optional int32 count = 8; - - // Type of this event (Normal, Warning), new types could be added in the future - optional string type = 9; -} - -// EventList is a list of events. -message EventList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of events - repeated Event items = 2; -} - -// EventSource contains information for an event. -message EventSource { - // Component from which the event is generated. - optional string component = 1; - - // Host name on which the event is generated. - optional string host = 2; -} - -// ExecAction describes a "run in container" action. 
-message ExecAction { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - repeated string command = 1; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -message FCVolumeSource { - // Required: FC target worldwide names (WWNs) - repeated string targetWWNs = 1; - - // Required: FC target lun number - optional int32 lun = 2; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 4; -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. -message FlexVolumeSource { - // Driver is the name of the driver to use for this volume. - optional string driver = 1; - - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - optional string fsType = 2; - - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - optional LocalObjectReference secretRef = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 4; - - // Optional: Extra command options if any. - map options = 5; -} - -// Represents a Flocker volume mounted by the Flocker agent. -// Flocker volumes do not support ownership management or SELinux relabeling. -message FlockerVolumeSource { - // Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker - optional string datasetName = 1; -} - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -message GCEPersistentDiskVolumeSource { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional string pdName = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional int32 partition = 3; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional bool readOnly = 4; -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -message GitRepoVolumeSource { - // Repository URL - optional string repository = 1; - - // Commit hash for the specified revision. - optional string revision = 2; - - // Target directory name. - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - optional string directory = 3; -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -message GlusterfsVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod - optional string endpoints = 1; - - // Path is the Glusterfs volume path. 
- // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod - optional string path = 2; - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod - optional bool readOnly = 3; -} - -// HTTPGetAction describes an action based on HTTP Get requests. -message HTTPGetAction { - // Path to access on the HTTP server. - optional string path = 1; - - // Name or number of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; - - // Host name to connect to, defaults to the pod IP. You probably want to set - // "Host" in httpHeaders instead. - optional string host = 3; - - // Scheme to use for connecting to the host. - // Defaults to HTTP. - optional string scheme = 4; - - // Custom headers to set in the request. HTTP allows repeated headers. - repeated HTTPHeader httpHeaders = 5; -} - -// HTTPHeader describes a custom header to be used in HTTP probes -message HTTPHeader { - // The header field name - optional string name = 1; - - // The header field value - optional string value = 2; -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -message Handler { - // One and only one of the following should be specified. - // Exec specifies the action to take. - optional ExecAction exec = 1; - - // HTTPGet specifies the http request to perform. - optional HTTPGetAction httpGet = 2; - - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - optional TCPSocketAction tcpSocket = 3; -} - -// Represents a host path mapped into a pod. 
-// Host path volumes do not support ownership management or SELinux relabeling. -message HostPathVolumeSource { - // Path of the directory on the host. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath - optional string path = 1; -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - optional string targetPortal = 1; - - // Target iSCSI Qualified Name. - optional string iqn = 2; - - // iSCSI target lun number. - optional int32 lun = 3; - - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - optional string iscsiInterface = 4; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 5; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - optional bool readOnly = 6; -} - -// Maps a string key to a path within a volume. -message KeyToPath { - // The key to project. - optional string key = 1; - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - optional string path = 2; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. 
- // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 mode = 3; -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -message Lifecycle { - // PostStart is called immediately after a container is created. If the handler fails, - // the container is terminated and restarted according to its restart policy. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details - optional Handler postStart = 1; - - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details - optional Handler preStop = 2; -} - -// LimitRange sets resource usage limits for each kind of resource in a Namespace. -message LimitRange { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the limits enforced. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional LimitRangeSpec spec = 2; -} - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. -message LimitRangeItem { - // Type of resource that this limit applies to. 
- optional string type = 1; - - // Max usage constraints on this kind by resource name. - map max = 2; - - // Min usage constraints on this kind by resource name. - map min = 3; - - // Default resource requirement limit value by resource name if resource limit is omitted. - map default = 4; - - // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - map defaultRequest = 5; - - // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - map maxLimitRequestRatio = 6; -} - -// LimitRangeList is a list of LimitRange items. -message LimitRangeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_limit_range.md - repeated LimitRange items = 2; -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind. -message LimitRangeSpec { - // Limits is the list of LimitRangeItem objects that are enforced. - repeated LimitRangeItem limits = 1; -} - -// List holds a list of objects, which may not be known by the server. -message List { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of objects - repeated k8s.io.kubernetes.pkg.runtime.RawExtension items = 2; -} - -// ListOptions is the query options to a standard REST list call. -message ListOptions { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. 
- optional string labelSelector = 1; - - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - optional string fieldSelector = 2; - - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - optional bool watch = 3; - - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - optional string resourceVersion = 4; - - // Timeout for the list/watch call. - optional int64 timeoutSeconds = 5; -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -message LoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - optional string ip = 1; - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - optional string hostname = 2; -} - -// LoadBalancerStatus represents the status of a load-balancer. -message LoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. - // Traffic intended for the service should be sent to these ingress points. - repeated LoadBalancerIngress ingress = 1; -} - -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -message LocalObjectReference { - // Name of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - // TODO: Add other useful fields. apiVersion, kind, uid? - optional string name = 1; -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. 
-message NFSVolumeSource { - // Server is the hostname or IP address of the NFS server. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional string server = 1; - - // Path that is exported by the NFS server. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional string path = 2; - - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional bool readOnly = 3; -} - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional. -message Namespace { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NamespaceSpec spec = 2; - - // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NamespaceStatus status = 3; -} - -// NamespaceList is a list of Namespaces. -message NamespaceList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md - repeated Namespace items = 2; -} - -// NamespaceSpec describes the attributes on a Namespace. -message NamespaceSpec { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. 
- // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#finalizers - repeated string finalizers = 1; -} - -// NamespaceStatus is information about the current status of a Namespace. -message NamespaceStatus { - // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#phases - optional string phase = 1; -} - -// Node is a worker node in Kubernetes, formerly known as minion. -// Each node will have a unique identifier in the cache (i.e. in etcd). -message Node { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of a node. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NodeSpec spec = 2; - - // Most recently observed status of the node. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NodeStatus status = 3; -} - -// NodeAddress contains information for the node's address. -message NodeAddress { - // Node address type, one of Hostname, ExternalIP or InternalIP. - optional string type = 1; - - // The node address. - optional string address = 2; -} - -// Node affinity is a group of node affinity scheduling rules. -message NodeAffinity { - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. 
- optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// NodeCondition contains condition information for a node. -message NodeCondition { - // Type of node condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time we got an update on a given condition. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastHeartbeatTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -message NodeDaemonEndpoints { - // Endpoint on which Kubelet is listening. - optional DaemonEndpoint kubeletEndpoint = 1; -} - -// NodeList is the whole list of all Nodes which have been registered with master. -message NodeList { - // Standard list metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of nodes - repeated Node items = 2; -} - -// NodeProxyOptions is the query options to a Node's proxy call. -message NodeProxyOptions { - // Path is the URL path to use for the current proxy request to node. - optional string path = 1; -} - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -message NodeSelector { - // Required. A list of node selector terms. The terms are ORed. - repeated NodeSelectorTerm nodeSelectorTerms = 1; -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -message NodeSelectorRequirement { - // The label key that the selector applies to. - optional string key = 1; - - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - optional string operator = 2; - - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - repeated string values = 3; -} - -// A null or empty node selector term matches no objects. -message NodeSelectorTerm { - // Required. A list of node selector requirements. The requirements are ANDed. - repeated NodeSelectorRequirement matchExpressions = 1; -} - -// NodeSpec describes the attributes that a node is created with. -message NodeSpec { - // PodCIDR represents the pod IP range assigned to the node. 
- optional string podCIDR = 1; - - // External ID of the node assigned by some machine database (e.g. a cloud provider). - // Deprecated. - optional string externalID = 2; - - // ID of the node assigned by the cloud provider in the format: :// - optional string providerID = 3; - - // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#manual-node-administration"` - optional bool unschedulable = 4; -} - -// NodeStatus is information about the current status of a node. -message NodeStatus { - // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity for more details. - map capacity = 1; - - // Allocatable represents the resources of a node that are available for scheduling. - // Defaults to Capacity. - map allocatable = 2; - - // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-phase - // The field is never populated, and now is deprecated. - optional string phase = 3; - - // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-condition - repeated NodeCondition conditions = 4; - - // List of addresses reachable to the node. - // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-addresses - repeated NodeAddress addresses = 5; - - // Endpoints of daemons running on the Node. - optional NodeDaemonEndpoints daemonEndpoints = 6; - - // Set of ids/uuids to uniquely identify the node. 
- // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-info - optional NodeSystemInfo nodeInfo = 7; - - // List of container images on this node - repeated ContainerImage images = 8; - - // List of attachable volumes in use (mounted) by the node. - repeated string volumesInUse = 9; - - // List of volumes that are attached to the node. - repeated AttachedVolume volumesAttached = 10; -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -message NodeSystemInfo { - // Machine ID reported by the node. - optional string machineID = 1; - - // System UUID reported by the node. - optional string systemUUID = 2; - - // Boot ID reported by the node. - optional string bootID = 3; - - // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - optional string kernelVersion = 4; - - // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - optional string osImage = 5; - - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - optional string containerRuntimeVersion = 6; - - // Kubelet Version reported by the node. - optional string kubeletVersion = 7; - - // KubeProxy Version reported by the node. - optional string kubeProxyVersion = 8; - - // The Operating System reported by the node - optional string operatingSystem = 9; - - // The Architecture reported by the node - optional string architecture = 10; -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -message ObjectFieldSelector { - // Version of the schema the FieldPath is written in terms of, defaults to "v1". - optional string apiVersion = 1; - - // Path of the field to select in the specified API version. - optional string fieldPath = 2; -} - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -message ObjectMeta { - // Name must be unique within a namespace. 
Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 1; - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#idempotency - optional string generateName = 2; - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md - optional string namespace = 3; - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. 
- optional string selfLink = 4; - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids - optional string uid = 5; - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 6; - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - optional int64 generation = 7; - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8; - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. 
The resource will be deleted (no longer visible from - // resource lists, and not reachable by name) after the time in this field. Once set, this - // value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet - // will send a hard termination signal to the container. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9; - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - optional int64 deletionGracePeriodSeconds = 10; - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md - map labels = 11; - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/annotations.md - map annotations = 12; - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. 
If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - repeated OwnerReference ownerReferences = 13; - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - repeated string finalizers = 14; - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - optional string clusterName = 15; -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -message ObjectReference { - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Namespace of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md - optional string namespace = 2; - - // Name of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 3; - - // UID of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids - optional string uid = 4; - - // API version of the referent. - optional string apiVersion = 5; - - // Specific resourceVersion to which this reference is made, if any. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 6; - - // If referring to a piece of an object instead of an entire object, this string - // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - // For example, if the object reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - optional string fieldPath = 7; -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -message OwnerReference { - // API version of the referent. - optional string apiVersion = 5; - - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Name of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 3; - - // UID of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids - optional string uid = 4; - - // If true, this reference points to the managing controller. - optional bool controller = 6; -} - -// PersistentVolume (PV) is a storage resource provisioned by an administrator. -// It is analogous to a node. -// More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md -message PersistentVolume { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines a specification of a persistent volume owned by the cluster. - // Provisioned by an administrator. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes - optional PersistentVolumeSpec spec = 2; - - // Status represents the current information/status for the persistent volume. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes - optional PersistentVolumeStatus status = 3; -} - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -message PersistentVolumeClaim { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimSpec spec = 2; - - // Status represents the current information/status of a persistent volume claim. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimStatus status = 3; -} - -// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. -message PersistentVolumeClaimList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // A list of persistent volume claims. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - repeated PersistentVolumeClaim items = 2; -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -message PersistentVolumeClaimSpec { - // AccessModes contains the desired access modes the volume should have. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 - repeated string accessModes = 1; - - // A label query over volumes to consider for binding. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; - - // Resources represents the minimum resources the volume should have. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources - optional ResourceRequirements resources = 2; - - // VolumeName is the binding reference to the PersistentVolume backing this claim. - optional string volumeName = 3; -} - -// PersistentVolumeClaimStatus is the current status of a persistent volume claim. -message PersistentVolumeClaimStatus { - // Phase represents the current phase of PersistentVolumeClaim. - optional string phase = 1; - - // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 - repeated string accessModes = 2; - - // Represents the actual resources of the underlying volume. - map capacity = 3; -} - -// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. -// This volume finds the bound PV and mounts that volume for the pod. A -// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -// type of volume that is owned by someone else (the system). 
-message PersistentVolumeClaimVolumeSource { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional string claimName = 1; - - // Will force the ReadOnly setting in VolumeMounts. - // Default false. - optional bool readOnly = 2; -} - -// PersistentVolumeList is a list of PersistentVolume items. -message PersistentVolumeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of persistent volumes. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md - repeated PersistentVolume items = 2; -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the -// administrator who creates PVs. Exactly one of its members must be set. -message PersistentVolumeSource { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; - - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath - optional HostPathVolumeSource hostPath = 3; - - // Glusterfs represents a Glusterfs volume that is attached to a host and - // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md - optional GlusterfsVolumeSource glusterfs = 4; - - // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional NFSVolumeSource nfs = 5; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md - optional RBDVolumeSource rbd = 6; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - optional ISCSIVolumeSource iscsi = 7; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional CinderVolumeSource cinder = 8; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - optional CephFSVolumeSource cephfs = 9; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - optional FCVolumeSource fc = 10; - - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running - optional FlockerVolumeSource flocker = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. 
- optional FlexVolumeSource flexVolume = 12; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - optional AzureFileVolumeSource azureFile = 13; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - optional QuobyteVolumeSource quobyte = 15; - - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - optional AzureDiskVolumeSource azureDisk = 16; -} - -// PersistentVolumeSpec is the specification of a persistent volume. -message PersistentVolumeSpec { - // A description of the persistent volume's resources and capacity. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity - map capacity = 1; - - // The actual volume backing the persistent volume. - optional PersistentVolumeSource persistentVolumeSource = 2; - - // AccessModes contains all ways the volume can be mounted. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes - repeated string accessModes = 3; - - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // Expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#binding - optional ObjectReference claimRef = 4; - - // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default) and Recycle. - // Recycling must be supported by the volume plugin underlying this persistent volume. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#recycling-policy - optional string persistentVolumeReclaimPolicy = 5; -} - -// PersistentVolumeStatus is the current status of a persistent volume. -message PersistentVolumeStatus { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#phase - optional string phase = 1; - - // A human-readable message indicating details about why the volume is in this state. - optional string message = 2; - - // Reason is a brief CamelCase string that describes any failure and is meant - // for machine parsing and tidy display in the CLI. - optional string reason = 3; -} - -// Pod is a collection of containers that can run on a host. This resource is created -// by clients and scheduled onto hosts. -message Pod { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodSpec spec = 2; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodStatus status = 3; -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -message PodAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. 
due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. 
- repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which -// a pod of the set of pods is running -message PodAffinityTerm { - // A label query over a set of resources, in this case pods. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1; - - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. - repeated string namespaces = 2; - - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - optional string topologyKey = 3; -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -message PodAntiAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. 
- // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. 
- repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// PodAttachOptions is the query options to a Pod's remote attach call. -// --- -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -message PodAttachOptions { - // Stdin if true, redirects the standard input stream of the pod for this call. - // Defaults to false. - optional bool stdin = 1; - - // Stdout if true indicates that stdout is to be redirected for the attach call. - // Defaults to true. - optional bool stdout = 2; - - // Stderr if true indicates that stderr is to be redirected for the attach call. - // Defaults to true. - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the attach call. - // This is passed through the container runtime so the tty - // is allocated on the worker node by the container runtime. - // Defaults to false. - optional bool tty = 4; - - // The container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - optional string container = 5; -} - -// PodCondition contains details for the current condition of this pod. -message PodCondition { - // Type is the type of the condition. - // Currently only Ready. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions - optional string type = 1; - - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions - optional string status = 2; - - // Last time we probed the condition. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transitioned from one status to another. 
- optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // Unique, one-word, CamelCase reason for the condition's last transition. - optional string reason = 5; - - // Human-readable message indicating details about last transition. - optional string message = 6; -} - -// PodExecOptions is the query options to a Pod's remote exec call. -// --- -// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -message PodExecOptions { - // Redirect the standard input stream of the pod for this call. - // Defaults to false. - optional bool stdin = 1; - - // Redirect the standard output stream of the pod for this call. - // Defaults to true. - optional bool stdout = 2; - - // Redirect the standard error stream of the pod for this call. - // Defaults to true. - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the exec call. - // Defaults to false. - optional bool tty = 4; - - // Container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - optional string container = 5; - - // Command is the remote command to execute. argv array. Not executed within a shell. - repeated string command = 6; -} - -// PodList is a list of Pods. -message PodList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of pods. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pods.md - repeated Pod items = 2; -} - -// PodLogOptions is the query options for a Pod's logs REST call. -message PodLogOptions { - // The container for which to stream logs. Defaults to only container if there is one container in the pod. 
- optional string container = 1; - - // Follow the log stream of the pod. Defaults to false. - optional bool follow = 2; - - // Return previous terminated container logs. Defaults to false. - optional bool previous = 3; - - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional int64 sinceSeconds = 4; - - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; - - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. - optional bool timestamps = 6; - - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - optional int64 tailLines = 7; - - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - optional int64 limitBytes = 8; -} - -// PodProxyOptions is the query options to a Pod's proxy call. -message PodProxyOptions { - // Path is the URL path to use for the current proxy request to pod. - optional string path = 1; -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. 
-message PodSecurityContext { - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - optional SELinuxOptions seLinuxOptions = 1; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - optional int64 runAsUser = 2; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional bool runAsNonRoot = 3; - - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - repeated int64 supplementalGroups = 4; - - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. 
- optional int64 fsGroup = 5; -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -message PodSignature { - // Reference to controller whose pods should avoid this node. - optional OwnerReference podController = 1; -} - -// PodSpec is a description of a pod. -message PodSpec { - // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md - repeated Volume volumes = 1; - - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md - repeated Container containers = 2; - - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#restartpolicy - optional string restartPolicy = 3; - - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - optional int64 terminationGracePeriodSeconds = 4; - - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. 
- optional int64 activeDeadlineSeconds = 5; - - // Set DNS policy for containers within the pod. - // One of 'ClusterFirst' or 'Default'. - // Defaults to "ClusterFirst". - optional string dnsPolicy = 6; - - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/node-selection/README.md - map nodeSelector = 7; - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md - optional string serviceAccountName = 8; - - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. - // +k8s:conversion-gen=false - optional string serviceAccount = 9; - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - optional string nodeName = 10; - - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - // +k8s:conversion-gen=false - optional bool hostNetwork = 11; - - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - optional bool hostPID = 12; - - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - optional bool hostIPC = 13; - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. 
- optional PodSecurityContext securityContext = 14; - - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod - repeated LocalObjectReference imagePullSecrets = 15; - - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - optional string hostname = 16; - - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - optional string subdomain = 17; -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -message PodStatus { - // Current condition of the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-phase - optional string phase = 1; - - // Current service state of pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions - repeated PodCondition conditions = 2; - - // A human readable message indicating details about why the pod is in this condition. - optional string message = 3; - - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' - optional string reason = 4; - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - optional string hostIP = 5; - - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. - optional string podIP = 6; - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. 
- // This is before the Kubelet pulled the container image(s) for the pod. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 7; - - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-statuses - repeated ContainerStatus containerStatuses = 8; -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -message PodStatusResult { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodStatus status = 2; -} - -// PodTemplate describes a template for creating copies of a predefined pod. -message PodTemplate { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodTemplateSpec template = 2; -} - -// PodTemplateList is a list of PodTemplates. -message PodTemplateList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of pod templates - repeated PodTemplate items = 2; -} - -// PodTemplateSpec describes the data a pod should have when created from a template -message PodTemplateSpec { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodSpec spec = 2; -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -message Preconditions { - // Specifies the target UID. - optional string uid = 1; -} - -// Describes a class of pods that should avoid this node. -message PreferAvoidPodsEntry { - // The class of pods. - optional PodSignature podSignature = 1; - - // Time at which this entry was added to the list. - optional k8s.io.kubernetes.pkg.api.unversioned.Time evictionTime = 2; - - // (brief) reason why this entry was added to the list. - optional string reason = 3; - - // Human readable message indicating why this entry was added to the list. - optional string message = 4; -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -message PreferredSchedulingTerm { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - optional int32 weight = 1; - - // A node selector term, associated with the corresponding weight. - optional NodeSelectorTerm preference = 2; -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -message Probe { - // The action taken to determine the health of a container - optional Handler handler = 1; - - // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional int32 initialDelaySeconds = 2; - - // Number of seconds after which the probe times out. 
- // Defaults to 1 second. Minimum value is 1. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional int32 timeoutSeconds = 3; - - // How often (in seconds) to perform the probe. - // Default to 10 seconds. Minimum value is 1. - optional int32 periodSeconds = 4; - - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - optional int32 successThreshold = 5; - - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // Defaults to 3. Minimum value is 1. - optional int32 failureThreshold = 6; -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -message QuobyteVolumeSource { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - optional string registry = 1; - - // Volume is a string that references an already created Quobyte volume by name. - optional string volume = 2; - - // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. - // Defaults to false. - optional bool readOnly = 3; - - // User to map volume access to - // Defaults to serivceaccount user - optional string user = 4; - - // Group to map volume access to - // Default is no group - optional string group = 5; -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -message RBDVolumeSource { - // A collection of Ceph monitors. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - repeated string monitors = 1; - - // The rados image name. 
- // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional string image = 2; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 3; - - // The rados pool name. - // Default is rbd. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it. - optional string pool = 4; - - // The rados user name. - // Default is admin. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional string user = 5; - - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional string keyring = 6; - - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional LocalObjectReference secretRef = 7; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional bool readOnly = 8; -} - -// RangeAllocation is not a public type. -message RangeAllocation { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Range is string that identifies the range represented by 'data'. 
- optional string range = 2; - - // Data is a bit array containing all allocated addresses in the previous segment. - optional bytes data = 3; -} - -// ReplicationController represents the configuration of a replication controller. -message ReplicationController { - // If the Labels of a ReplicationController are empty, they are defaulted to - // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ReplicationControllerSpec spec = 2; - - // Status is the most recently observed status of the replication controller. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ReplicationControllerStatus status = 3; -} - -// ReplicationControllerList is a collection of replication controllers. -message ReplicationControllerList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of replication controllers. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md - repeated ReplicationController items = 2; -} - -// ReplicationControllerSpec is the specification of a replication controller. -message ReplicationControllerSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the Replicas count. - // If Selector is empty, it is defaulted to the labels present on the Pod template. - // Label keys and values that must match in order to be controlled by this replication - // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - map selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template - optional PodTemplateSpec template = 3; -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -message ReplicationControllerStatus { - // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replication controller. - optional int32 readyReplicas = 4; - - // ObservedGeneration reflects the generation of the most recently observed replication controller. 
- optional int64 observedGeneration = 3; -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -message ResourceFieldSelector { - // Container name: required for volumes, optional for env vars - optional string containerName = 1; - - // Required: resource to select - optional string resource = 2; - - // Specifies the output format of the exposed resources, defaults to "1" - optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3; -} - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -message ResourceQuota { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the desired quota. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ResourceQuotaSpec spec = 2; - - // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ResourceQuotaStatus status = 3; -} - -// ResourceQuotaList is a list of ResourceQuota items. -message ResourceQuotaList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - repeated ResourceQuota items = 2; -} - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. -message ResourceQuotaSpec { - // Hard is the set of desired hard limits for each named resource. 
- // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - map hard = 1; - - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. - repeated string scopes = 2; -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use. -message ResourceQuotaStatus { - // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - map hard = 1; - - // Used is the current observed total usage of the resource in the namespace. - map used = 2; -} - -// ResourceRequirements describes the compute resource requirements. -message ResourceRequirements { - // Limits describes the maximum amount of compute resources allowed. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ - map limits = 1; - - // Requests describes the minimum amount of compute resources required. - // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ - map requests = 2; -} - -// SELinuxOptions are the labels to be applied to the container -message SELinuxOptions { - // User is a SELinux user label that applies to the container. - optional string user = 1; - - // Role is a SELinux role label that applies to the container. - optional string role = 2; - - // Type is a SELinux type label that applies to the container. - optional string type = 3; - - // Level is SELinux level label that applies to the container. - optional string level = 4; -} - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. 
-message Secret { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 - map data = 2; - - // stringData allows specifying non-binary secret data in string form. - // It is provided as a write-only convenience method. - // All keys and values are merged into the data field on write, overwriting any existing values. - // It is never output when reading from the API. - // +k8s:conversion-gen=false - map stringData = 4; - - // Used to facilitate programmatic handling of secret data. - optional string type = 3; -} - -// SecretKeySelector selects a key of a Secret. -message SecretKeySelector { - // The name of the secret in the pod's namespace to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key of the secret to select from. Must be a valid secret key. - optional string key = 2; -} - -// SecretList is a list of Secret. -message SecretList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of secret objects. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md - repeated Secret items = 2; -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. 
-message SecretVolumeSource { - // Name of the secret in the pod's namespace to use. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets - optional string secretName = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - repeated KeyToPath items = 2; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 defaultMode = 3; -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -message SecurityContext { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - optional Capabilities capabilities = 1; - - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - optional bool privileged = 2; - - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. 
If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional SELinuxOptions seLinuxOptions = 3; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional int64 runAsUser = 4; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional bool runAsNonRoot = 5; - - // Whether this container has a read-only root filesystem. - // Default is false. - optional bool readOnlyRootFilesystem = 6; -} - -// SerializedReference is a reference to serialized object. -message SerializedReference { - // The reference to an object in the system. - optional ObjectReference reference = 1; -} - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -message Service { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of a service. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ServiceSpec spec = 2; - - // Most recently observed status of the service. - // Populated by the system. - // Read-only. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ServiceStatus status = 3; -} - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -message ServiceAccount { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md - repeated ObjectReference secrets = 2; - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret - repeated LocalObjectReference imagePullSecrets = 3; -} - -// ServiceAccountList is a list of ServiceAccount objects -message ServiceAccountList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ServiceAccounts. - // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md#service-accounts - repeated ServiceAccount items = 2; -} - -// ServiceList holds a list of services. -message ServiceList { - // Standard list metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of services - repeated Service items = 2; -} - -// ServicePort contains information on service's port. -message ServicePort { - // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - // Optional if only one ServicePort is defined on this service. - optional string name = 1; - - // The IP protocol for this port. Supports "TCP" and "UDP". - // Default is TCP. - optional string protocol = 2; - - // The port that will be exposed by this service. - optional int32 port = 3; - - // Number or name of the port to access on the pods targeted by the service. - // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - // If this is a string, it will be looked up as a named port in the - // target Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#defining-a-service - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4; - - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#type--nodeport - optional int32 nodePort = 5; -} - -// ServiceProxyOptions is the query options to a Service's proxy call. 
-message ServiceProxyOptions { - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - optional string path = 1; -} - -// ServiceSpec describes the attributes that a user creates on a service. -message ServiceSpec { - // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies - repeated ServicePort ports = 1; - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview - map selector = 2; - - // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies - optional string clusterIP = 3; - - // type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. 
- // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview - optional string type = 4; - - // externalIPs is a list of IP addresses for which nodes in the cluster - // will also accept traffic for this service. These IPs are not managed by - // Kubernetes. The user is responsible for ensuring that traffic arrives - // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. When using this - // field, callers should also clear the deprecatedPublicIPs field. - repeated string externalIPs = 5; - - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. - // +k8s:conversion-gen=false - repeated string deprecatedPublicIPs = 6; - - // Supports "ClientIP" and "None". Used to maintain session affinity. - // Enable client IP based session affinity. - // Must be ClientIP or None. - // Defaults to None. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies - optional string sessionAffinity = 7; - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - optional string loadBalancerIP = 8; - - // If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services-firewalls.md - repeated string loadBalancerSourceRanges = 9; - - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - optional string externalName = 10; -} - -// ServiceStatus represents the current status of a service. -message ServiceStatus { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - optional LoadBalancerStatus loadBalancer = 1; -} - -// TCPSocketAction describes an action based on opening a socket -message TCPSocketAction { - // Number or name of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1; -} - -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. -message Taint { - // Required. The taint key to be applied to a node. - optional string key = 1; - - // Required. 
The taint value corresponding to the taint key. - optional string value = 2; - - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. - optional string effect = 3; -} - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -message Toleration { - // Required. Key is the taint key that the toleration applies to. - optional string key = 1; - - // operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - optional string operator = 2; - - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - optional string value = 3; - - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. - optional string effect = 4; -} - -// Volume represents a named volume in a pod that may be accessed by any container in the pod. -message Volume { - // Volume's name. - // Must be a DNS_LABEL and unique within the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 1; - - // VolumeSource represents the location and type of the mounted volume. - // If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - optional VolumeSource volumeSource = 2; -} - -// VolumeMount describes a mounting of a Volume within a container. -message VolumeMount { - // This must match the Name of a Volume. - optional string name = 1; - - // Mounted read-only if true, read-write otherwise (false or unspecified). - // Defaults to false. 
- optional bool readOnly = 2; - - // Path within the container at which the volume should be mounted. Must - // not contain ':'. - optional string mountPath = 3; - - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - optional string subPath = 4; -} - -// Represents the source of a volume to mount. -// Only one of its members may be specified. -message VolumeSource { - // HostPath represents a pre-existing file or directory on the host - // machine that is directly exposed to the container. This is generally - // used for system agents or other privileged things that are allowed - // to see the host machine. Most containers will NOT need this. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - optional HostPathVolumeSource hostPath = 1; - - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir - optional EmptyDirVolumeSource emptyDir = 2; - - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; - - // GitRepo represents a git repository at a particular revision. - optional GitRepoVolumeSource gitRepo = 5; - - // Secret represents a secret that should populate this volume. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets - optional SecretVolumeSource secret = 6; - - // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional NFSVolumeSource nfs = 7; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/iscsi/README.md - optional ISCSIVolumeSource iscsi = 8; - - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md - optional GlusterfsVolumeSource glusterfs = 9; - - // PersistentVolumeClaimVolumeSource represents a reference to a - // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md - optional RBDVolumeSource rbd = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. - optional FlexVolumeSource flexVolume = 12; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional CinderVolumeSource cinder = 13; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - optional CephFSVolumeSource cephfs = 14; - - // Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running - optional FlockerVolumeSource flocker = 15; - - // DownwardAPI represents downward API about the pod that should populate this volume - optional DownwardAPIVolumeSource downwardAPI = 16; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - optional FCVolumeSource fc = 17; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - optional AzureFileVolumeSource azureFile = 18; - - // ConfigMap represents a configMap that should populate this volume - optional ConfigMapVolumeSource configMap = 19; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - optional QuobyteVolumeSource quobyte = 21; - - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - optional AzureDiskVolumeSource azureDisk = 22; -} - -// Represents a vSphere volume resource. -message VsphereVirtualDiskVolumeSource { - // Path that identifies vSphere volume vmdk - optional string volumePath = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - optional string fsType = 2; -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -message WeightedPodAffinityTerm { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - optional int32 weight = 1; - - // Required. A pod affinity term, associated with the corresponding weight. 
- optional PodAffinityTerm podAffinityTerm = 2; -} - diff --git a/vendor/k8s.io/client-go/1.4/pkg/conversion/OWNERS b/vendor/k8s.io/client-go/1.4/pkg/conversion/OWNERS deleted file mode 100644 index a046efc0c..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/conversion/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - derekwaynecarr - - lavalamp - - smarterclayton - - wojtek-t diff --git a/vendor/k8s.io/client-go/1.4/pkg/runtime/OWNERS b/vendor/k8s.io/client-go/1.4/pkg/runtime/OWNERS deleted file mode 100644 index d038b5e9b..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/runtime/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - caesarxuchao - - deads2k - - lavalamp - - smarterclayton diff --git a/vendor/k8s.io/client-go/1.4/pkg/runtime/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/runtime/generated.proto deleted file mode 100644 index 0e602abe1..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/runtime/generated.proto +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.runtime; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "runtime"; - -// RawExtension is used to hold extensions in external versions. 
-// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +k8s:deepcopy-gen=true -// +protobuf=true -message RawExtension { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - optional bytes raw = 1; -} - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... 
// other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. -// -// +k8s:deepcopy-gen=true -// +protobuf=true -message TypeMeta { - optional string apiVersion = 1; - - optional string kind = 2; -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +k8s:deepcopy-gen=true -// +protobuf=true -message Unknown { - optional TypeMeta typeMeta = 1; - - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - optional bytes raw = 2; - - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - optional string contentEncoding = 3; - - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. - optional string contentType = 4; -} - diff --git a/vendor/k8s.io/client-go/1.4/pkg/util/intstr/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/util/intstr/generated.proto deleted file mode 100644 index dd508e1c8..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/util/intstr/generated.proto +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.util.intstr; - -// Package-wide variables from generator "generated". -option go_package = "intstr"; - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. This allows you to have, for example, a JSON field that can -// accept a name or number. -// TODO: Rename to Int32OrString -// -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message IntOrString { - optional int64 type = 1; - - optional int32 intVal = 2; - - optional string strVal = 3; -} - diff --git a/vendor/k8s.io/client-go/1.4/pkg/version/.gitattributes b/vendor/k8s.io/client-go/1.4/pkg/version/.gitattributes deleted file mode 100644 index 7e349eff6..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/version/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -base.go export-subst diff --git a/vendor/k8s.io/client-go/1.4/pkg/watch/versioned/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/watch/versioned/generated.proto deleted file mode 100644 index 8d5506552..000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/watch/versioned/generated.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.watch.versioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "versioned"; - -// Event represents a single event to a watched resource. -// -// +protobuf=true -message Event { - optional string type = 1; - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. - optional k8s.io.kubernetes.pkg.runtime.RawExtension object = 2; -} - diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go index 7a45fb7bb..4c51dfeeb 100644 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register -package federation +package federation // import "k8s.io/kubernetes/federation/apis/federation" diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go index 3a4f89c98..63a9aac12 100644 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/federation/apis/federation -package v1beta1 +package v1beta1 // import "k8s.io/kubernetes/federation/apis/federation/v1beta1" diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto deleted file mode 100644 index d779bcffc..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package k8s.io.kubernetes.federation.apis.federation.v1beta1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -message Cluster { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of the Cluster. - optional ClusterSpec spec = 2; - - // Status describes the current status of a Cluster - optional ClusterStatus status = 3; -} - -// ClusterCondition describes current state of a cluster. -message ClusterCondition { - // Type of cluster condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// A list of all the kubernetes clusters registered to the federation -message ClusterList { - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of Cluster objects. 
- repeated Cluster items = 2; -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -message ClusterSpec { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; - - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretRef = 2; -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -message ClusterStatus { - // Conditions is an array of current cluster conditions. - repeated ClusterCondition conditions = 1; - - // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - repeated string zones = 5; - - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - optional string region = 6; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. 
- optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/OWNERS deleted file mode 100644 index d28472e0f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -assignees: - - bgrant0607 - - erictune - - lavalamp - - smarterclayton - - thockin diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go index 0c36c85cc..2978b2278 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package annotations defines annotation keys that shared between server and client -package annotations +package annotations // import "k8s.io/kubernetes/pkg/api/annotations" diff --git a/vendor/k8s.io/kubernetes/pkg/api/context.go b/vendor/k8s.io/kubernetes/pkg/api/context.go index 580e6b8a0..0f735f468 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/context.go +++ b/vendor/k8s.io/kubernetes/pkg/api/context.go @@ -59,6 +59,9 @@ const ( // uidKey is the context key for the uid to assign to an object on create. uidKey + + // userAgentKey is the context key for the request user agent. + userAgentKey ) // NewContext instantiates a base context object for request flows. 
@@ -136,3 +139,14 @@ func UIDFrom(ctx Context) (types.UID, bool) { uid, ok := ctx.Value(uidKey).(types.UID) return uid, ok } + +// WithUserAgent returns a copy of parent in which the user value is set +func WithUserAgent(parent Context, userAgent string) Context { + return WithValue(parent, userAgentKey, userAgent) +} + +// UserAgentFrom returns the value of the userAgent key on the ctx +func UserAgentFrom(ctx Context) (string, bool) { + userAgent, ok := ctx.Value(userAgentKey).(string) + return userAgent, ok +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/api/doc.go index 1507a8823..283a83e40 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/doc.go @@ -21,4 +21,4 @@ limitations under the License. // The contract presented to clients is located in the versioned packages, // which are sub-directories. The first one is "v1". Those packages // describe how a particular version is serialized to storage/network. -package api +package api // import "k8s.io/kubernetes/pkg/api" diff --git a/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go b/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go index 58751ed0e..0929d746c 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package errors provides detailed error types for api field validation. 
-package errors +package errors // import "k8s.io/kubernetes/pkg/api/errors" diff --git a/vendor/k8s.io/kubernetes/pkg/api/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/helpers.go index 50e811b35..49a2db8a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/helpers.go @@ -123,6 +123,7 @@ func IsStandardContainerResourceName(str string) bool { var standardLimitRangeTypes = sets.NewString( string(LimitTypePod), string(LimitTypeContainer), + string(LimitTypePersistentVolumeClaim), ) // IsStandardLimitRangeType returns true if the type is Pod or Container @@ -526,6 +527,20 @@ func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool { return tolerated } +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { var avoidPods AvoidPods if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go b/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go index a3b18a5c9..50007adbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// Package meta provides functions for retrieving API metadata from objects // belonging to the Kubernetes API -package meta +package meta // import "k8s.io/kubernetes/pkg/api/meta" diff --git a/vendor/k8s.io/kubernetes/pkg/api/node_example.json b/vendor/k8s.io/kubernetes/pkg/api/node_example.json deleted file mode 100644 index 260183484..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/node_example.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "e2e-test-wojtekt-minion-etd6", - "selfLink": "/api/v1/nodes/e2e-test-wojtekt-minion-etd6", - "uid": "a7e89222-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "379", - "creationTimestamp": "2015-04-22T11:49:39Z" - }, - "spec": { - "externalID": "15488322946290398375" - }, - "status": { - "capacity": { - "cpu": "1", - "memory": "1745152Ki" - }, - "conditions": [ - { - "type": "Ready", - "status": "True", - "lastHeartbeatTime": "2015-04-22T11:58:17Z", - "lastTransitionTime": "2015-04-22T11:49:52Z", - "reason": "kubelet is posting ready status" - } - ], - "addresses": [ - { - "type": "ExternalIP", - "address": "104.197.49.213" - }, - { - "type": "LegacyHostIP", - "address": "104.197.20.11" - } - ], - "nodeInfo": { - "machineID": "", - "systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577", - "bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95", - "kernelVersion": "3.16.0-0.bpo.4-amd64", - "osImage": "Debian GNU/Linux 7 (wheezy)", - "containerRuntimeVersion": "docker://1.5.0", - "kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty", - "kubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty" - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json b/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json deleted file mode 100644 index 70eef1cff..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "kind": "ReplicationController", - "apiVersion": "v1", - 
"metadata": { - "name": "elasticsearch-logging-controller", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller", - "uid": "aa76f162-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "98", - "creationTimestamp": "2015-04-22T11:49:43Z", - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "replicas": 1, - "selector": { - "name": "elasticsearch-logging" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "volumes": [ - { - "name": "es-persistent-storage", - "hostPath": null, - "emptyDir": { - "medium": "" - }, - "gcePersistentDisk": null, - "awsElasticBlockStore": null, - "gitRepo": null, - "secret": null, - "nfs": null, - "iscsi": null, - "glusterfs": null, - "quobyte": null - } - ], - "containers": [ - { - "name": "elasticsearch-logging", - "image": "gcr.io/google_containers/elasticsearch:1.0", - "ports": [ - { - "name": "db", - "containerPort": 9200, - "protocol": "TCP" - }, - { - "name": "transport", - "containerPort": 9300, - "protocol": "TCP" - } - ], - "resources": {}, - "volumeMounts": [ - { - "name": "es-persistent-storage", - "mountPath": "/data" - } - ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {} - } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst" - } - } - }, - "status": { - "replicas": 1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto deleted file mode 100644 index bdc091d98..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.resource; - -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "resource"; - -// Quantity is a fixed-point representation of a number. -// It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. -// -// The serialization format is: -// -// ::= -// (Note that may be empty, from the "" case in .) -// ::= 0 | 1 | ... | 9 -// ::= | -// ::= | . | . | . -// ::= "+" | "-" -// ::= | -// ::= | | -// ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) -// ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) -// ::= "e" | "E" -// -// No matter which of the three exponent forms is used, no quantity may represent -// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal -// places. Numbers larger or more precise will be capped or rounded up. -// (E.g.: 0.1m will rounded up to 1m.) -// This may be extended in the future if we require larger or smaller quantities. -// -// When a Quantity is parsed from a string, it will remember the type of suffix -// it had, and will use the same type again when it is serialized. 
-// -// Before serializing, Quantity will be put in "canonical form". -// This means that Exponent/suffix will be adjusted up or down (with a -// corresponding increase or decrease in Mantissa) such that: -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. -// The sign will be omitted unless the number is negative. -// -// Examples: -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" -// -// NOTE: We reserve the right to amend this canonical format, perhaps to -// allow 1.5 to be canonical. -// TODO: Remove above disclaimer after all bikeshedding about format is over, -// or after March 2015. -// -// Note that the quantity will NEVER be internally represented by a -// floating point number. That is the whole point of this exercise. -// -// Non-canonical values will still parse as long as they are well formed, -// but will be re-emitted in their canonical form. (So always use canonical -// form, or don't diff.) -// -// This format is intended to make it difficult to use these numbers without -// writing some sort of special handling code in the hopes that that will -// cause implementors to also use a fixed point implementation. -// -// +protobuf=true -// +protobuf.embed=string -// +protobuf.options.marshal=false -// +protobuf.options.(gogoproto.goproto_stringer)=false -message Quantity { - optional string string = 1; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go b/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go index ee7c4145f..97d4b9a0a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package rest defines common logic around changes to Kubernetes resources. 
-package rest +package rest // import "k8s.io/kubernetes/pkg/api/rest" diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go index 94071fb40..4be90c087 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/types.go @@ -2667,6 +2667,8 @@ const ( LimitTypePod LimitType = "Pod" // Limit that applies to all containers in a namespace LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" ) // LimitRangeItem defines a min/max usage limit for any resource that matches on kind diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go index d0ffc3327..afb2391ac 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/unversioned/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package -package unversioned +package unversioned // import "k8s.io/kubernetes/pkg/api/unversioned" diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto deleted file mode 100644 index bd72ad341..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto +++ /dev/null @@ -1,378 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.unversioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "unversioned"; - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -message APIGroup { - // name is the name of the group. - optional string name = 1; - - // versions are the versions supported in this group. - repeated GroupVersionForDiscovery versions = 2; - - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - optional GroupVersionForDiscovery preferredVersion = 3; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -message APIGroupList { - // groups is a list of APIGroup. - repeated APIGroup groups = 1; -} - -// APIResource specifies the name of a resource and whether it is namespaced. -message APIResource { - // name is the name of the resource. - optional string name = 1; - - // namespaced indicates if a resource is namespaced or not. 
- optional bool namespaced = 2; - - // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - optional string kind = 3; -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -message APIResourceList { - // groupVersion is the group and version this APIResourceList is for. - optional string groupVersion = 1; - - // resources contains the name of the resources and if they are namespaced. - repeated APIResource resources = 2; -} - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message APIVersions { - // versions are the api versions that are available. - repeated string versions = 1; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; -} - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -message Duration { - optional int64 duration = 1; -} - -// ExportOptions is the query options to the standard REST get call. 
-message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify.` - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupKind { - optional string group = 1; - - optional string kind = 2; -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupResource { - optional string group = 1; - - optional string resource = 2; -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersion { - optional string group = 1; - - optional string version = 2; -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensibility. -message GroupVersionForDiscovery { - // groupVersion specifies the API group and version in the form "group/version" - optional string groupVersion = 1; - - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - optional string version = 2; -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionKind { - optional string group = 1; - - optional string version = 2; - - optional string kind = 3; -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionResource { - optional string group = 1; - - optional string version = 2; - - optional string resource = 3; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. 
- repeated string values = 3; -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -message ListMeta { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - optional string selfLink = 1; - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 2; -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -message RootPaths { - // paths are the paths available at root. - repeated string paths = 1; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - -// Status is a return value for calls that don't return other objects. -message Status { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional ListMeta metadata = 1; - - // Status of the operation. - // One of: "Success" or "Failure". 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional string status = 2; - - // A human-readable description of the status of this operation. - optional string message = 3; - - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - optional string reason = 4; - - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - optional StatusDetails details = 5; - - // Suggested HTTP return code for this status, 0 if not set. - optional int32 code = 6; -} - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -message StatusCause { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - optional string reason = 1; - - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - optional string message = 2; - - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. - // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - optional string field = 3; -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. 
The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -message StatusDetails { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - optional string name = 1; - - // The group attribute of the resource associated with the status StatusReason. - optional string group = 2; - - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 3; - - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. - repeated StatusCause causes = 4; - - // If specified, the time in seconds before the operation should be retried. - optional int32 retryAfterSeconds = 5; -} - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -// +protobuf.options.(gogoproto.goproto_stringer)=false -message Time { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. 
- optional int32 nanos = 2; -} - -// Timestamp is a struct that is equivalent to Time, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Time. Do not use in Go structs. -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -message TypeMeta { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#resources - optional string apiVersion = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go index dfbfe3a32..db842affe 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go +++ b/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go @@ -268,17 +268,37 @@ type GroupVersions []GroupVersion // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. -func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { +func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { + var targets []GroupVersionKind for _, gv := range gvs { target, ok := gv.KindForGroupVersionKinds(kinds) if !ok { continue } - return target, true + targets = append(targets, target) + } + if len(targets) == 1 { + return targets[0], true + } + if len(targets) > 1 { + return bestMatch(kinds, targets), true } return GroupVersionKind{}, false } +// bestMatch tries to pick best matching GroupVersionKind and falls back to the first +// found if no exact match exists. +func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { + for _, gvk := range targets { + for _, k := range kinds { + if k == gvk { + return k + } + } + } + return targets[0] +} + // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. 
func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go index 0bbee72f8..a4305dc98 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go @@ -420,14 +420,22 @@ func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, return err } out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) } else { delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } return nil } func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. 
field if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { var values []ContainerStatus if err := json.Unmarshal([]byte(value), &values); err != nil { @@ -452,6 +460,7 @@ func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out out.Annotations[k] = v } delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } return nil } @@ -581,6 +590,7 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error delete(out.Annotations, PodInitContainersAnnotationKey) delete(out.Annotations, PodInitContainersBetaAnnotationKey) delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } if len(out.Spec.InitContainers) > 0 { value, err := json.Marshal(out.Spec.InitContainers) @@ -596,6 +606,7 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error return err } out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) } // We need to reset certain fields for mirror pods from pre-v1.1 kubelet @@ -633,6 +644,11 @@ func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error // back to the caller. in.Spec.InitContainers = values } + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
+ if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { var values []ContainerStatus if err := json.Unmarshal([]byte(value), &values); err != nil { @@ -659,6 +675,7 @@ func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error delete(out.Annotations, PodInitContainersAnnotationKey) delete(out.Annotations, PodInitContainersBetaAnnotationKey) delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go index 8849ee1cb..10dc479a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/kubernetes/pkg/api // Package v1 is the v1 version of the API. -package v1 +package v1 // import "k8s.io/kubernetes/pkg/api/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto deleted file mode 100644 index 1d284acdb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto +++ /dev/null @@ -1,3273 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.v1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -message AWSElasticBlockStoreVolumeSource { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional string volumeID = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - optional int32 partition = 3; - - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional bool readOnly = 4; -} - -// Affinity is a group of affinity scheduling rules. -message Affinity { - // Describes node affinity scheduling rules for the pod. - optional NodeAffinity nodeAffinity = 1; - - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - optional PodAffinity podAffinity = 2; - - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - optional PodAntiAffinity podAntiAffinity = 3; -} - -// AttachedVolume describes a volume attached to a node -message AttachedVolume { - // Name of the attached volume - optional string name = 1; - - // DevicePath represents the device path where the volume should be avilable - optional string devicePath = 2; -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -message AvoidPods { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - repeated PreferAvoidPodsEntry preferAvoidPods = 1; -} - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -message AzureDiskVolumeSource { - // The Name of the data disk in the blob storage - optional string diskName = 1; - - // The URI the data disk in the blob storage - optional string diskURI = 2; - - // Host Caching mode: None, Read Only, Read Write. - optional string cachingMode = 3; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- optional string fsType = 4; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 5; -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -message AzureFileVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key - optional string secretName = 1; - - // Share Name - optional string shareName = 2; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 3; -} - -// Binding ties one object to another. -// For example, a pod is bound to a node by a scheduler. -message Binding { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The target object that you want to bind to the standard object. - optional ObjectReference target = 2; -} - -// Adds and removes POSIX capabilities from running containers. -message Capabilities { - // Added capabilities - repeated string add = 1; - - // Removed capabilities - repeated string drop = 2; -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. 
-message CephFSVolumeSource { - // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - repeated string monitors = 1; - - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - optional string path = 2; - - // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional string user = 3; - - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional string secretFile = 4; - - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional LocalObjectReference secretRef = 5; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it - optional bool readOnly = 6; -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional string volumeID = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional string fsType = 2; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional bool readOnly = 3; -} - -// Information about the condition of a component. -message ComponentCondition { - // Type of condition for a component. - // Valid value: "Healthy" - optional string type = 1; - - // Status of the condition for a component. - // Valid values for "Healthy": "True", "False", or "Unknown". - optional string status = 2; - - // Message about the condition for a component. - // For example, information about a health check. - optional string message = 3; - - // Condition error code for a component. - // For example, a health check error code. - optional string error = 4; -} - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -message ComponentStatus { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // List of component conditions observed - repeated ComponentCondition conditions = 2; -} - -// Status of all the conditions for the component as a list of ComponentStatus objects. -message ComponentStatusList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ComponentStatus objects. - repeated ComponentStatus items = 2; -} - -// ConfigMap holds configuration data for pods to consume. -message ConfigMap { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Data contains the configuration data. 
- // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - map data = 2; -} - -// Selects a key from a ConfigMap. -message ConfigMapKeySelector { - // The ConfigMap to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key to select. - optional string key = 2; -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -message ConfigMapList { - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ConfigMaps. - repeated ConfigMap items = 2; -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -message ConfigMapVolumeSource { - optional LocalObjectReference localObjectReference = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - repeated KeyToPath items = 2; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
- optional int32 defaultMode = 3; -} - -// A single application container that you want to run within a pod. -message Container { - // Name of the container specified as a DNS_LABEL. - // Each container in a pod must have a unique name (DNS_LABEL). - // Cannot be updated. - optional string name = 1; - - // Docker image name. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md - optional string image = 2; - - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands - repeated string command = 3; - - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands - repeated string args = 4; - - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - optional string workingDir = 5; - - // List of ports to expose from the container. 
Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Cannot be updated. - repeated ContainerPort ports = 6; - - // List of environment variables to set in the container. - // Cannot be updated. - repeated EnvVar env = 7; - - // Compute Resources required by this container. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources - optional ResourceRequirements resources = 8; - - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - repeated VolumeMount volumeMounts = 9; - - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional Probe livenessProbe = 10; - - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional Probe readinessProbe = 11; - - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - optional Lifecycle lifecycle = 12; - - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Defaults to /dev/termination-log. - // Cannot be updated. - optional string terminationMessagePath = 13; - - // Image pull policy. 
- // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#updating-images - optional string imagePullPolicy = 14; - - // Security options the pod should run with. - // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md - optional SecurityContext securityContext = 15; - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - optional bool stdin = 16; - - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - optional bool stdinOnce = 17; - - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - optional bool tty = 18; -} - -// Describe a container image -message ContainerImage { - // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - repeated string names = 1; - - // The size of the image in bytes. - optional int64 sizeBytes = 2; -} - -// ContainerPort represents a network port in a single container. -message ContainerPort { - // If specified, this must be an IANA_SVC_NAME and unique within the pod. 
Each - // named port in a pod must have a unique name. Name for the port that can be - // referred to by services. - optional string name = 1; - - // Number of port to expose on the host. - // If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // Most containers do not need this. - optional int32 hostPort = 2; - - // Number of port to expose on the pod's IP address. - // This must be a valid port number, 0 < x < 65536. - optional int32 containerPort = 3; - - // Protocol for port. Must be UDP or TCP. - // Defaults to "TCP". - optional string protocol = 4; - - // What host IP to bind the external port to. - optional string hostIP = 5; -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -message ContainerState { - // Details about a waiting container - optional ContainerStateWaiting waiting = 1; - - // Details about a running container - optional ContainerStateRunning running = 2; - - // Details about a terminated container - optional ContainerStateTerminated terminated = 3; -} - -// ContainerStateRunning is a running state of a container. -message ContainerStateRunning { - // Time at which the container was last (re-)started - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 1; -} - -// ContainerStateTerminated is a terminated state of a container. 
-message ContainerStateTerminated { - // Exit status from the last termination of the container - optional int32 exitCode = 1; - - // Signal from the last termination of the container - optional int32 signal = 2; - - // (brief) reason from the last termination of the container - optional string reason = 3; - - // Message regarding the last termination of the container - optional string message = 4; - - // Time at which previous execution of the container started - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 5; - - // Time at which the container last terminated - optional k8s.io.kubernetes.pkg.api.unversioned.Time finishedAt = 6; - - // Container's ID in the format 'docker://' - optional string containerID = 7; -} - -// ContainerStateWaiting is a waiting state of a container. -message ContainerStateWaiting { - // (brief) reason the container is not yet running. - optional string reason = 1; - - // Message regarding why the container is not yet running. - optional string message = 2; -} - -// ContainerStatus contains details for the current status of this container. -message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. - // Cannot be updated. - optional string name = 1; - - // Details about the container's current condition. - optional ContainerState state = 2; - - // Details about the container's last termination condition. - optional ContainerState lastState = 3; - - // Specifies whether the container has passed its readiness probe. - optional bool ready = 4; - - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - optional int32 restartCount = 5; - - // The image the container is running. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md - // TODO(dchen1107): Which image the container is running with? - optional string image = 6; - - // ImageID of the container's image. - optional string imageID = 7; - - // Container's ID in the format 'docker://'. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#container-information - optional string containerID = 8; -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -message DaemonEndpoint { - // Port number of the given endpoint. - optional int32 Port = 1; -} - -// DeleteOptions may be provided when deleting an API object -message DeleteOptions { - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - optional int64 gracePeriodSeconds = 1; - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - optional Preconditions preconditions = 2; - - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - optional bool orphanDependents = 3; -} - -// DeprecatedDownwardAPIVolumeFile represents information to create the file containing the pod field -// This type is deprecated and should be replaced by use of the downwardAPI volume source. -message DeprecatedDownwardAPIVolumeFile { - // Required: Name is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' 
- optional string name = 1; - - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - optional ObjectFieldSelector fieldRef = 2; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 3; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 mode = 4; -} - -// DeprecatedDownwardAPIVolumeSource represents a volume containing downward API info. -// This type is deprecated and should be replaced by use of the downwardAPI volume source. -message DeprecatedDownwardAPIVolumeSource { - // Items is a list of downward API volume file - repeated DeprecatedDownwardAPIVolumeFile items = 1; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 defaultMode = 2; -} - -// DownwardAPIVolumeFile represents information to create the file containing the pod field -message DownwardAPIVolumeFile { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - optional string path = 1; - - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. 
- optional ObjectFieldSelector fieldRef = 2; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 3; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 mode = 4; -} - -// DownwardAPIVolumeSource represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -message DownwardAPIVolumeSource { - // Items is a list of downward API volume file - repeated DownwardAPIVolumeFile items = 1; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 defaultMode = 2; -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -message EmptyDirVolumeSource { - // What type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // Must be an empty string (default) or Memory. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir - optional string medium = 1; -} - -// EndpointAddress is a tuple that describes single IP address. -message EndpointAddress { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). 
- // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. - optional string ip = 1; - - // The Hostname of this endpoint - optional string hostname = 3; - - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - optional string nodeName = 4; - - // Reference to object providing the endpoint. - optional ObjectReference targetRef = 2; -} - -// EndpointPort is a tuple that describes a single port. -message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). - // Must be a DNS_LABEL. - // Optional only if one port is defined. - optional string name = 1; - - // The port number of the endpoint. - optional int32 port = 2; - - // The IP protocol for this port. - // Must be UDP or TCP. - // Default is TCP. - optional string protocol = 3; -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -message EndpointSubset { - // IP addresses which offer the related ports that are marked as ready. These endpoints - // should be considered safe for load balancers and clients to utilize. - repeated EndpointAddress addresses = 1; - - // IP addresses which offer the related ports but are not currently marked as ready - // because they have not yet finished starting, have recently failed a readiness check, - // or have recently failed a liveness check. - repeated EndpointAddress notReadyAddresses = 2; - - // Port numbers available on the related IP addresses. 
- repeated EndpointPort ports = 3; -} - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -message Endpoints { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The set of all endpoints is the union of all subsets. Addresses are placed into - // subsets according to the IPs they share. A single address with multiple ports, - // some of which are ready and some of which are not (because they come from - // different containers) will result in the address being displayed in different - // subsets for the different ports. No address will appear in both Addresses and - // NotReadyAddresses in the same subset. - // Sets of addresses and ports that comprise a service. - repeated EndpointSubset subsets = 2; -} - -// EndpointsList is a list of endpoints. -message EndpointsList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of endpoints. - repeated Endpoints items = 2; -} - -// EnvVar represents an environment variable present in a Container. -message EnvVar { - // Name of the environment variable. Must be a C_IDENTIFIER. - optional string name = 1; - - // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. 
The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // Defaults to "". - optional string value = 2; - - // Source for the environment variable's value. Cannot be used if value is not empty. - optional EnvVarSource valueFrom = 3; -} - -// EnvVarSource represents a source for the value of an EnvVar. -message EnvVarSource { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.podIP. - optional ObjectFieldSelector fieldRef = 1; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 2; - - // Selects a key of a ConfigMap. - optional ConfigMapKeySelector configMapKeyRef = 3; - - // Selects a key of a secret in the pod's namespace - optional SecretKeySelector secretKeyRef = 4; -} - -// Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. -message Event { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The object that this event is about. - optional ObjectReference involvedObject = 2; - - // This should be a short, machine understandable string that gives the reason - // for the transition into the object's current status. - // TODO: provide exact specification for format. - optional string reason = 3; - - // A human-readable description of the status of this operation. - // TODO: decide on maximum length. - optional string message = 4; - - // The component reporting this event. Should be a short machine understandable string. 
- optional EventSource source = 5; - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - optional k8s.io.kubernetes.pkg.api.unversioned.Time firstTimestamp = 6; - - // The time at which the most recent occurrence of this event was recorded. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTimestamp = 7; - - // The number of times this event has occurred. - optional int32 count = 8; - - // Type of this event (Normal, Warning), new types could be added in the future - optional string type = 9; -} - -// EventList is a list of events. -message EventList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of events - repeated Event items = 2; -} - -// EventSource contains information for an event. -message EventSource { - // Component from which the event is generated. - optional string component = 1; - - // Host name on which the event is generated. - optional string host = 2; -} - -// ExecAction describes a "run in container" action. -message ExecAction { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - repeated string command = 1; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -message FCVolumeSource { - // Required: FC target worldwide names (WWNs) - repeated string targetWWNs = 1; - - // Required: FC target lun number - optional int32 lun = 2; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 4; -} - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. - optional string type = 1; - - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. - repeated IDRange ranges = 2; -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. -message FlexVolumeSource { - // Driver is the name of the driver to use for this volume. - optional string driver = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - optional string fsType = 2; - - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. 
This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - optional LocalObjectReference secretRef = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 4; - - // Optional: Extra command options if any. - map options = 5; -} - -// Represents a Flocker volume mounted by the Flocker agent. -// Flocker volumes do not support ownership management or SELinux relabeling. -message FlockerVolumeSource { - // Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker - optional string datasetName = 1; -} - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -message GCEPersistentDiskVolumeSource { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional string pdName = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". 
- // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional int32 partition = 3; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional bool readOnly = 4; -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -message GitRepoVolumeSource { - // Repository URL - optional string repository = 1; - - // Commit hash for the specified revision. - optional string revision = 2; - - // Target directory name. - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - optional string directory = 3; -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -message GlusterfsVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod - optional string endpoints = 1; - - // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod - optional string path = 2; - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod - optional bool readOnly = 3; -} - -// HTTPGetAction describes an action based on HTTP Get requests. 
-message HTTPGetAction { - // Path to access on the HTTP server. - optional string path = 1; - - // Name or number of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; - - // Host name to connect to, defaults to the pod IP. You probably want to set - // "Host" in httpHeaders instead. - optional string host = 3; - - // Scheme to use for connecting to the host. - // Defaults to HTTP. - optional string scheme = 4; - - // Custom headers to set in the request. HTTP allows repeated headers. - repeated HTTPHeader httpHeaders = 5; -} - -// HTTPHeader describes a custom header to be used in HTTP probes -message HTTPHeader { - // The header field name - optional string name = 1; - - // The header field value - optional string value = 2; -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -message Handler { - // One and only one of the following should be specified. - // Exec specifies the action to take. - optional ExecAction exec = 1; - - // HTTPGet specifies the http request to perform. - optional HTTPGetAction httpGet = 2; - - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - optional TCPSocketAction tcpSocket = 3; -} - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -message HostPathVolumeSource { - // Path of the directory on the host. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath - optional string path = 1; -} - -// IDRange provides a min/max of an allowed range of IDs. -// TODO: this could be reused for UIDs. -message IDRange { - // Min is the start of the range, inclusive. 
- optional int64 min = 1; - - // Max is the end of the range, inclusive. - optional int64 max = 2; -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - optional string targetPortal = 1; - - // Target iSCSI Qualified Name. - optional string iqn = 2; - - // iSCSI target lun number. - optional int32 lun = 3; - - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - optional string iscsiInterface = 4; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 5; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - optional bool readOnly = 6; -} - -// Maps a string key to a path within a volume. -message KeyToPath { - // The key to project. - optional string key = 1; - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - optional string path = 2; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
- optional int32 mode = 3; -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -message Lifecycle { - // PostStart is called immediately after a container is created. If the handler fails, - // the container is terminated and restarted according to its restart policy. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details - optional Handler postStart = 1; - - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details - optional Handler preStop = 2; -} - -// LimitRange sets resource usage limits for each kind of resource in a Namespace. -message LimitRange { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the limits enforced. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional LimitRangeSpec spec = 2; -} - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. -message LimitRangeItem { - // Type of resource that this limit applies to. - optional string type = 1; - - // Max usage constraints on this kind by resource name. 
- map max = 2; - - // Min usage constraints on this kind by resource name. - map min = 3; - - // Default resource requirement limit value by resource name if resource limit is omitted. - map default = 4; - - // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - map defaultRequest = 5; - - // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - map maxLimitRequestRatio = 6; -} - -// LimitRangeList is a list of LimitRange items. -message LimitRangeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_limit_range.md - repeated LimitRange items = 2; -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind. -message LimitRangeSpec { - // Limits is the list of LimitRangeItem objects that are enforced. - repeated LimitRangeItem limits = 1; -} - -// List holds a list of objects, which may not be known by the server. -message List { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of objects - repeated k8s.io.kubernetes.pkg.runtime.RawExtension items = 2; -} - -// ListOptions is the query options to a standard REST list call. -message ListOptions { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - optional string labelSelector = 1; - - // A selector to restrict the list of returned objects by their fields. 
- // Defaults to everything. - optional string fieldSelector = 2; - - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - optional bool watch = 3; - - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - optional string resourceVersion = 4; - - // Timeout for the list/watch call. - optional int64 timeoutSeconds = 5; -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -message LoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - optional string ip = 1; - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - optional string hostname = 2; -} - -// LoadBalancerStatus represents the status of a load-balancer. -message LoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. - // Traffic intended for the service should be sent to these ingress points. - repeated LoadBalancerIngress ingress = 1; -} - -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -message LocalObjectReference { - // Name of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - // TODO: Add other useful fields. apiVersion, kind, uid? - optional string name = 1; -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -message NFSVolumeSource { - // Server is the hostname or IP address of the NFS server. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional string server = 1; - - // Path that is exported by the NFS server. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional string path = 2; - - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional bool readOnly = 3; -} - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional. -message Namespace { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NamespaceSpec spec = 2; - - // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NamespaceStatus status = 3; -} - -// NamespaceList is a list of Namespaces. -message NamespaceList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md - repeated Namespace items = 2; -} - -// NamespaceSpec describes the attributes on a Namespace. -message NamespaceSpec { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#finalizers - repeated string finalizers = 1; -} - -// NamespaceStatus is information about the current status of a Namespace. 
-message NamespaceStatus { - // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#phases - optional string phase = 1; -} - -// Node is a worker node in Kubernetes, formerly known as minion. -// Each node will have a unique identifier in the cache (i.e. in etcd). -message Node { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of a node. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NodeSpec spec = 2; - - // Most recently observed status of the node. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional NodeStatus status = 3; -} - -// NodeAddress contains information for the node's address. -message NodeAddress { - // Node address type, one of Hostname, ExternalIP or InternalIP. - optional string type = 1; - - // The node address. - optional string address = 2; -} - -// Node affinity is a group of node affinity scheduling rules. -message NodeAffinity { - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. 
- // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// NodeCondition contains condition information for a node. -message NodeCondition { - // Type of node condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time we got an update on a given condition. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastHeartbeatTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -message NodeDaemonEndpoints { - // Endpoint on which Kubelet is listening. - optional DaemonEndpoint kubeletEndpoint = 1; -} - -// NodeList is the whole list of all Nodes which have been registered with master. -message NodeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of nodes - repeated Node items = 2; -} - -// NodeProxyOptions is the query options to a Node's proxy call. -message NodeProxyOptions { - // Path is the URL path to use for the current proxy request to node. 
- optional string path = 1; -} - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -message NodeSelector { - // Required. A list of node selector terms. The terms are ORed. - repeated NodeSelectorTerm nodeSelectorTerms = 1; -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -message NodeSelectorRequirement { - // The label key that the selector applies to. - optional string key = 1; - - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - optional string operator = 2; - - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - repeated string values = 3; -} - -// A null or empty node selector term matches no objects. -message NodeSelectorTerm { - // Required. A list of node selector requirements. The requirements are ANDed. - repeated NodeSelectorRequirement matchExpressions = 1; -} - -// NodeSpec describes the attributes that a node is created with. -message NodeSpec { - // PodCIDR represents the pod IP range assigned to the node. - optional string podCIDR = 1; - - // External ID of the node assigned by some machine database (e.g. a cloud provider). - // Deprecated. - optional string externalID = 2; - - // ID of the node assigned by the cloud provider in the format: :// - optional string providerID = 3; - - // Unschedulable controls node schedulability of new pods. By default, node is schedulable. 
- // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#manual-node-administration"` - optional bool unschedulable = 4; -} - -// NodeStatus is information about the current status of a node. -message NodeStatus { - // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity for more details. - map capacity = 1; - - // Allocatable represents the resources of a node that are available for scheduling. - // Defaults to Capacity. - map allocatable = 2; - - // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-phase - // The field is never populated, and now is deprecated. - optional string phase = 3; - - // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-condition - repeated NodeCondition conditions = 4; - - // List of addresses reachable to the node. - // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-addresses - repeated NodeAddress addresses = 5; - - // Endpoints of daemons running on the Node. - optional NodeDaemonEndpoints daemonEndpoints = 6; - - // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-info - optional NodeSystemInfo nodeInfo = 7; - - // List of container images on this node - repeated ContainerImage images = 8; - - // List of attachable volumes in use (mounted) by the node. - repeated string volumesInUse = 9; - - // List of volumes that are attached to the node. - repeated AttachedVolume volumesAttached = 10; -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -message NodeSystemInfo { - // Machine ID reported by the node. - optional string machineID = 1; - - // System UUID reported by the node. 
- optional string systemUUID = 2; - - // Boot ID reported by the node. - optional string bootID = 3; - - // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - optional string kernelVersion = 4; - - // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - optional string osImage = 5; - - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - optional string containerRuntimeVersion = 6; - - // Kubelet Version reported by the node. - optional string kubeletVersion = 7; - - // KubeProxy Version reported by the node. - optional string kubeProxyVersion = 8; - - // The Operating System reported by the node - optional string operatingSystem = 9; - - // The Architecture reported by the node - optional string architecture = 10; -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -message ObjectFieldSelector { - // Version of the schema the FieldPath is written in terms of, defaults to "v1". - optional string apiVersion = 1; - - // Path of the field to select in the specified API version. - optional string fieldPath = 2; -} - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -message ObjectMeta { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 1; - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. 
This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#idempotency - optional string generateName = 2; - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md - optional string namespace = 3; - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - optional string selfLink = 4; - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids - optional string uid = 5; - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. 
May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 6; - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - optional int64 generation = 7; - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8; - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource will be deleted (no longer visible from - // resource lists, and not reachable by name) after the time in this field. Once set, this - // value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. 
Once the resource is deleted in the API, the Kubelet - // will send a hard termination signal to the container. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9; - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - optional int64 deletionGracePeriodSeconds = 10; - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md - map labels = 11; - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/annotations.md - map annotations = 12; - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - repeated OwnerReference ownerReferences = 13; - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. 
- repeated string finalizers = 14; - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - optional string clusterName = 15; -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -message ObjectReference { - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Namespace of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md - optional string namespace = 2; - - // Name of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 3; - - // UID of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids - optional string uid = 4; - - // API version of the referent. - optional string apiVersion = 5; - - // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 6; - - // If referring to a piece of an object instead of an entire object, this string - // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - // For example, if the object reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. 
- // TODO: this design is not final and this field is subject to change in the future. - optional string fieldPath = 7; -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -message OwnerReference { - // API version of the referent. - optional string apiVersion = 5; - - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Name of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 3; - - // UID of the referent. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids - optional string uid = 4; - - // If true, this reference points to the managing controller. - optional bool controller = 6; -} - -// PersistentVolume (PV) is a storage resource provisioned by an administrator. -// It is analogous to a node. -// More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md -message PersistentVolume { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines a specification of a persistent volume owned by the cluster. - // Provisioned by an administrator. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes - optional PersistentVolumeSpec spec = 2; - - // Status represents the current information/status for the persistent volume. - // Populated by the system. - // Read-only. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes - optional PersistentVolumeStatus status = 3; -} - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -message PersistentVolumeClaim { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimSpec spec = 2; - - // Status represents the current information/status of a persistent volume claim. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimStatus status = 3; -} - -// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. -message PersistentVolumeClaimList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // A list of persistent volume claims. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - repeated PersistentVolumeClaim items = 2; -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -message PersistentVolumeClaimSpec { - // AccessModes contains the desired access modes the volume should have. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 - repeated string accessModes = 1; - - // A label query over volumes to consider for binding. 
- optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; - - // Resources represents the minimum resources the volume should have. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources - optional ResourceRequirements resources = 2; - - // VolumeName is the binding reference to the PersistentVolume backing this claim. - optional string volumeName = 3; -} - -// PersistentVolumeClaimStatus is the current status of a persistent volume claim. -message PersistentVolumeClaimStatus { - // Phase represents the current phase of PersistentVolumeClaim. - optional string phase = 1; - - // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 - repeated string accessModes = 2; - - // Represents the actual resources of the underlying volume. - map capacity = 3; -} - -// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. -// This volume finds the bound PV and mounts that volume for the pod. A -// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -// type of volume that is owned by someone else (the system). -message PersistentVolumeClaimVolumeSource { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional string claimName = 1; - - // Will force the ReadOnly setting in VolumeMounts. - // Default false. - optional bool readOnly = 2; -} - -// PersistentVolumeList is a list of PersistentVolume items. -message PersistentVolumeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of persistent volumes. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md - repeated PersistentVolume items = 2; -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the -// administrator who creates PVs. Exactly one of its members must be set. -message PersistentVolumeSource { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; - - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath - optional HostPathVolumeSource hostPath = 3; - - // Glusterfs represents a Glusterfs volume that is attached to a host and - // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md - optional GlusterfsVolumeSource glusterfs = 4; - - // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional NFSVolumeSource nfs = 5; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md - optional RBDVolumeSource rbd = 6; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - optional ISCSIVolumeSource iscsi = 7; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional CinderVolumeSource cinder = 8; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - optional CephFSVolumeSource cephfs = 9; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - optional FCVolumeSource fc = 10; - - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running - optional FlockerVolumeSource flocker = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. - optional FlexVolumeSource flexVolume = 12; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - optional AzureFileVolumeSource azureFile = 13; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - optional QuobyteVolumeSource quobyte = 15; - - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - optional AzureDiskVolumeSource azureDisk = 16; -} - -// PersistentVolumeSpec is the specification of a persistent volume. -message PersistentVolumeSpec { - // A description of the persistent volume's resources and capacity. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity - map capacity = 1; - - // The actual volume backing the persistent volume. - optional PersistentVolumeSource persistentVolumeSource = 2; - - // AccessModes contains all ways the volume can be mounted. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes - repeated string accessModes = 3; - - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // Expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#binding - optional ObjectReference claimRef = 4; - - // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default) and Recycle. - // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#recycling-policy - optional string persistentVolumeReclaimPolicy = 5; -} - -// PersistentVolumeStatus is the current status of a persistent volume. -message PersistentVolumeStatus { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#phase - optional string phase = 1; - - // A human-readable message indicating details about why the volume is in this state. - optional string message = 2; - - // Reason is a brief CamelCase string that describes any failure and is meant - // for machine parsing and tidy display in the CLI. - optional string reason = 3; -} - -// Pod is a collection of containers that can run on a host. This resource is created -// by clients and scheduled onto hosts. -message Pod { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodSpec spec = 2; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodStatus status = 3; -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -message PodAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which -// a pod of the set of pods is running -message PodAffinityTerm { - // A label query over a set of resources, in this case pods. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1; - - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. 
- repeated string namespaces = 2; - - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - optional string topologyKey = 3; -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -message PodAntiAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. 
- // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// PodAttachOptions is the query options to a Pod's remote attach call. -// --- -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -message PodAttachOptions { - // Stdin if true, redirects the standard input stream of the pod for this call. - // Defaults to false. - optional bool stdin = 1; - - // Stdout if true indicates that stdout is to be redirected for the attach call. - // Defaults to true. - optional bool stdout = 2; - - // Stderr if true indicates that stderr is to be redirected for the attach call. - // Defaults to true. - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the attach call. - // This is passed through the container runtime so the tty - // is allocated on the worker node by the container runtime. - // Defaults to false. 
- optional bool tty = 4; - - // The container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - optional string container = 5; -} - -// PodCondition contains details for the current condition of this pod. -message PodCondition { - // Type is the type of the condition. - // Currently only Ready. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions - optional string type = 1; - - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions - optional string status = 2; - - // Last time we probed the condition. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transitioned from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // Unique, one-word, CamelCase reason for the condition's last transition. - optional string reason = 5; - - // Human-readable message indicating details about last transition. - optional string message = 6; -} - -// PodExecOptions is the query options to a Pod's remote exec call. -// --- -// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -message PodExecOptions { - // Redirect the standard input stream of the pod for this call. - // Defaults to false. - optional bool stdin = 1; - - // Redirect the standard output stream of the pod for this call. - // Defaults to true. - optional bool stdout = 2; - - // Redirect the standard error stream of the pod for this call. - // Defaults to true. - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the exec call. - // Defaults to false. 
- optional bool tty = 4; - - // Container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - optional string container = 5; - - // Command is the remote command to execute. argv array. Not executed within a shell. - repeated string command = 6; -} - -// PodList is a list of Pods. -message PodList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of pods. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pods.md - repeated Pod items = 2; -} - -// PodLogOptions is the query options for a Pod's logs REST call. -message PodLogOptions { - // The container for which to stream logs. Defaults to only container if there is one container in the pod. - optional string container = 1; - - // Follow the log stream of the pod. Defaults to false. - optional bool follow = 2; - - // Return previous terminated container logs. Defaults to false. - optional bool previous = 3; - - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional int64 sinceSeconds = 4; - - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; - - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. 
- optional bool timestamps = 6; - - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - optional int64 tailLines = 7; - - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - optional int64 limitBytes = 8; -} - -// PodProxyOptions is the query options to a Pod's proxy call. -message PodProxyOptions { - // Path is the URL path to use for the current proxy request to pod. - optional string path = 1; -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -message PodSecurityContext { - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - optional SELinuxOptions seLinuxOptions = 1; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - optional int64 runAsUser = 2; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. 
- // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional bool runAsNonRoot = 3; - - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - repeated int64 supplementalGroups = 4; - - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - optional int64 fsGroup = 5; -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -message PodSignature { - // Reference to controller whose pods should avoid this node. - optional OwnerReference podController = 1; -} - -// PodSpec is a description of a pod. -message PodSpec { - // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md - repeated Volume volumes = 1; - - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md - repeated Container containers = 2; - - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#restartpolicy - optional string restartPolicy = 3; - - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - optional int64 terminationGracePeriodSeconds = 4; - - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - optional int64 activeDeadlineSeconds = 5; - - // Set DNS policy for containers within the pod. - // One of 'ClusterFirst' or 'Default'. - // Defaults to "ClusterFirst". - optional string dnsPolicy = 6; - - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/node-selection/README.md - map nodeSelector = 7; - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md - optional string serviceAccountName = 8; - - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. - // +k8s:conversion-gen=false - optional string serviceAccount = 9; - - // NodeName is a request to schedule this pod onto a specific node. 
If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - optional string nodeName = 10; - - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - // +k8s:conversion-gen=false - optional bool hostNetwork = 11; - - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - optional bool hostPID = 12; - - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - optional bool hostIPC = 13; - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - optional PodSecurityContext securityContext = 14; - - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod - repeated LocalObjectReference imagePullSecrets = 15; - - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - optional string hostname = 16; - - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - optional string subdomain = 17; -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -message PodStatus { - // Current condition of the pod. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-phase - optional string phase = 1; - - // Current service state of pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions - repeated PodCondition conditions = 2; - - // A human readable message indicating details about why the pod is in this condition. - optional string message = 3; - - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' - optional string reason = 4; - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - optional string hostIP = 5; - - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. - optional string podIP = 6; - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 7; - - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-statuses - repeated ContainerStatus containerStatuses = 8; -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -message PodStatusResult { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodStatus status = 2; -} - -// PodTemplate describes a template for creating copies of a predefined pod. -message PodTemplate { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodTemplateSpec template = 2; -} - -// PodTemplateList is a list of PodTemplates. -message PodTemplateList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of pod templates - repeated PodTemplate items = 2; -} - -// PodTemplateSpec describes the data a pod should have when created from a template -message PodTemplateSpec { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional PodSpec spec = 2; -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -message Preconditions { - // Specifies the target UID. - optional string uid = 1; -} - -// Describes a class of pods that should avoid this node. -message PreferAvoidPodsEntry { - // The class of pods. - optional PodSignature podSignature = 1; - - // Time at which this entry was added to the list. - optional k8s.io.kubernetes.pkg.api.unversioned.Time evictionTime = 2; - - // (brief) reason why this entry was added to the list. - optional string reason = 3; - - // Human readable message indicating why this entry was added to the list. - optional string message = 4; -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
-message PreferredSchedulingTerm { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - optional int32 weight = 1; - - // A node selector term, associated with the corresponding weight. - optional NodeSelectorTerm preference = 2; -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -message Probe { - // The action taken to determine the health of a container - optional Handler handler = 1; - - // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional int32 initialDelaySeconds = 2; - - // Number of seconds after which the probe times out. - // Defaults to 1 second. Minimum value is 1. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes - optional int32 timeoutSeconds = 3; - - // How often (in seconds) to perform the probe. - // Default to 10 seconds. Minimum value is 1. - optional int32 periodSeconds = 4; - - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - optional int32 successThreshold = 5; - - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // Defaults to 3. Minimum value is 1. - optional int32 failureThreshold = 6; -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. 
-message QuobyteVolumeSource { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - optional string registry = 1; - - // Volume is a string that references an already created Quobyte volume by name. - optional string volume = 2; - - // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. - // Defaults to false. - optional bool readOnly = 3; - - // User to map volume access to - // Defaults to serivceaccount user - optional string user = 4; - - // Group to map volume access to - // Default is no group - optional string group = 5; -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -message RBDVolumeSource { - // A collection of Ceph monitors. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - repeated string monitors = 1; - - // The rados image name. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional string image = 2; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 3; - - // The rados pool name. - // Default is rbd. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it. - optional string pool = 4; - - // The rados user name. - // Default is admin. 
- // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional string user = 5; - - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional string keyring = 6; - - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional LocalObjectReference secretRef = 7; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it - optional bool readOnly = 8; -} - -// RangeAllocation is not a public type. -message RangeAllocation { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Range is string that identifies the range represented by 'data'. - optional string range = 2; - - // Data is a bit array containing all allocated addresses in the previous segment. - optional bytes data = 3; -} - -// ReplicationController represents the configuration of a replication controller. -message ReplicationController { - // If the Labels of a ReplicationController are empty, they are defaulted to - // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the replication controller. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ReplicationControllerSpec spec = 2; - - // Status is the most recently observed status of the replication controller. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ReplicationControllerStatus status = 3; -} - -// ReplicationControllerList is a collection of replication controllers. -message ReplicationControllerList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of replication controllers. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md - repeated ReplicationController items = 2; -} - -// ReplicationControllerSpec is the specification of a replication controller. -message ReplicationControllerSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the Replicas count. - // If Selector is empty, it is defaulted to the labels present on the Pod template. - // Label keys and values that must match in order to be controlled by this replication - // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - map selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. This takes precedence over a TemplateRef. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template - optional PodTemplateSpec template = 3; -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -message ReplicationControllerStatus { - // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replication controller. - optional int32 readyReplicas = 4; - - // ObservedGeneration reflects the generation of the most recently observed replication controller. - optional int64 observedGeneration = 3; -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -message ResourceFieldSelector { - // Container name: required for volumes, optional for env vars - optional string containerName = 1; - - // Required: resource to select - optional string resource = 2; - - // Specifies the output format of the exposed resources, defaults to "1" - optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3; -} - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -message ResourceQuota { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the desired quota. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ResourceQuotaSpec spec = 2; - - // Status defines the actual enforced quota and its current usage. 
- // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ResourceQuotaStatus status = 3; -} - -// ResourceQuotaList is a list of ResourceQuota items. -message ResourceQuotaList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - repeated ResourceQuota items = 2; -} - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. -message ResourceQuotaSpec { - // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - map hard = 1; - - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. - repeated string scopes = 2; -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use. -message ResourceQuotaStatus { - // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - map hard = 1; - - // Used is the current observed total usage of the resource in the namespace. - map used = 2; -} - -// ResourceRequirements describes the compute resource requirements. -message ResourceRequirements { - // Limits describes the maximum amount of compute resources allowed. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ - map limits = 1; - - // Requests describes the minimum amount of compute resources required. 
- // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // More info: http://kubernetes.io/docs/user-guide/compute-resources/ - map requests = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. - optional string type = 1; - - // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using - // namespace/service account allocated uids. - optional int64 uid = 2; - - // UIDRangeMin defines the min value for a strategy that allocates by range. - optional int64 uidRangeMin = 3; - - // UIDRangeMax defines the max value for a strategy that allocates by range. - optional int64 uidRangeMax = 4; -} - -// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. -message SELinuxContextStrategyOptions { - // Type is the strategy that will dictate what SELinux context is used in the SecurityContext. - optional string type = 1; - - // seLinuxOptions required to run as; required for MustRunAs - optional SELinuxOptions seLinuxOptions = 2; -} - -// SELinuxOptions are the labels to be applied to the container -message SELinuxOptions { - // User is a SELinux user label that applies to the container. - optional string user = 1; - - // Role is a SELinux role label that applies to the container. - optional string role = 2; - - // Type is a SELinux type label that applies to the container. - optional string type = 3; - - // Level is SELinux level label that applies to the container. - optional string level = 4; -} - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -message Secret { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 - map data = 2; - - // stringData allows specifying non-binary secret data in string form. - // It is provided as a write-only convenience method. - // All keys and values are merged into the data field on write, overwriting any existing values. - // It is never output when reading from the API. - // +k8s:conversion-gen=false - map stringData = 4; - - // Used to facilitate programmatic handling of secret data. - optional string type = 3; -} - -// SecretKeySelector selects a key of a Secret. -message SecretKeySelector { - // The name of the secret in the pod's namespace to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key of the secret to select from. Must be a valid secret key. - optional string key = 2; -} - -// SecretList is a list of Secret. -message SecretList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of secret objects. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md - repeated Secret items = 2; -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -message SecretVolumeSource { - // Name of the secret in the pod's namespace to use. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets - optional string secretName = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - repeated KeyToPath items = 2; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - optional int32 defaultMode = 3; -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -message SecurityContext { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - optional Capabilities capabilities = 1; - - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - optional bool privileged = 2; - - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. 
- optional SELinuxOptions seLinuxOptions = 3; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional int64 runAsUser = 4; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional bool runAsNonRoot = 5; - - // Whether this container has a read-only root filesystem. - // Default is false. - optional bool readOnlyRootFilesystem = 6; -} - -// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext -// that will be applied to a container. -message SecurityContextConstraints { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Priority influences the sort order of SCCs when evaluating which SCCs to try first for - // a given pod request based on access in the Users and Groups fields. The higher the int, the - // higher priority. If scores for multiple SCCs are equal they will be sorted by name. - optional int32 priority = 2; - - // AllowPrivilegedContainer determines if a container can request to be run as privileged. - optional bool allowPrivilegedContainer = 3; - - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. 
You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. - repeated string defaultAddCapabilities = 4; - - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - repeated string requiredDropCapabilities = 5; - - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field maybe added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. - repeated string allowedCapabilities = 6; - - // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin - // +k8s:conversion-gen=false - optional bool allowHostDirVolumePlugin = 7; - - // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names - // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use '*'. - repeated string volumes = 8; - - // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - optional bool allowHostNetwork = 9; - - // AllowHostPorts determines if the policy allows host ports in the containers. - optional bool allowHostPorts = 10; - - // AllowHostPID determines if the policy allows host pid in the containers. - optional bool allowHostPID = 11; - - // AllowHostIPC determines if the policy allows host ipc in the containers. - optional bool allowHostIPC = 12; - - // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. - optional SELinuxContextStrategyOptions seLinuxContext = 13; - - // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. - optional RunAsUserStrategyOptions runAsUser = 14; - - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. 
- optional SupplementalGroupsStrategyOptions supplementalGroups = 15; - - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 16; - - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the SCC should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - optional bool readOnlyRootFilesystem = 17; - - // The users who have permissions to use this security context constraints - repeated string users = 18; - - // The groups that have permission to use this security context constraints - repeated string groups = 19; - - // SeccompProfiles lists the allowed profiles that may be set for the pod or - // container's seccomp annotations. An unset (nil) or empty value means that no profiles may - // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When - // used to generate a value for a pod the first non-wildcard profile will be used as - // the default. - repeated string seccompProfiles = 20; -} - -// SecurityContextConstraintsList is a list of SecurityContextConstraints objects -message SecurityContextConstraintsList { - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of security context constraints. - repeated SecurityContextConstraints items = 2; -} - -// SerializedReference is a reference to serialized object. -message SerializedReference { - // The reference to an object in the system. 
- optional ObjectReference reference = 1; -} - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -message Service { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of a service. - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ServiceSpec spec = 2; - - // Most recently observed status of the service. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ServiceStatus status = 3; -} - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -message ServiceAccount { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md - repeated ObjectReference secrets = 2; - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret - repeated LocalObjectReference imagePullSecrets = 3; -} - -// ServiceAccountList is a list of ServiceAccount objects -message ServiceAccountList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ServiceAccounts. - // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md#service-accounts - repeated ServiceAccount items = 2; -} - -// ServiceList holds a list of services. -message ServiceList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of services - repeated Service items = 2; -} - -// ServicePort contains information on service's port. -message ServicePort { - // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - // Optional if only one ServicePort is defined on this service. - optional string name = 1; - - // The IP protocol for this port. Supports "TCP" and "UDP". - // Default is TCP. - optional string protocol = 2; - - // The port that will be exposed by this service. - optional int32 port = 3; - - // Number or name of the port to access on the pods targeted by the service. - // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - // If this is a string, it will be looked up as a named port in the - // target Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#defining-a-service - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4; - - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#type--nodeport - optional int32 nodePort = 5; -} - -// ServiceProxyOptions is the query options to a Service's proxy call. -message ServiceProxyOptions { - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - optional string path = 1; -} - -// ServiceSpec describes the attributes that a user creates on a service. -message ServiceSpec { - // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies - repeated ServicePort ports = 1; - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview - map selector = 2; - - // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. 
If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies - optional string clusterIP = 3; - - // type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview - optional string type = 4; - - // externalIPs is a list of IP addresses for which nodes in the cluster - // will also accept traffic for this service. These IPs are not managed by - // Kubernetes. The user is responsible for ensuring that traffic arrives - // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. 
When using this - // field, callers should also clear the deprecatedPublicIPs field. - repeated string externalIPs = 5; - - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. - // +k8s:conversion-gen=false - repeated string deprecatedPublicIPs = 6; - - // Supports "ClientIP" and "None". Used to maintain session affinity. - // Enable client IP based session affinity. - // Must be ClientIP or None. - // Defaults to None. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies - optional string sessionAffinity = 7; - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - optional string loadBalancerIP = 8; - - // If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services-firewalls.md - repeated string loadBalancerSourceRanges = 9; - - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - optional string externalName = 10; -} - -// ServiceStatus represents the current status of a service. 
-message ServiceStatus { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - optional LoadBalancerStatus loadBalancer = 1; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. - optional string type = 1; - - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. - repeated IDRange ranges = 2; -} - -// TCPSocketAction describes an action based on opening a socket -message TCPSocketAction { - // Number or name of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1; -} - -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. -message Taint { - // Required. The taint key to be applied to a node. - optional string key = 1; - - // Required. The taint value corresponding to the taint key. - optional string value = 2; - - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. - optional string effect = 3; -} - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -message Toleration { - // Required. Key is the taint key that the toleration applies to. - optional string key = 1; - - // operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. 
- optional string operator = 2; - - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - optional string value = 3; - - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. - optional string effect = 4; -} - -// Volume represents a named volume in a pod that may be accessed by any container in the pod. -message Volume { - // Volume's name. - // Must be a DNS_LABEL and unique within the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 1; - - // VolumeSource represents the location and type of the mounted volume. - // If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - optional VolumeSource volumeSource = 2; -} - -// VolumeMount describes a mounting of a Volume within a container. -message VolumeMount { - // This must match the Name of a Volume. - optional string name = 1; - - // Mounted read-only if true, read-write otherwise (false or unspecified). - // Defaults to false. - optional bool readOnly = 2; - - // Path within the container at which the volume should be mounted. Must - // not contain ':'. - optional string mountPath = 3; - - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - optional string subPath = 4; -} - -// Represents the source of a volume to mount. -// Only one of its members may be specified. -message VolumeSource { - // HostPath represents a pre-existing file or directory on the host - // machine that is directly exposed to the container. This is generally - // used for system agents or other privileged things that are allowed - // to see the host machine. Most containers will NOT need this. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - optional HostPathVolumeSource hostPath = 1; - - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir - optional EmptyDirVolumeSource emptyDir = 2; - - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; - - // GitRepo represents a git repository at a particular revision. - optional GitRepoVolumeSource gitRepo = 5; - - // Secret represents a secret that should populate this volume. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets - optional SecretVolumeSource secret = 6; - - // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs - optional NFSVolumeSource nfs = 7; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/iscsi/README.md - optional ISCSIVolumeSource iscsi = 8; - - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
- // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md - optional GlusterfsVolumeSource glusterfs = 9; - - // PersistentVolumeClaimVolumeSource represents a reference to a - // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md - optional RBDVolumeSource rbd = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. - optional FlexVolumeSource flexVolume = 12; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md - optional CinderVolumeSource cinder = 13; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - optional CephFSVolumeSource cephfs = 14; - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - optional FlockerVolumeSource flocker = 15; - - // DownwardAPI represents downward API about the pod that should populate this volume - optional DownwardAPIVolumeSource downwardAPI = 16; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - optional FCVolumeSource fc = 17; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
- optional AzureFileVolumeSource azureFile = 18; - - // ConfigMap represents a configMap that should populate this volume - optional ConfigMapVolumeSource configMap = 19; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - optional QuobyteVolumeSource quobyte = 21; - - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - optional AzureDiskVolumeSource azureDisk = 22; - - // Metadata represents metadata about the pod that should populate this volume - // Deprecated: Use downwardAPI instead. - // +k8s:conversion-gen=false - optional DeprecatedDownwardAPIVolumeSource metadata = 23; -} - -// Represents a vSphere volume resource. -message VsphereVirtualDiskVolumeSource { - // Path that identifies vSphere volume vmdk - optional string volumePath = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - optional string fsType = 2; -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -message WeightedPodAffinityTerm { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - optional int32 weight = 1; - - // Required. A pod affinity term, associated with the corresponding weight. 
- optional PodAffinityTerm podAffinityTerm = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go index 45e5d6a21..0fb0ced61 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go @@ -1242,7 +1242,7 @@ type Container struct { Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"` + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. @@ -1754,8 +1754,13 @@ const ( PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" // This annotation key will be used to contain an array of v1 JSON encoded // ContainerStatuses for init containers. The annotation will be placed into the internal - // type and cleared. - PodInitContainerStatusesAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // type and cleared. This key is only recognized by version >= 1.4. + PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code, + // this key will have its value copied to the beta key. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" ) // PodSpec is a description of a pod. 
@@ -3093,6 +3098,8 @@ const ( LimitTypePod LimitType = "Pod" // Limit that applies to all containers in a namespace LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" ) // LimitRangeItem defines a min/max usage limit for any resource that matches on kind. diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go index f17a15cf9..30f541de3 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package validation has functions for validating the correctness of api // objects and explaining what is wrong with them when they aren't valid. -package validation +package validation // import "k8s.io/kubernetes/pkg/api/validation" diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go index 9cdaa0b6f..136cb6e8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go @@ -2574,6 +2574,9 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) } + // TODO(freehan): allow user to update loadbalancerSourceRanges + allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...) + allErrs = append(allErrs, ValidateService(service)...) 
return allErrs } @@ -2935,6 +2938,17 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { } } + if limit.Type == api.LimitTypePersistentVolumeClaim { + _, minQuantityFound := limit.Min[api.ResourceStorage] + _, maxQuantityFound := limit.Max[api.ResourceStorage] + if !minQuantityFound { + allErrs = append(allErrs, field.Required(idxPath.Child("min"), "minimum storage value is required")) + } + if !maxQuantityFound { + allErrs = append(allErrs, field.Required(idxPath.Child("max"), "maximum storage value is required")) + } + } + for k, q := range limit.MaxLimitRequestRatio { allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) keys.Insert(string(k)) diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go b/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go index ede22b3d6..9a40e426b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Package apimachinery contains the generic API machinery code that // is common to both server and clients. // This package should never import specific API objects. -package apimachinery +package apimachinery // import "k8s.io/kubernetes/pkg/apimachinery" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go index bca1ff4ef..e216ed6fd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register -package apps +package apps // import "k8s.io/kubernetes/pkg/apis/apps" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go index 53d9fcabc..1b6819c2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps -package v1alpha1 +package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/apps/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto deleted file mode 100644 index b9c560675..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.apps.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". 
-option go_package = "v1alpha1"; - -// PetSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha -// and subject to change without notice. -message PetSet { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the desired identities of pets in this set. - optional PetSetSpec spec = 2; - - // Status is the current status of Pets in this PetSet. This data - // may be out of date by some window of time. - optional PetSetStatus status = 3; -} - -// PetSetList is a collection of PetSets. -message PetSetList { - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - repeated PetSet items = 2; -} - -// A PetSetSpec is the specification of a PetSet. -message PetSetSpec { - // Replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet - // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. 
- optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; - - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; - - // ServiceName is the name of the service that governs this PetSet. - // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. - optional string serviceName = 5; -} - -// PetSetStatus represents the current state of a PetSet. -message PetSetStatus { - // most recent generation observed by this autoscaler. - optional int64 observedGeneration = 1; - - // Replicas is the number of actual replicas. - optional int32 replicas = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go index 7a8a65b77..88bdf625b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register // +groupName=authentication.k8s.io -package authentication +package authentication // import "k8s.io/kubernetes/pkg/apis/authentication" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go index 32e5b5526..2b985104c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authentication // +groupName=authentication.k8s.io -package v1beta1 +package v1beta1 // import "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto deleted file mode 100644 index 3b775d269..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.authentication.v1beta1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. -message TokenReview { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional TokenReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request can be authenticated. - optional TokenReviewStatus status = 3; -} - -// TokenReviewSpec is a description of the token authentication request. -message TokenReviewSpec { - // Token is the opaque bearer token. - optional string token = 1; -} - -// TokenReviewStatus is the result of the token authentication request. -message TokenReviewStatus { - // Authenticated indicates that the token was associated with a known user. - optional bool authenticated = 1; - - // User is the UserInfo associated with the provided token. - optional UserInfo user = 2; - - // Error indicates that the token couldn't be checked - optional string error = 3; -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -message UserInfo { - // The name that uniquely identifies this user among all active users. 
- optional string username = 1; - - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - optional string uid = 2; - - // The names of groups this user is a part of. - repeated string groups = 3; - - // Any additional information provided by the authenticator. - map extra = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go index 477c9c7fa..a6c011cbf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=authorization.k8s.io -package authorization +package authorization // import "k8s.io/kubernetes/pkg/apis/authorization" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go index 3b756551e..690ab217a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authorization // +groupName=authorization.k8s.io -package v1beta1 +package v1beta1 // import "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto deleted file mode 100644 index 21fd4cfc2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.authorization.v1beta1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -message LocalSubjectAccessReview { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. 
- optional SubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - optional SubjectAccessReviewStatus status = 3; -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -message NonResourceAttributes { - // Path is the URL path of the request - optional string path = 1; - - // Verb is the standard HTTP verb - optional string verb = 2; -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -message ResourceAttributes { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - optional string namespace = 1; - - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - optional string verb = 2; - - // Group is the API Group of the Resource. "*" means all. - optional string group = 3; - - // Version is the API Version of the Resource. "*" means all. - optional string version = 4; - - // Resource is one of the existing resource types. "*" means all. - optional string resource = 5; - - // Subresource is one of the existing resource types. "" means none. - optional string subresource = 6; - - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - optional string name = 7; -} - -// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a -// spec.namespace means "in all namespaces". 
Self is a special case, because users should always be able -// to check whether they can perform an action -message SelfSubjectAccessReview { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. user and groups must be empty - optional SelfSubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - optional SubjectAccessReviewStatus status = 3; -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -message SelfSubjectAccessReviewSpec { - // ResourceAuthorizationAttributes describes information for a resource access request - optional ResourceAttributes resourceAttributes = 1; - - // NonResourceAttributes describes information for a non-resource access request - optional NonResourceAttributes nonResourceAttributes = 2; -} - -// SubjectAccessReview checks whether or not a user or group can perform an action. -message SubjectAccessReview { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional SubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - optional SubjectAccessReviewStatus status = 3; -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -message SubjectAccessReviewSpec { - // ResourceAuthorizationAttributes describes information for a resource access request - optional ResourceAttributes resourceAttributes = 1; - - // NonResourceAttributes describes information for a non-resource access request - optional NonResourceAttributes nonResourceAttributes = 2; - - // User is the user you're testing for. 
- // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups - optional string verb = 3; - - // Groups is the groups you're testing for. - repeated string group = 4; - - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - map extra = 5; -} - -// SubjectAccessReviewStatus -message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. - optional bool allowed = 1; - - // Reason is optional. It indicates why a request was allowed or denied. - optional string reason = 2; - - // EvaluationError is an indication that some error occurred during the authorization check. - // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. - // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. - optional string evaluationError = 3; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go index 2c770186b..d9e11576f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register -package autoscaling +package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go index be1c70fdb..5ce82c963 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go @@ -17,4 +17,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -package v1 +package v1 // import "k8s.io/kubernetes/pkg/apis/autoscaling/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto deleted file mode 100644 index 2a58b8431..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.autoscaling.v1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
-message CrossVersionObjectReference { - // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds" - optional string kind = 1; - - // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 2; - - // API version of the referent - optional string apiVersion = 3; -} - -// configuration of a horizontal pod autoscaler. -message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. - optional HorizontalPodAutoscalerSpec spec = 2; - - // current information about the autoscaler. - optional HorizontalPodAutoscalerStatus status = 3; -} - -// list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // Standard list metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // list of horizontal pod autoscaler objects. - repeated HorizontalPodAutoscaler items = 2; -} - -// specification of a horizontal pod autoscaler. -message HorizontalPodAutoscalerSpec { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - optional CrossVersionObjectReference scaleTargetRef = 1; - - // lower limit for the number of pods that can be set by the autoscaler, default 1. - optional int32 minReplicas = 2; - - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. 
- optional int32 maxReplicas = 3; - - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. - optional int32 targetCPUUtilizationPercentage = 4; -} - -// current status of a horizontal pod autoscaler -message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. - optional int64 observedGeneration = 1; - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; - - // current number of replicas of pods managed by this autoscaler. - optional int32 currentReplicas = 3; - - // desired number of replicas of pods managed by this autoscaler. - optional int32 desiredReplicas = 4; - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - optional int32 currentCPUUtilizationPercentage = 5; -} - -// Scale represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. - optional ScaleSpec spec = 2; - - // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. - optional ScaleStatus status = 3; -} - -// ScaleSpec describes the attributes of a scale subresource. -message ScaleSpec { - // desired number of instances for the scaled object. - optional int32 replicas = 1; -} - -// ScaleStatus represents the current status of a scale subresource. 
-message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional string selector = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go index c6b203cd8..9b2b792bd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register -package batch +package batch // import "k8s.io/kubernetes/pkg/apis/batch" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go index 5695b9e40..31a944c40 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch -package v1 +package v1 // import "k8s.io/kubernetes/pkg/apis/batch/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto deleted file mode 100644 index 76ee49994..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.batch.v1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. 
- optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 4; - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md - optional bool manualSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. 
- optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - optional int32 failed = 6; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go index 76b5d3251..2b2d60314 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch -package v2alpha1 +package v2alpha1 // import "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto deleted file mode 100644 index c849e63c5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +++ /dev/null @@ -1,254 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v2alpha1"; - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. 
Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 4; - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md - optional bool manualSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - optional int32 failed = 6; -} - -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Template defines jobs that will be created from this template - // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobTemplateSpec template = 2; -} - -// JobTemplateSpec describes the data a Job should have when created from a template -message JobTemplateSpec { - // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the job. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - -// ScheduledJob represents the configuration of a single scheduled job. -message ScheduledJob { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job, including the schedule. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ScheduledJobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ScheduledJobStatus status = 3; -} - -// ScheduledJobList is a collection of scheduled jobs. -message ScheduledJobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ScheduledJob. - repeated ScheduledJob items = 2; -} - -// ScheduledJobSpec describes how the job execution will look like and when it will actually run. -message ScheduledJobSpec { - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - optional string schedule = 1; - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - optional int64 startingDeadlineSeconds = 2; - - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. - optional string concurrencyPolicy = 3; - - // Suspend flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - optional bool suspend = 4; - - // JobTemplate is the object that describes the job that will be created when - // executing a ScheduledJob. - optional JobTemplateSpec jobTemplate = 5; -} - -// ScheduledJobStatus represents the current state of a Job. -message ScheduledJobStatus { - // Active holds pointers to currently running jobs. - repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1; - - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. 
- optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScheduleTime = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go index cce031e28..801141b82 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=certificates.k8s.io -package certificates +package certificates // import "k8s.io/kubernetes/pkg/apis/certificates" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go index 13be49cbf..fc7971237 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/certificates -package v1alpha1 +package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto deleted file mode 100644 index 5638d1d62..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1alpha1/generated.proto +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.certificates.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Describes a certificate signing request -message CertificateSigningRequest { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // The certificate request itself and any additional information. - optional CertificateSigningRequestSpec spec = 2; - - // Derived information about the request. - optional CertificateSigningRequestStatus status = 3; -} - -message CertificateSigningRequestCondition { - // request approval state, currently Approved or Denied. - optional string type = 1; - - // brief reason for the request state - optional string reason = 2; - - // human readable message with details about the request state - optional string message = 3; - - // timestamp for the last update to this condition - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastUpdateTime = 4; -} - -message CertificateSigningRequestList { - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - repeated CertificateSigningRequest items = 2; -} - -// This information is immutable after the request is created. Only the Request -// and ExtraInfo fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. 
-message CertificateSigningRequestSpec { - // Base64-encoded PKCS#10 CSR data - optional bytes request = 1; - - // Information about the requesting user (if relevant) - // See user.Info interface for details - optional string username = 2; - - optional string uid = 3; - - repeated string groups = 4; -} - -message CertificateSigningRequestStatus { - // Conditions applied to the request, such as approval or denial. - repeated CertificateSigningRequestCondition conditions = 1; - - // If request was approved, the controller will place the issued certificate here. - optional bytes certificate = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go index d044b16db..8fe8d52ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register -package componentconfig +package componentconfig // import "k8s.io/kubernetes/pkg/apis/componentconfig" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go index 82f5b88da..09197df74 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go @@ -257,8 +257,14 @@ type KubeletConfiguration struct { // computed (such as IPSEC). 
NetworkPluginMTU int32 `json:"networkPluginMTU"` // networkPluginDir is the full path of the directory in which to search - // for network plugins + // for network plugins (and, for backwards-compat, CNI config files) NetworkPluginDir string `json:"networkPluginDir"` + // CNIConfDir is the full path of the directory in which to search for + // CNI config files + CNIConfDir string `json:"cniConfDir"` + // CNIBinDir is the full path of the directory in which to search for + // CNI plugin binaries + CNIBinDir string `json:"cniBinDir"` // volumePluginDir is the full path of the directory in which to search // for additional third party volume plugins VolumePluginDir string `json:"volumePluginDir"` diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go index 16031a1b7..3c27d0d54 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -249,9 +249,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.MinimumGCAge == zeroDuration { obj.MinimumGCAge = unversioned.Duration{Duration: 0} } - if obj.NetworkPluginDir == "" { - obj.NetworkPluginDir = "/usr/libexec/kubernetes/kubelet-plugins/net/exec/" - } if obj.NonMasqueradeCIDR == "" { obj.NonMasqueradeCIDR = "10.0.0.0/8" } @@ -300,7 +297,7 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { obj.SerializeImagePulls = boolVar(true) } if obj.SeccompProfileRoot == "" { - filepath.Join(defaultRootDir, "seccomp") + obj.SeccompProfileRoot = filepath.Join(defaultRootDir, "seccomp") } if obj.StreamingConnectionIdleTimeout == zeroDuration { obj.StreamingConnectionIdleTimeout = unversioned.Duration{Duration: 4 * time.Hour} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go index 
621e80613..0ae8b2a99 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/componentconfig -package v1alpha1 +package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go index 39da57407..1a34a0058 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go @@ -308,8 +308,14 @@ type KubeletConfiguration struct { // various events in kubelet/pod lifecycle NetworkPluginName string `json:"networkPluginName"` // networkPluginDir is the full path of the directory in which to search - // for network plugins + // for network plugins (and, for backwards-compat, CNI config files) NetworkPluginDir string `json:"networkPluginDir"` + // CNIConfDir is the full path of the directory in which to search for + // CNI config files + CNIConfDir string `json:"cniConfDir"` + // CNIBinDir is the full path of the directory in which to search for + // CNI plugin binaries + CNIBinDir string `json:"cniBinDir"` // networkPluginMTU is the MTU to be passed to the network plugin, // and overrides the default MTU for cases where it cannot be automatically // computed (such as IPSEC). 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go index d5dc36ace..c54075200 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go @@ -233,6 +233,8 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.NetworkPluginMTU = in.NetworkPluginMTU out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider @@ -412,6 +414,8 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginMTU = in.NetworkPluginMTU out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider out.CloudConfigFile = in.CloudConfigFile diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index 952ce0de6..abc4cfc4a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -239,6 +239,8 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c * out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.NetworkPluginMTU = in.NetworkPluginMTU 
out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go index 7f3dd1b15..b6ceb626c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -259,6 +259,8 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginMTU = in.NetworkPluginMTU out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider out.CloudConfigFile = in.CloudConfigFile diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go index 2bbb71d05..fbce8ee70 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register -package extensions +package extensions // import "k8s.io/kubernetes/pkg/apis/extensions" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go index dc8790024..bd4087f6b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go @@ -19,4 +19,4 @@ limitations under the License. 
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch -package v1beta1 +package v1beta1 // import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto deleted file mode 100644 index 6a343f130..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto +++ /dev/null @@ -1,1013 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// An APIVersion represents a single concrete version of an object model. -message APIVersion { - // Name of this version (e.g. 'v1'). - optional string name = 1; -} - -message CPUTargetUtilization { - // fraction of the requested CPU that should be utilized/used, - // e.g. 70 means that 70% of the requested CPU should be in use. 
- optional int32 targetPercentage = 1; -} - -message CustomMetricCurrentStatus { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricCurrentStatusList { - repeated CustomMetricCurrentStatus items = 1; -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -message CustomMetricTarget { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricTargetList { - repeated CustomMetricTarget items = 1; -} - -// DaemonSet represents the configuration of a daemon set. -message DaemonSet { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional DaemonSetSpec spec = 2; - - // Status is the current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional DaemonSetStatus status = 3; -} - -// DaemonSetList is a collection of daemon sets. -message DaemonSetList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of daemon sets. - repeated DaemonSet items = 2; -} - -// DaemonSetSpec is the specification of a daemon set. -message DaemonSetSpec { - // Selector is a label query over pods that are managed by the daemon set. - // Must match in order to be controlled. 
- // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 1; - - // Template is the object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; -} - -// DaemonSetStatus represents the current status of a daemon set. -message DaemonSetStatus { - // CurrentNumberScheduled is the number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md - optional int32 currentNumberScheduled = 1; - - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md - optional int32 numberMisscheduled = 2; - - // DesiredNumberScheduled is the total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md - optional int32 desiredNumberScheduled = 3; -} - -// Deployment enables declarative updates for Pods and ReplicaSets. -message Deployment { - // Standard object metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the Deployment. - optional DeploymentSpec spec = 2; - - // Most recently observed status of the Deployment. - optional DeploymentStatus status = 3; -} - -// DeploymentList is a list of Deployments. -message DeploymentList { - // Standard list metadata. 
- optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Deployments. - repeated Deployment items = 2; -} - -// DeploymentRollback stores the information required to rollback a deployment. -message DeploymentRollback { - // Required: This must match the Name of a deployment. - optional string name = 1; - - // The annotations to be updated to a deployment - map updatedAnnotations = 2; - - // The config of this deployment rollback. - optional RollbackConfig rollbackTo = 3; -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - optional int32 replicas = 1; - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - optional LabelSelector selector = 2; - - // Template describes the pods that will be created. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; - - // The deployment strategy to use to replace existing pods with new ones. - optional DeploymentStrategy strategy = 4; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - optional int32 minReadySeconds = 5; - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - optional int32 revisionHistoryLimit = 6; - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - optional bool paused = 7; - - // The config this deployment is rolling back to. Will be cleared after rollback is done. 
- optional RollbackConfig rollbackTo = 8; -} - -// DeploymentStatus is the most recently observed status of the Deployment. -message DeploymentStatus { - // The generation observed by the deployment controller. - optional int64 observedGeneration = 1; - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - optional int32 replicas = 2; - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - optional int32 updatedReplicas = 3; - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - optional int32 availableReplicas = 4; - - // Total number of unavailable pods targeted by this deployment. - optional int32 unavailableReplicas = 5; -} - -// DeploymentStrategy describes how to replace existing pods with new ones. -message DeploymentStrategy { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - optional string type = 1; - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - optional RollingUpdateDeployment rollingUpdate = 2; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - optional string rule = 1; - - // Ranges are the allowed ranges of fs groups. 
If you would like to force a single - // fs group then supply a single range with the same start and end. - repeated IDRange ranges = 2; -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -message HTTPIngressPath { - // Path is an extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - optional string path = 1; - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - optional IngressBackend backend = 2; -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. - repeated HTTPIngressPath paths = 1; -} - -// configuration of a horizontal pod autoscaler. -message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. - optional HorizontalPodAutoscalerSpec spec = 2; - - // current information about the autoscaler. - optional HorizontalPodAutoscalerStatus status = 3; -} - -// list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // Standard list metadata. 
- optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // list of horizontal pod autoscaler objects. - repeated HorizontalPodAutoscaler items = 2; -} - -// specification of a horizontal pod autoscaler. -message HorizontalPodAutoscalerSpec { - // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modifying its spec. - optional SubresourceReference scaleRef = 1; - - // lower limit for the number of pods that can be set by the autoscaler, default 1. - optional int32 minReplicas = 2; - - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - optional int32 maxReplicas = 3; - - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified it defaults to the target CPU utilization at 80% of the requested resources. - optional CPUTargetUtilization cpuUtilization = 4; -} - -// current status of a horizontal pod autoscaler -message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. - optional int64 observedGeneration = 1; - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; - - // current number of replicas of pods managed by this autoscaler. - optional int32 currentReplicas = 3; - - // desired number of replicas of pods managed by this autoscaler. - optional int32 desiredReplicas = 4; - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - optional int32 currentCPUUtilizationPercentage = 5; -} - -// Host Port Range defines a range of host ports that will be enabled by a policy -// for pods to use. 
It requires both the start and end to be defined. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// ID Range provides a min/max of an allowed range of IDs. -message IDRange { - // Min is the start of the range, inclusive. - optional int64 min = 1; - - // Max is the end of the range, inclusive. - optional int64 max = 2; -} - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -message Ingress { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional IngressSpec spec = 2; - - // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional IngressStatus status = 3; -} - -// IngressBackend describes all endpoints for a given service and port. -message IngressBackend { - // Specifies the name of the referenced service. - optional string serviceName = 1; - - // Specifies the port of the referenced service. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString servicePort = 2; -} - -// IngressList is a collection of Ingress. -message IngressList { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Ingress. 
- repeated Ingress items = 2; -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -message IngressRule { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - optional string host = 1; - - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - optional IngressRuleValue ingressRuleValue = 2; -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -message IngressRuleValue { - optional HTTPIngressRuleValue http = 1; -} - -// IngressSpec describes the Ingress the user wishes to exist. -message IngressSpec { - // A default backend capable of servicing requests that don't match any - // rule. 
At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - optional IngressBackend backend = 1; - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - repeated IngressTLS tls = 2; - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - repeated IngressRule rules = 3; -} - -// IngressStatus describe the current state of the Ingress. -message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. - optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1; -} - -// IngressTLS describes the transport layer security associated with an Ingress. -message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - repeated string hosts = 1; - - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - optional string secretName = 2; -} - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 4; - - // AutoSelector controls generation of pod labels and pod selectors. - // It was not present in the original extensions/v1beta1 Job definition, but exists - // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite - // meaning as, ManualSelector. - // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md - optional bool autoSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - optional int32 failed = 6; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. 
- // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - -// ListOptions is the query options to a standard REST list call. -message ListOptions { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - optional string labelSelector = 1; - - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - optional string fieldSelector = 2; - - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - optional bool watch = 3; - - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - optional string resourceVersion = 4; - - // Timeout for the list/watch call. - optional int64 timeoutSeconds = 5; -} - -message NetworkPolicy { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior for this NetworkPolicy. - optional NetworkPolicySpec spec = 2; -} - -// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. -message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this rule. - // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). 
- // If this field is empty, this rule matches no ports (no traffic matches). - // If this field is present and contains at least one item, then this rule allows traffic - // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - repeated NetworkPolicyPort ports = 1; - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). - // If this field is present and contains at least on item, this rule allows traffic only if the - // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - repeated NetworkPolicyPeer from = 2; -} - -// Network Policy List is a list of NetworkPolicy objects. -message NetworkPolicyList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated NetworkPolicy items = 2; -} - -message NetworkPolicyPeer { - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. - optional LabelSelector podSelector = 1; - - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. 
- optional LabelSelector namespaceSelector = 2; -} - -message NetworkPolicyPort { - // Optional. The protocol (TCP or UDP) which traffic must match. - // If not specified, this field defaults to TCP. - optional string protocol = 1; - - // If specified, the port on the given protocol. This can - // either be a numerical or named port on a pod. If this field is not provided, - // this matches all port names and numbers. - // If present, only traffic on the specified protocol AND port - // will be matched. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; -} - -message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules - // is applied to any pods selected by this field. Multiple network policies can select the - // same set of pods. In this case, the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. - optional LabelSelector podSelector = 1; - - // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, - // OR if the traffic source is the pod's local node, - // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy - // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. - repeated NetworkPolicyIngressRule ingress = 2; -} - -// Pod Security Policy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -message PodSecurityPolicy { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - optional PodSecurityPolicySpec spec = 2; -} - -// Pod Security Policy List is a list of PodSecurityPolicy objects. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// Pod Security Policy Spec defines the policy enforced. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - optional bool privileged = 1; - - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. - repeated string defaultAddCapabilities = 2; - - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - repeated string requiredDropCapabilities = 3; - - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. - repeated string allowedCapabilities = 4; - - // volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. 
- repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - optional bool readOnlyRootFilesystem = 14; -} - -// ReplicaSet represents the configuration of a ReplicaSet. -message ReplicaSet { - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ReplicaSetSpec spec = 2; - - // Status is the most recently observed status of the ReplicaSet. 
- // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status - optional ReplicaSetStatus status = 3; -} - -// ReplicaSetList is a collection of ReplicaSets. -message ReplicaSetList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ReplicaSets. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md - repeated ReplicaSet items = 2; -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -message ReplicaSetSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. - // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. 
- // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replica set. - optional int32 readyReplicas = 4; - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - optional int64 observedGeneration = 3; -} - -// Dummy definition -message ReplicationControllerDummy { -} - -message RollbackConfig { - // The revision to rollback to. If set to 0, rollbck to the last revision. - optional int64 revision = 1; -} - -// Spec to control the desired behavior of rolling update. -message RollingUpdateDeployment { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 1; - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. 
- // Example: when this is set to 30%, the new RC can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // Ranges are the allowed ranges of uids that may be used. - repeated IDRange ranges = 2; -} - -// SELinux Strategy Options defines the strategy type and any options used to create the strategy. -message SELinuxStrategyOptions { - // type is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md#security-context - optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2; -} - -// represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. - optional ScaleSpec spec = 2; - - // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. 
- optional ScaleStatus status = 3; -} - -// describes the attributes of a scale subresource -message ScaleSpec { - // desired number of instances for the scaled object. - optional int32 replicas = 1; -} - -// represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - map selector = 2; - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors - optional string targetSelector = 3; -} - -// SubresourceReference contains enough information to let you inspect or modify the referred subresource. -message SubresourceReference { - // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names - optional string name = 2; - - // API version of the referent - optional string apiVersion = 3; - - // Subresource name of the referent - optional string subresource = 4; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. 
- optional string rule = 1; - - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. - repeated IDRange ranges = 2; -} - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -message ThirdPartyResource { - // Standard object metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Description is the description of this object. - optional string description = 2; - - // Versions are versions for this third party object - repeated APIVersion versions = 3; -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -message ThirdPartyResourceData { - // Standard object metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Data is the raw JSON data for this data. - optional bytes data = 2; -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -message ThirdPartyResourceDataList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ThirdpartyResourceData. - repeated ThirdPartyResourceData items = 2; -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -message ThirdPartyResourceList { - // Standard list metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ThirdPartyResources. 
- repeated ThirdPartyResource items = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go index 876858cd9..86f4cd560 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register -package policy +package policy // import "k8s.io/kubernetes/pkg/apis/policy" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go index 985d4bbf0..8bd6c3a52 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. -package v1alpha1 +package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/policy/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto deleted file mode 100644 index 531db804b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.policy.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. A request to cause such an eviction is -// created by POSTing to .../pods//evictions. -message Eviction { - // ObjectMeta describes the pod that is being evicted. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // DeleteOptions may be provided - optional k8s.io.kubernetes.pkg.api.v1.DeleteOptions deleteOptions = 2; -} - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -message PodDisruptionBudget { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the PodDisruptionBudget. - optional PodDisruptionBudgetSpec spec = 2; - - // Most recently observed status of the PodDisruptionBudget. - optional PodDisruptionBudgetStatus status = 3; -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -message PodDisruptionBudgetList { - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - repeated PodDisruptionBudget items = 2; -} - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -message PodDisruptionBudgetSpec { - // The minimum number of pods that must be available simultaneously. This - // can be either an integer or a string specifying a percentage, e.g. "28%". 
- optional k8s.io.kubernetes.pkg.util.intstr.IntOrString minAvailable = 1; - - // Label query over pods whose evictions are managed by the disruption - // budget. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -message PodDisruptionBudgetStatus { - // Whether or not a disruption is currently allowed. - optional bool disruptionAllowed = 1; - - // current number of healthy pods - optional int32 currentHealthy = 2; - - // minimum desired number of healthy pods - optional int32 desiredHealthy = 3; - - // total number of pods counted by this disruption budget - optional int32 expectedPods = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go index e4ce69b40..ac9920c60 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=rbac.authorization.k8s.io -package rbac +package rbac // import "k8s.io/kubernetes/pkg/apis/rbac" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go index e471bd384..86ddb241a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go @@ -18,4 +18,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/rbac -package v1alpha1 +package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto deleted file mode 100644 index 51ac1605b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -message ClusterRole { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this ClusterRole - repeated PolicyRule rules = 2; -} - -// ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -message ClusterRoleBinding { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - repeated Subject subjects = 2; - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -message ClusterRoleBindingList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ClusterRoleBindings - repeated ClusterRoleBinding items = 2; -} - -// ClusterRoleList is a collection of ClusterRoles -message ClusterRoleList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ClusterRoles - repeated ClusterRole items = 2; -} - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - repeated string verbs = 1; - - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. - // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - optional k8s.io.kubernetes.pkg.runtime.RawExtension attributeRestrictions = 2; - - // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - repeated string apiGroups = 3; - - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - repeated string resources = 4; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - repeated string resourceNames = 5; - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - repeated string nonResourceURLs = 6; -} - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -message Role { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this Role - repeated PolicyRule rules = 2; -} - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -message RoleBinding { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. 
- repeated Subject subjects = 2; - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; -} - -// RoleBindingList is a collection of RoleBindings -message RoleBindingList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of RoleBindings - repeated RoleBinding items = 2; -} - -// RoleList is a collection of Roles -message RoleList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of Roles - repeated Role items = 2; -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -message Subject { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - optional string kind = 1; - - // APIVersion holds the API group and version of the referenced object. - optional string apiVersion = 2; - - // Name of the object being referenced. - optional string name = 3; - - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - optional string namespace = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go index a7eb30b64..51dbb344c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io -package storage +package storage // import "k8s.io/kubernetes/pkg/apis/storage" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go new file mode 100644 index 000000000..49eada993 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/helpers.go @@ -0,0 +1,136 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/kubernetes/pkg/api" +) + +// IsDefaultStorageClassAnnotation represents a StorageClass annotation that +// marks a class as the default StorageClass +//TODO: Update IsDefaultStorageClassannotation and remove Beta when no longer used +const IsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" +const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" + +// AlphaStorageClassAnnotation represents the previous alpha storage class +// annotation. it's no longer used and held here for posterity. +const AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class" + +// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
+// It's currently still used and will be held for backwards compatibility +const BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + +// StorageClassAnnotation represents the storage class associated with a resource. +// It currently matches the Beta value and can change when official is set. +// - in PersistentVolumeClaim it represents required class to match. +// Only PersistentVolumes with the same class (i.e. annotation with the same +// value) can be bound to the claim. In case no such volume exists, the +// controller will provision a new one using StorageClass instance with +// the same name as the annotation value. +// - in PersistentVolume it represents storage class to which the persistent +// volume belongs. +//TODO: Update this to final annotation value as it matches BetaStorageClassAnnotation for now +const StorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + +// GetVolumeStorageClass returns value of StorageClassAnnotation or empty string in case +// the annotation does not exist. +// TODO: change to PersistentVolume.Spec.Class value when this attribute is +// introduced. +func GetVolumeStorageClass(volume *api.PersistentVolume) string { + if class, found := volume.Annotations[StorageClassAnnotation]; found { + return class + } + + // 'nil' is interpreted as "", i.e. the volume does not belong to any class. + return "" +} + +// GetClaimStorageClass returns name of class that is requested by given claim. +// Request for `nil` class is interpreted as request for class "", +// i.e. for a classless PV. +// TODO: change to PersistentVolumeClaim.Spec.Class value when this +// attribute is introduced. 
+func GetClaimStorageClass(claim *api.PersistentVolumeClaim) string { + if class, found := claim.Annotations[StorageClassAnnotation]; found { + return class + } + + return "" +} + +// GetStorageClassAnnotation returns the StorageClass value +// if the annotation is set, empty string if not +// TODO: remove Alpha and Beta when no longer used or needed +func GetStorageClassAnnotation(obj api.ObjectMeta) string { + if class, ok := obj.Annotations[StorageClassAnnotation]; ok { + return class + } + if class, ok := obj.Annotations[BetaStorageClassAnnotation]; ok { + return class + } + if class, ok := obj.Annotations[AlphaStorageClassAnnotation]; ok { + return class + } + + return "" +} + +// HasStorageClassAnnotation returns a boolean +// if the annotation is set +// TODO: remove Alpha and Beta when no longer used or needed +func HasStorageClassAnnotation(obj api.ObjectMeta) bool { + if _, found := obj.Annotations[StorageClassAnnotation]; found { + return found + } + if _, found := obj.Annotations[BetaStorageClassAnnotation]; found { + return found + } + if _, found := obj.Annotations[AlphaStorageClassAnnotation]; found { + return found + } + + return false + +} + +// IsDefaultAnnotationText returns a pretty Yes/No String if +// the annotation is set +// TODO: remove Beta when no longer needed +func IsDefaultAnnotationText(obj api.ObjectMeta) string { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return "Yes" + } + + return "No" +} + +// IsDefaultAnnotation returns a boolean if +// the annotation is set +// TODO: remove Beta when no longer needed +func IsDefaultAnnotation(obj api.ObjectMeta) bool { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return true + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return true + } + + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go 
b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go index fd556e8b6..13d14635d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/storage // +groupName=storage.k8s.io -package v1beta1 +package v1beta1 // import "k8s.io/kubernetes/pkg/apis/storage/v1beta1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto deleted file mode 100644 index 36aa6e9ef..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.storage.v1beta1; - -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. 
-// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -message StorageClass { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Provisioner indicates the type of the provisioner. - optional string provisioner = 2; - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - map parameters = 3; -} - -// StorageClassList is a collection of storage classes. -message StorageClassList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of StorageClasses - repeated StorageClass items = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go b/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go index 570c51ae9..589656f0c 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package user contains utilities for dealing with simple user exchange in the auth // packages. The user.Info interface defines an interface for exchanging that info. -package user +package user // import "k8s.io/kubernetes/pkg/auth/user" diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go index e2042a881..bbdc89d50 100644 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go @@ -15,4 +15,4 @@ limitations under the License. 
*/ // package capabilities manages system level capabilities -package capabilities +package capabilities // import "k8s.io/kubernetes/pkg/capabilities" diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go b/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go index 4f593f0d3..67dd3dd45 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go @@ -21,4 +21,4 @@ limitations under the License. // list currently available nodes), and one that additionally acts as // a FIFO queue (for example, to allow a scheduler to process incoming // pods). -package cache +package cache // import "k8s.io/kubernetes/pkg/client/cache" diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/doc.go b/vendor/k8s.io/kubernetes/pkg/client/record/doc.go index 0dc790696..4c1fbaff3 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/record/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/record/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package record has all client logic for recording and reporting events. -package record +package record // import "k8s.io/kubernetes/pkg/client/record" diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go index 82c1ac2cf..8d5ea891b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go +++ b/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go @@ -25,6 +25,7 @@ import ( "path" gruntime "runtime" "strings" + "time" "github.com/golang/glog" @@ -109,6 +110,9 @@ type Config struct { // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst RateLimiter flowcontrol.RateLimiter + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. + Timeout time.Duration + // Version forces a specific version to be used (if registered) // Do we need this? 
// Version string @@ -185,6 +189,9 @@ func RESTClientFor(config *Config) (*RESTClient, error) { var httpClient *http.Client if transport != http.DefaultTransport { httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } } return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) @@ -210,6 +217,9 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { var httpClient *http.Client if transport != http.DefaultTransport { httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } } versionConfig := config.ContentConfig diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go index 339b5d847..f58704c8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go @@ -27,6 +27,9 @@ import ( "github.com/golang/glog" "github.com/imdario/mergo" + "strconv" + "time" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/restclient" clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" @@ -34,16 +37,25 @@ import ( ) var ( - // DefaultCluster is the cluster config used when no other config is specified - // TODO: eventually apiserver should start on 443 and be secure by default - DefaultCluster = clientcmdapi.Cluster{Server: "http://localhost:8080"} - - // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name - EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")} - - DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil, NewDefaultClientConfigLoadingRules()} + // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields + // DEPRECATED will be 
replaced + ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()} + // DefaultClientConfig represents the legacy behavior of this package for defaulting + // DEPRECATED will be replace + DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ + ClusterDefaults: ClusterDefaults, + }, nil, NewDefaultClientConfigLoadingRules()} ) +// getDefaultServer returns a default setting for DefaultClientConfig +// DEPRECATED +func getDefaultServer() string { + if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 { + return server + } + return "http://localhost:8080" +} + // ClientConfig is used to make it easy to get an api server client type ClientConfig interface { // RawConfig returns the merged result of all overrides @@ -90,15 +102,42 @@ func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { // ClientConfig implements ClientConfig func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { - if err := config.ConfirmUsable(); err != nil { + // check that getAuthInfo, getContext, and getCluster do not return an error. + // Do this before checking if the curent config is usable in the event that an + // AuthInfo, Context, or Cluster config with user-defined names are not found. 
+ // This provides a user with the immediate cause for error if one is found + configAuthInfo, err := config.getAuthInfo() + if err != nil { + return nil, err + } + + _, err = config.getContext() + if err != nil { + return nil, err + } + + configClusterInfo, err := config.getCluster() + if err != nil { return nil, err } - configAuthInfo := config.getAuthInfo() - configClusterInfo := config.getCluster() + if err := config.ConfirmUsable(); err != nil { + return nil, err + } clientConfig := &restclient.Config{} clientConfig.Host = configClusterInfo.Server + + if len(config.overrides.Timeout) > 0 { + if i, err := strconv.ParseInt(config.overrides.Timeout, 10, 64); err == nil && i >= 0 { + clientConfig.Timeout = time.Duration(i) * time.Second + } else if requestTimeout, err := time.ParseDuration(config.overrides.Timeout); err == nil { + clientConfig.Timeout = requestTimeout + } else { + return nil, fmt.Errorf("Invalid value for option '--request-timeout'. Value must be a single integer, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") + } + } + if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { u.RawQuery = "" u.Fragment = "" @@ -117,7 +156,8 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { // Our mergo.Merge version is older than this change. 
var persister restclient.AuthProviderConfigPersister if config.configAccess != nil { - persister = PersisterForUser(config.configAccess, config.getAuthInfoName()) + authInfoName, _ := config.getAuthInfoName() + persister = PersisterForUser(config.configAccess, authInfoName) } userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) if err != nil { @@ -241,7 +281,10 @@ func (config *DirectClientConfig) Namespace() (string, bool, error) { return "", false, err } - configContext := config.getContext() + configContext, err := config.getContext() + if err != nil { + return "", false, err + } if len(configContext.Namespace) == 0 { return api.NamespaceDefault, false, nil @@ -263,8 +306,12 @@ func (config *DirectClientConfig) ConfigAccess() ConfigAccess { // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func (config *DirectClientConfig) ConfirmUsable() error { validationErrors := make([]error, 0) - validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...) - validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...) + authInfoName, _ := config.getAuthInfoName() + authInfo, _ := config.getAuthInfo() + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...) + clusterName, _ := config.getClusterName() + cluster, _ := config.getCluster() + validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...) 
// when direct client config is specified, and our only error is that no server is defined, we should // return a standard "no config" error if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { @@ -273,66 +320,84 @@ func (config *DirectClientConfig) ConfirmUsable() error { return newErrConfigurationInvalid(validationErrors) } -func (config *DirectClientConfig) getContextName() string { +// getContextName returns the default, or user-set context name, and a boolean that indicates +// whether the default context name has been overwritten by a user-set flag, or left as its default value +func (config *DirectClientConfig) getContextName() (string, bool) { if len(config.overrides.CurrentContext) != 0 { - return config.overrides.CurrentContext + return config.overrides.CurrentContext, true } if len(config.contextName) != 0 { - return config.contextName + return config.contextName, false } - return config.config.CurrentContext + return config.config.CurrentContext, false } -func (config *DirectClientConfig) getAuthInfoName() string { +// getAuthInfoName returns a string containing the current authinfo name for the current context, +// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or +// left as its default value +func (config *DirectClientConfig) getAuthInfoName() (string, bool) { if len(config.overrides.Context.AuthInfo) != 0 { - return config.overrides.Context.AuthInfo + return config.overrides.Context.AuthInfo, true } - return config.getContext().AuthInfo + context, _ := config.getContext() + return context.AuthInfo, false } -func (config *DirectClientConfig) getClusterName() string { +// getClusterName returns a string containing the default, or user-set cluster name, and a boolean +// indicating whether the default clusterName has been overwritten by a user-set flag, or left as +// its default value +func (config *DirectClientConfig) getClusterName() (string, bool) { if 
len(config.overrides.Context.Cluster) != 0 { - return config.overrides.Context.Cluster + return config.overrides.Context.Cluster, true } - return config.getContext().Cluster + context, _ := config.getContext() + return context.Cluster, false } -func (config *DirectClientConfig) getContext() clientcmdapi.Context { +// getContext returns the clientcmdapi.Context, or an error if a required context is not found. +func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { contexts := config.config.Contexts - contextName := config.getContextName() + contextName, required := config.getContextName() var mergedContext clientcmdapi.Context if configContext, exists := contexts[contextName]; exists { mergo.Merge(&mergedContext, configContext) + } else if required { + return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) } mergo.Merge(&mergedContext, config.overrides.Context) - return mergedContext + return mergedContext, nil } -func (config *DirectClientConfig) getAuthInfo() clientcmdapi.AuthInfo { +// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. +func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { authInfos := config.config.AuthInfos - authInfoName := config.getAuthInfoName() + authInfoName, required := config.getAuthInfoName() var mergedAuthInfo clientcmdapi.AuthInfo if configAuthInfo, exists := authInfos[authInfoName]; exists { mergo.Merge(&mergedAuthInfo, configAuthInfo) + } else if required { + return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) } mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo) - return mergedAuthInfo + return mergedAuthInfo, nil } -func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { +// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. 
+func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { clusterInfos := config.config.Clusters - clusterInfoName := config.getClusterName() + clusterInfoName, required := config.getClusterName() var mergedClusterInfo clientcmdapi.Cluster mergo.Merge(&mergedClusterInfo, config.overrides.ClusterDefaults) - mergo.Merge(&mergedClusterInfo, EnvVarCluster) if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { mergo.Merge(&mergedClusterInfo, configClusterInfo) + } else if required { + return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) } mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo) // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data @@ -344,7 +409,7 @@ func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { mergedClusterInfo.CertificateAuthorityData = nil } - return mergedClusterInfo + return mergedClusterInfo, nil } // inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go index 30ef6f36d..b5261697b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go @@ -34,4 +34,4 @@ Sample usage from merged .kubeconfig files (local directory, home directory) client, err := unversioned.New(config) // ... 
*/ -package clientcmd +package clientcmd // import "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go index a1ea68658..4aa42ad99 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go @@ -23,6 +23,7 @@ import ( "os" "path" "path/filepath" + "reflect" goruntime "runtime" "strings" @@ -30,6 +31,7 @@ import ( "github.com/imdario/mergo" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/client/restclient" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" "k8s.io/kubernetes/pkg/runtime" @@ -65,6 +67,9 @@ func currentMigrationRules() map[string]string { type ClientConfigLoader interface { ConfigAccess + // IsDefaultConfig returns true if the returned config matches the defaults. + IsDefaultConfig(*restclient.Config) bool + // Load returns the latest config Load() (*clientcmdapi.Config, error) } @@ -96,6 +101,9 @@ func (g *ClientConfigGetter) IsExplicitFile() bool { func (g *ClientConfigGetter) GetExplicitFile() string { return "" } +func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool { + return false +} // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config // Callers can put the chain together however they want, but we'd recommend: @@ -112,6 +120,10 @@ type ClientConfigLoadingRules struct { // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so // that a default object that doesn't set this will usually get the behavior it wants. 
DoNotResolvePaths bool + + // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. + // This should match the overrides passed in to ClientConfig loader. + DefaultClientConfig ClientConfig } // ClientConfigLoadingRules implements the ClientConfigLoader interface. @@ -192,6 +204,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { // first merge all of our maps mapConfig := clientcmdapi.NewConfig() + for _, kubeconfig := range kubeconfigs { mergo.Merge(mapConfig, kubeconfig) } @@ -316,6 +329,18 @@ func (rules *ClientConfigLoadingRules) GetExplicitFile() string { return rules.ExplicitPath } +// IsDefaultConfig returns true if the provided configuration matches the default +func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool { + if rules.DefaultClientConfig == nil { + return false + } + defaultConfig, err := rules.DefaultClientConfig.ClientConfig() + if err != nil { + return false + } + return reflect.DeepEqual(config, defaultConfig) +} + // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { kubeconfigBytes, err := ioutil.ReadFile(filename) diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go index f64903335..4c3645121 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go @@ -18,7 +18,6 @@ package clientcmd import ( "io" - "reflect" "sync" "github.com/golang/glog" @@ -105,17 +104,15 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e // content differs from the default config mergedConfig, err := mergedClientConfig.ClientConfig() switch { - case err != nil && 
!IsEmptyConfig(err): - // return on any error except empty config - return nil, err + case err != nil: + if !IsEmptyConfig(err) { + // return on any error except empty config + return nil, err + } case mergedConfig != nil: - // if the configuration has any settings at all, we cannot use ICC - // TODO: we need to discriminate better between "empty due to env" and - // "empty due to defaults" - // TODO: this shouldn't be a global - the client config rules should be - // handling this. - defaultConfig, err := DefaultClientConfig.ClientConfig() - if err == nil && !reflect.DeepEqual(mergedConfig, defaultConfig) { + // the configuration is valid, but if this is equal to the defaults we should try + // in-cluster configuration + if !config.loader.IsDefaultConfig(mergedConfig) { return mergedConfig, nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go index 9c117ea35..626cdeaae 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go @@ -33,6 +33,7 @@ type ConfigOverrides struct { ClusterInfo clientcmdapi.Cluster Context clientcmdapi.Context CurrentContext string + Timeout string } // ConfigOverrideFlags holds the flag names to be used for binding command line flags. 
Notice that this structure tightly @@ -42,6 +43,7 @@ type ConfigOverrideFlags struct { ClusterOverrideFlags ClusterOverrideFlags ContextOverrideFlags ContextOverrideFlags CurrentContext FlagInfo + Timeout FlagInfo } // AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects @@ -121,6 +123,7 @@ const ( FlagImpersonate = "as" FlagUsername = "username" FlagPassword = "password" + FlagTimeout = "request-timeout" ) // RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing @@ -151,7 +154,9 @@ func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), - CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests."}, } } @@ -190,6 +195,7 @@ func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNam BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) + flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) } // BindFlags is a convenience method to bind the specified flags to their associated variables diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go index dac3925b4..41cd0e025 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go @@ -54,4 +54,4 @@ The RESTClient type implements the Kubernetes API conventions (see `docs/devel/a for a given API path and is intended for use by consumers implementing their own Kubernetes compatible APIs. 
*/ -package unversioned +package unversioned // import "k8s.io/kubernetes/pkg/client/unversioned" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/OWNERS deleted file mode 100644 index ac8301602..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -assignees: - - bprashanth - - derekwaynecarr - - mikedanese diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index 8a9203372..d0def9d2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -26,11 +26,13 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/annotations" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/integer" intstrutil "k8s.io/kubernetes/pkg/util/intstr" @@ -686,8 +688,12 @@ func filterPodsMatchingReplicaSets(replicaSets []*extensions.ReplicaSet, podList } // Revision returns the revision number of the input replica set -func Revision(rs *extensions.ReplicaSet) (int64, error) { - v, ok := rs.Annotations[RevisionAnnotation] +func Revision(obj runtime.Object) (int64, error) { + acc, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + v, ok := acc.GetAnnotations()[RevisionAnnotation] if !ok { return 0, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/doc.go index ded390582..3c5c943da 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/doc.go 
@@ -16,4 +16,4 @@ limitations under the License. // Package controller contains code for controllers (like the replication // controller). -package controller +package controller // import "k8s.io/kubernetes/pkg/controller" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go index feceba36c..1f65fe65e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package framework implements all the grunt work involved in running a simple controller. -package framework +package framework // import "k8s.io/kubernetes/pkg/controller/framework" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/informers/core.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/informers/core.go deleted file mode 100644 index a4f40b587..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/informers/core.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package informers - -import ( - "reflect" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/controller/framework" -) - -// PodInformer is type of SharedIndexInformer which watches and lists all pods. 
-// Interface provides constructor for informer and lister for pods -type PodInformer interface { - Informer() framework.SharedIndexInformer - Lister() *cache.StoreToPodLister -} - -type podInformer struct { - *sharedInformerFactory -} - -// Informer checks whether podInformer exists in sharedInformerFactory and if not, it creates new informer of type -// podInformer and connects it to sharedInformerFactory -func (f *podInformer) Informer() framework.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(&api.Pod{}) - informer, exists := f.informers[informerType] - if exists { - return informer - } - informer = NewPodInformer(f.client, f.defaultResync) - f.informers[informerType] = informer - - return informer -} - -// Lister returns lister for podInformer -func (f *podInformer) Lister() *cache.StoreToPodLister { - informer := f.Informer() - return &cache.StoreToPodLister{Indexer: informer.GetIndexer()} -} - -//***************************************************************************** - -// NamespaceInformer is type of SharedIndexInformer which watches and lists all namespaces. 
-// Interface provides constructor for informer and lister for namsespaces -type NamespaceInformer interface { - Informer() framework.SharedIndexInformer - Lister() *cache.IndexerToNamespaceLister -} - -type namespaceInformer struct { - *sharedInformerFactory -} - -// Informer checks whether namespaceInformer exists in sharedInformerFactory and if not, it creates new informer of type -// namespaceInformer and connects it to sharedInformerFactory -func (f *namespaceInformer) Informer() framework.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(&api.Namespace{}) - informer, exists := f.informers[informerType] - if exists { - return informer - } - informer = NewNamespaceInformer(f.client, f.defaultResync) - f.informers[informerType] = informer - - return informer -} - -// Lister returns lister for namespaceInformer -func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister { - informer := f.Informer() - return &cache.IndexerToNamespaceLister{Indexer: informer.GetIndexer()} -} - -//***************************************************************************** - -// NodeInformer is type of SharedIndexInformer which watches and lists all nodes. 
-// Interface provides constructor for informer and lister for nodes -type NodeInformer interface { - Informer() framework.SharedIndexInformer - Lister() *cache.StoreToNodeLister -} - -type nodeInformer struct { - *sharedInformerFactory -} - -// Informer checks whether nodeInformer exists in sharedInformerFactory and if not, it creates new informer of type -// nodeInformer and connects it to sharedInformerFactory -func (f *nodeInformer) Informer() framework.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(&api.Node{}) - informer, exists := f.informers[informerType] - if exists { - return informer - } - informer = NewNodeInformer(f.client, f.defaultResync) - f.informers[informerType] = informer - - return informer -} - -// Lister returns lister for nodeInformer -func (f *nodeInformer) Lister() *cache.StoreToNodeLister { - informer := f.Informer() - return &cache.StoreToNodeLister{Store: informer.GetStore()} -} - -//***************************************************************************** - -// PVCInformer is type of SharedIndexInformer which watches and lists all persistent volume claims. 
-// Interface provides constructor for informer and lister for persistent volume claims -type PVCInformer interface { - Informer() framework.SharedIndexInformer - Lister() *cache.StoreToPVCFetcher -} - -type pvcInformer struct { - *sharedInformerFactory -} - -// Informer checks whether pvcInformer exists in sharedInformerFactory and if not, it creates new informer of type -// pvcInformer and connects it to sharedInformerFactory -func (f *pvcInformer) Informer() framework.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(&api.PersistentVolumeClaim{}) - informer, exists := f.informers[informerType] - if exists { - return informer - } - informer = NewPVCInformer(f.client, f.defaultResync) - f.informers[informerType] = informer - - return informer -} - -// Lister returns lister for pvcInformer -func (f *pvcInformer) Lister() *cache.StoreToPVCFetcher { - informer := f.Informer() - return &cache.StoreToPVCFetcher{Store: informer.GetStore()} -} - -//***************************************************************************** - -// PVInformer is type of SharedIndexInformer which watches and lists all persistent volumes. 
-// Interface provides constructor for informer and lister for persistent volumes -type PVInformer interface { - Informer() framework.SharedIndexInformer - Lister() *cache.StoreToPVFetcher -} - -type pvInformer struct { - *sharedInformerFactory -} - -// Informer checks whether pvInformer exists in sharedInformerFactory and if not, it creates new informer of type -// pvInformer and connects it to sharedInformerFactory -func (f *pvInformer) Informer() framework.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(&api.PersistentVolume{}) - informer, exists := f.informers[informerType] - if exists { - return informer - } - informer = NewPVInformer(f.client, f.defaultResync) - f.informers[informerType] = informer - - return informer -} - -// Lister returns lister for pvInformer -func (f *pvInformer) Lister() *cache.StoreToPVFetcher { - informer := f.Informer() - return &cache.StoreToPVFetcher{Store: informer.GetStore()} -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go deleted file mode 100644 index de1a6918d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/informers/factory.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package informers - -import ( - "reflect" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// SharedInformerFactory provides interface which holds unique informers for pods, nodes, namespaces, persistent volume -// claims and persistent volumes -type SharedInformerFactory interface { - // Start starts informers that can start AFTER the API server and controllers have started - Start(stopCh <-chan struct{}) - - Pods() PodInformer - Nodes() NodeInformer - Namespaces() NamespaceInformer - PersistentVolumeClaims() PVCInformer - PersistentVolumes() PVInformer -} - -type sharedInformerFactory struct { - client clientset.Interface - lock sync.Mutex - defaultResync time.Duration - - informers map[reflect.Type]framework.SharedIndexInformer - // startedInformers is used for tracking which informers have been started - // this allows calling of Start method multiple times - startedInformers map[reflect.Type]bool -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory -func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory { - return &sharedInformerFactory{ - client: client, - defaultResync: defaultResync, - informers: make(map[reflect.Type]framework.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - } -} - -// Start initializes all requested informers. 
-func (s *sharedInformerFactory) Start(stopCh <-chan struct{}) { - s.lock.Lock() - defer s.lock.Unlock() - - for informerType, informer := range s.informers { - if !s.startedInformers[informerType] { - go informer.Run(stopCh) - s.startedInformers[informerType] = true - } - } -} - -// Pods returns a SharedIndexInformer that lists and watches all pods -func (f *sharedInformerFactory) Pods() PodInformer { - return &podInformer{sharedInformerFactory: f} -} - -// Nodes returns a SharedIndexInformer that lists and watches all nodes -func (f *sharedInformerFactory) Nodes() NodeInformer { - return &nodeInformer{sharedInformerFactory: f} -} - -// Namespaces returns a SharedIndexInformer that lists and watches all namespaces -func (f *sharedInformerFactory) Namespaces() NamespaceInformer { - return &namespaceInformer{sharedInformerFactory: f} -} - -// PersistentVolumeClaims returns a SharedIndexInformer that lists and watches all persistent volume claims -func (f *sharedInformerFactory) PersistentVolumeClaims() PVCInformer { - return &pvcInformer{sharedInformerFactory: f} -} - -// PersistentVolumes returns a SharedIndexInformer that lists and watches all persistent volumes -func (f *sharedInformerFactory) PersistentVolumes() PVInformer { - return &pvInformer{sharedInformerFactory: f} -} - -// NewPodInformer returns a SharedIndexInformer that lists and watches all pods -func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { - sharedIndexInformer := framework.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.Core().Pods(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.Core().Pods(api.NamespaceAll).Watch(options) - }, - }, - &api.Pod{}, - resyncPeriod, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - - return sharedIndexInformer -} - -// 
NewNodeInformer returns a SharedIndexInformer that lists and watches all nodes -func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { - sharedIndexInformer := framework.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.Core().Nodes().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.Core().Nodes().Watch(options) - }, - }, - &api.Node{}, - resyncPeriod, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - - return sharedIndexInformer -} - -// NewPVCInformer returns a SharedIndexInformer that lists and watches all PVCs -func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { - sharedIndexInformer := framework.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) - }, - }, - &api.PersistentVolumeClaim{}, - resyncPeriod, - cache.Indexers{}) - - return sharedIndexInformer -} - -// NewPVInformer returns a SharedIndexInformer that lists and watches all PVs -func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { - sharedIndexInformer := framework.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.Core().PersistentVolumes().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.Core().PersistentVolumes().Watch(options) - }, - }, - &api.PersistentVolume{}, - resyncPeriod, - cache.Indexers{}) - - return sharedIndexInformer -} - -// NewNamespaceInformer returns a 
SharedIndexInformer that lists and watches namespaces -func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) framework.SharedIndexInformer { - sharedIndexInformer := framework.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return client.Core().Namespaces().List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return client.Core().Namespaces().Watch(options) - }, - }, - &api.Namespace{}, - resyncPeriod, - cache.Indexers{}) - - return sharedIndexInformer -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go index b1e786278..9b2d2414d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go @@ -25,6 +25,9 @@ import ( "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" + + "github.com/golang/glog" ) // if you use this, there is one behavior change compared to a standard Informer. @@ -77,6 +80,34 @@ func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resy return sharedIndexInformer } +// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. +type InformerSynced func() bool + +// syncedPollPeriod controls how often you look at the status of your sync funcs +const syncedPollPeriod = 100 * time.Millisecond + +// WaitForCacheSync waits for caches to populate. 
It returns true if it was successful, false +// if the contoller should shutdown +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + glog.V(2).Infof("stop requested") + return false + } + + glog.V(4).Infof("caches populated") + return true +} + type sharedIndexInformer struct { indexer cache.Indexer controller *Controller diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/doc.go deleted file mode 100644 index eb0f42158..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package replication contains logic for watching and synchronizing -// replication controllers. -package replication diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go deleted file mode 100644 index 679ca827e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go +++ /dev/null @@ -1,738 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// If you make changes to this file, you should also make the corresponding change in ReplicaSet. - -package replication - -import ( - "reflect" - "sort" - "sync" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" - "k8s.io/kubernetes/pkg/client/record" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/controller/framework/informers" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/metrics" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/workqueue" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - // We'll attempt to recompute the required replicas of all replication controllers - // that have fulfilled their expectations at least this often. This recomputation - // happens based on contents in local pod storage. - // Full Resync shouldn't be needed at all in a healthy system. 
This is a protection - // against disappearing objects and watch notification, that we believe should not - // happen at all. - // TODO: We should get rid of it completely in the fullness of time. - FullControllerResyncPeriod = 10 * time.Minute - - // Realistic value of the burstReplica field for the replication manager based off - // performance requirements for kubernetes 1.0. - BurstReplicas = 500 - - // We must avoid counting pods until the pod store has synced. If it hasn't synced, to - // avoid a hot loop, we'll wait this long between checks. - PodStoreSyncedPollPeriod = 100 * time.Millisecond - - // The number of times we retry updating a replication controller's status. - statusUpdateRetries = 1 -) - -func getRCKind() unversioned.GroupVersionKind { - return v1.SchemeGroupVersion.WithKind("ReplicationController") -} - -// ReplicationManager is responsible for synchronizing ReplicationController objects stored -// in the system with actual running pods. -// TODO: this really should be called ReplicationController. The only reason why it's a Manager -// is to distinguish this type from API object "ReplicationController". We should fix this. -type ReplicationManager struct { - kubeClient clientset.Interface - podControl controller.PodControlInterface - - // internalPodInformer is used to hold a personal informer. If we're using - // a normal shared informer, then the informer will be started for us. If - // we have a personal informer, we must start it ourselves. If you start - // the controller using NewReplicationManager(passing SharedInformer), this - // will be null - internalPodInformer framework.SharedIndexInformer - - // An rc is temporarily suspended after creating/deleting these many replicas. - // It resumes normal action after observing the watch events for them. - burstReplicas int - // To allow injection of syncReplicationController for testing. - syncHandler func(rcKey string) error - - // A TTLCache of pod creates/deletes each rc expects to see. 
- expectations *controller.UIDTrackingControllerExpectations - - // A store of replication controllers, populated by the rcController - rcStore cache.StoreToReplicationControllerLister - // Watches changes to all replication controllers - rcController *framework.Controller - // A store of pods, populated by the podController - podStore cache.StoreToPodLister - // Watches changes to all pods - podController framework.ControllerInterface - // podStoreSynced returns true if the pod store has been synced at least once. - // Added as a member to the struct to allow injection for testing. - podStoreSynced func() bool - - lookupCache *controller.MatchingCache - - // Controllers that need to be synced - queue workqueue.RateLimitingInterface - - // garbageCollectorEnabled denotes if the garbage collector is enabled. RC - // manager behaves differently if GC is enabled. - garbageCollectorEnabled bool -} - -// NewReplicationManager creates a replication manager -func NewReplicationManager(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) - return newReplicationManager( - eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}), - podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled) -} - -// newReplicationManager configures a replication manager with the specified event recorder -func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { - if 
kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter()) - } - - rm := &ReplicationManager{ - kubeClient: kubeClient, - podControl: controller.RealPodControl{ - KubeClient: kubeClient, - Recorder: eventRecorder, - }, - burstReplicas: burstReplicas, - expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicationmanager"), - garbageCollectorEnabled: garbageCollectorEnabled, - } - - rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer( - &cache.ListWatch{ - ListFunc: func(options api.ListOptions) (runtime.Object, error) { - return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) - }, - WatchFunc: func(options api.ListOptions) (watch.Interface, error) { - return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options) - }, - }, - &api.ReplicationController{}, - // TODO: Can we have much longer period here? - FullControllerResyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: rm.enqueueController, - UpdateFunc: rm.updateRC, - // This will enter the sync loop and no-op, because the controller has been deleted from the store. - // Note that deleting a controller immediately after scaling it to 0 will not work. The recommended - // way of achieving this is by performing a `stop` operation on the controller. - DeleteFunc: rm.enqueueController, - }, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - - podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{ - AddFunc: rm.addPod, - // This invokes the rc for every pod change, eg: host assignment. 
Though this might seem like overkill - // the most frequent pod update is status, and the associated rc will only list from local storage, so - // it should be ok. - UpdateFunc: rm.updatePod, - DeleteFunc: rm.deletePod, - }) - rm.podStore.Indexer = podInformer.GetIndexer() - rm.podController = podInformer.GetController() - - rm.syncHandler = rm.syncReplicationController - rm.podStoreSynced = rm.podController.HasSynced - rm.lookupCache = controller.NewMatchingCache(lookupCacheSize) - return rm -} - -// NewReplicationManagerFromClientForIntegration creates a new ReplicationManager that runs its own informer. It disables event recording for use in integration tests. -func NewReplicationManagerFromClientForIntegration(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { - podInformer := informers.NewPodInformer(kubeClient, resyncPeriod()) - garbageCollectorEnabled := false - rm := newReplicationManager(&record.FakeRecorder{}, podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled) - rm.internalPodInformer = podInformer - return rm -} - -// NewReplicationManagerFromClient creates a new ReplicationManager that runs its own informer. -func NewReplicationManagerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager { - podInformer := informers.NewPodInformer(kubeClient, resyncPeriod()) - garbageCollectorEnabled := false - rm := NewReplicationManager(podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled) - rm.internalPodInformer = podInformer - - return rm -} - -// SetEventRecorder replaces the event recorder used by the replication manager -// with the given recorder. Only used for testing. -func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) { - // TODO: Hack. 
We can't cleanly shutdown the event recorder, so benchmarks - // need to pass in a fake. - rm.podControl = controller.RealPodControl{KubeClient: rm.kubeClient, Recorder: recorder} -} - -// Run begins watching and syncing. -func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - glog.Infof("Starting RC Manager") - go rm.rcController.Run(stopCh) - go rm.podController.Run(stopCh) - for i := 0; i < workers; i++ { - go wait.Until(rm.worker, time.Second, stopCh) - } - - if rm.internalPodInformer != nil { - go rm.internalPodInformer.Run(stopCh) - } - - <-stopCh - glog.Infof("Shutting down RC Manager") - rm.queue.ShutDown() -} - -// getPodController returns the controller managing the given pod. -// TODO: Surface that we are ignoring multiple controllers for a single pod. -// TODO: use ownerReference.Controller to determine if the rc controls the pod. -func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationController { - // look up in the cache, if cached and the cache is valid, just return cached value - if obj, cached := rm.lookupCache.GetMatchingObject(pod); cached { - controller, ok := obj.(*api.ReplicationController) - if !ok { - // This should not happen - glog.Errorf("lookup cache does not return a ReplicationController object") - return nil - } - if cached && rm.isCacheValid(pod, controller) { - return controller - } - } - - // if not cached or cached value is invalid, search all the rc to find the matching one, and update cache - controllers, err := rm.rcStore.GetPodControllers(pod) - if err != nil { - glog.V(4).Infof("No controllers found for pod %v, replication manager will avoid syncing", pod.Name) - return nil - } - // In theory, overlapping controllers is user error. 
This sorting will not prevent - // oscillation of replicas in all cases, eg: - // rc1 (older rc): [(k1=v1)], replicas=1 rc2: [(k2=v2)], replicas=2 - // pod: [(k1:v1), (k2:v2)] will wake both rc1 and rc2, and we will sync rc1. - // pod: [(k2:v2)] will wake rc2 which creates a new replica. - if len(controllers) > 1 { - // More than two items in this list indicates user error. If two replication-controller - // overlap, sort by creation timestamp, subsort by name, then pick - // the first. - glog.Errorf("user error! more than one replication controller is selecting pods with labels: %+v", pod.Labels) - sort.Sort(OverlappingControllers(controllers)) - } - - // update lookup cache - rm.lookupCache.Update(pod, &controllers[0]) - - return &controllers[0] -} - -// isCacheValid check if the cache is valid -func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.ReplicationController) bool { - exists, err := rm.rcStore.Exists(cachedRC) - // rc has been deleted or updated, cache is invalid - if err != nil || !exists || !isControllerMatch(pod, cachedRC) { - return false - } - return true -} - -// isControllerMatch take a Pod and ReplicationController, return whether the Pod and ReplicationController are matching -// TODO(mqliang): This logic is a copy from GetPodControllers(), remove the duplication -func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool { - if rc.Namespace != pod.Namespace { - return false - } - selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() - - // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - return false - } - return true -} - -// callback when RC is updated -func (rm *ReplicationManager) updateRC(old, cur interface{}) { - oldRC := old.(*api.ReplicationController) - curRC := cur.(*api.ReplicationController) - - // We should invalidate the whole lookup cache if a RC's selector has been updated. 
- // - // Imagine that you have two RCs: - // * old RC1 - // * new RC2 - // You also have a pod that is attached to RC2 (because it doesn't match RC1 selector). - // Now imagine that you are changing RC1 selector so that it is now matching that pod, - // in such case, we must invalidate the whole cache so that pod could be adopted by RC1 - // - // This makes the lookup cache less helpful, but selector update does not happen often, - // so it's not a big problem - if !reflect.DeepEqual(oldRC.Spec.Selector, curRC.Spec.Selector) { - rm.lookupCache.InvalidateAll() - } - // TODO: Remove when #31981 is resolved! - glog.Infof("Observed updated replication controller %v. Desired pod count change: %d->%d", curRC.Name, oldRC.Spec.Replicas, curRC.Spec.Replicas) - - // You might imagine that we only really need to enqueue the - // controller when Spec changes, but it is safer to sync any - // time this function is triggered. That way a full informer - // resync can requeue any controllers that don't yet have pods - // but whose last attempts at creating a pod have failed (since - // we don't block on creation of pods) instead of those - // controllers stalling indefinitely. Enqueueing every time - // does result in some spurious syncs (like when Status.Replica - // is updated and the watch notification from it retriggers - // this function), but in general extra resyncs shouldn't be - // that bad as rcs that haven't met expectations yet won't - // sync, and all the listing is done using local stores. - if oldRC.Status.Replicas != curRC.Status.Replicas { - // TODO: Should we log status or spec? - glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas) - } - rm.enqueueController(cur) -} - -// When a pod is created, enqueue the controller that manages it and update it's expectations. 
-func (rm *ReplicationManager) addPod(obj interface{}) { - pod := obj.(*api.Pod) - - rc := rm.getPodController(pod) - if rc == nil { - return - } - rcKey, err := controller.KeyFunc(rc) - if err != nil { - glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) - return - } - - if pod.DeletionTimestamp != nil { - // on a restart of the controller manager, it's possible a new pod shows up in a state that - // is already pending deletion. Prevent the pod from being a creation observation. - rm.deletePod(pod) - return - } - rm.expectations.CreationObserved(rcKey) - rm.enqueueController(rc) -} - -// When a pod is updated, figure out what controller/s manage it and wake them -// up. If the labels of the pod have changed we need to awaken both the old -// and new controller. old and cur must be *api.Pod types. -func (rm *ReplicationManager) updatePod(old, cur interface{}) { - curPod := cur.(*api.Pod) - oldPod := old.(*api.Pod) - if curPod.ResourceVersion == oldPod.ResourceVersion { - // Periodic resync will send update events for all known pods. - // Two different versions of the same pod will always have different RVs. - return - } - glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) - labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels) - if curPod.DeletionTimestamp != nil { - // when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period, - // and after such time has passed, the kubelet actually deletes it from the store. We receive an update - // for modification of the deletion timestamp and expect an rc to create more replicas asap, not wait - // until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because - // an rc never initiates a phase change, and so is never asleep waiting for the same. 
- rm.deletePod(curPod) - if labelChanged { - // we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset. - rm.deletePod(oldPod) - } - return - } - - // Only need to get the old controller if the labels changed. - // Enqueue the oldRC before the curRC to give curRC a chance to adopt the oldPod. - if labelChanged { - // If the old and new rc are the same, the first one that syncs - // will set expectations preventing any damage from the second. - if oldRC := rm.getPodController(oldPod); oldRC != nil { - rm.enqueueController(oldRC) - } - } - - if curRC := rm.getPodController(curPod); curRC != nil { - rm.enqueueController(curRC) - } -} - -// When a pod is deleted, enqueue the controller that manages the pod and update its expectations. -// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. -func (rm *ReplicationManager) deletePod(obj interface{}) { - pod, ok := obj.(*api.Pod) - - // When a delete is dropped, the relist will notice a pod in the store not - // in the list, leading to the insertion of a tombstone object which contains - // the deleted key/value. Note that this value might be stale. If the pod - // changed labels the new rc will not be woken up till the periodic resync. 
- if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) - return - } - pod, ok = tombstone.Obj.(*api.Pod) - if !ok { - glog.Errorf("Tombstone contained object that is not a pod %#v", obj) - return - } - } - glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v, labels %+v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod.Labels) - if rc := rm.getPodController(pod); rc != nil { - rcKey, err := controller.KeyFunc(rc) - if err != nil { - glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) - return - } - rm.expectations.DeletionObserved(rcKey, controller.PodKey(pod)) - rm.enqueueController(rc) - } -} - -// obj could be an *api.ReplicationController, or a DeletionFinalStateUnknown marker item. -func (rm *ReplicationManager) enqueueController(obj interface{}) { - key, err := controller.KeyFunc(obj) - if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", obj, err) - return - } - - // TODO: Handle overlapping controllers better. Either disallow them at admission time or - // deterministically avoid syncing controllers that fight over pods. Currently, we only - // ensure that the same controller is synced for a given pod. When we periodically relist - // all controllers there will still be some replica instability. One way to handle this is - // by querying the store for all controllers that this rc overlaps, as well as all - // controllers that overlap this rc, and sorting them. - rm.queue.Add(key) -} - -// worker runs a worker thread that just dequeues items, processes them, and marks them done. -// It enforces that the syncHandler is never invoked concurrently with the same key. 
-func (rm *ReplicationManager) worker() { - workFunc := func() bool { - key, quit := rm.queue.Get() - if quit { - return true - } - defer rm.queue.Done(key) - - err := rm.syncHandler(key.(string)) - if err == nil { - rm.queue.Forget(key) - return false - } - - rm.queue.AddRateLimited(key) - utilruntime.HandleError(err) - return false - } - for { - if quit := workFunc(); quit { - glog.Infof("replication controller worker shutting down") - return - } - } -} - -// manageReplicas checks and updates replicas for the given replication controller. -// Does NOT modify . -func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) error { - diff := len(filteredPods) - int(rc.Spec.Replicas) - rcKey, err := controller.KeyFunc(rc) - if err != nil { - return err - } - if diff == 0 { - return nil - } - - if diff < 0 { - diff *= -1 - if diff > rm.burstReplicas { - diff = rm.burstReplicas - } - // TODO: Track UIDs of creates just like deletes. The problem currently - // is we'd need to wait on the result of a create to record the pod's - // UID, which would require locking *across* the create, which will turn - // into a performance bottleneck. We should generate a UID for the pod - // beforehand and store it via ExpectCreations. 
- errCh := make(chan error, diff) - rm.expectations.ExpectCreations(rcKey, diff) - var wg sync.WaitGroup - wg.Add(diff) - glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff) - for i := 0; i < diff; i++ { - go func() { - defer wg.Done() - var err error - if rm.garbageCollectorEnabled { - var trueVar = true - controllerRef := &api.OwnerReference{ - APIVersion: getRCKind().GroupVersion().String(), - Kind: getRCKind().Kind, - Name: rc.Name, - UID: rc.UID, - Controller: &trueVar, - } - err = rm.podControl.CreatePodsWithControllerRef(rc.Namespace, rc.Spec.Template, rc, controllerRef) - } else { - err = rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc) - } - if err != nil { - // Decrement the expected number of creates because the informer won't observe this pod - glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name) - rm.expectations.CreationObserved(rcKey) - errCh <- err - utilruntime.HandleError(err) - } - }() - } - wg.Wait() - - select { - case err := <-errCh: - // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit. - if err != nil { - return err - } - default: - } - - return nil - } - - if diff > rm.burstReplicas { - diff = rm.burstReplicas - } - glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff) - // No need to sort pods if we are about to delete all of them - if rc.Spec.Replicas != 0 { - // Sort the pods in the order such that not-ready < ready, unscheduled - // < scheduled, and pending < running. This ensures that we delete pods - // in the earlier stages whenever possible. 
- sort.Sort(controller.ActivePods(filteredPods)) - } - // Snapshot the UIDs (ns/name) of the pods we're expecting to see - // deleted, so we know to record their expectations exactly once either - // when we see it as an update of the deletion timestamp, or as a delete. - // Note that if the labels on a pod/rc change in a way that the pod gets - // orphaned, the rs will only wake up after the expectations have - // expired even if other pods are deleted. - deletedPodKeys := []string{} - for i := 0; i < diff; i++ { - deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i])) - } - // We use pod namespace/name as a UID to wait for deletions, so if the - // labels on a pod/rc change in a way that the pod gets orphaned, the - // rc will only wake up after the expectation has expired. - errCh := make(chan error, diff) - rm.expectations.ExpectDeletions(rcKey, deletedPodKeys) - var wg sync.WaitGroup - wg.Add(diff) - for i := 0; i < diff; i++ { - go func(ix int) { - defer wg.Done() - if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name, rc); err != nil { - // Decrement the expected number of deletes because the informer won't observe this deletion - podKey := controller.PodKey(filteredPods[ix]) - glog.V(2).Infof("Failed to delete %v due to %v, decrementing expectations for controller %q/%q", podKey, err, rc.Namespace, rc.Name) - rm.expectations.DeletionObserved(rcKey, podKey) - errCh <- err - utilruntime.HandleError(err) - } - }(i) - } - wg.Wait() - - select { - case err := <-errCh: - // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit. - if err != nil { - return err - } - default: - } - - return nil - -} - -// syncReplicationController will sync the rc with the given key if it has had its expectations fulfilled, meaning -// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked -// concurrently with the same key. 
-func (rm *ReplicationManager) syncReplicationController(key string) error { - trace := util.NewTrace("syncReplicationController: " + key) - defer trace.LogIfLong(250 * time.Millisecond) - - startTime := time.Now() - defer func() { - glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Now().Sub(startTime)) - }() - - if !rm.podStoreSynced() { - // Sleep so we give the pod reflector goroutine a chance to run. - time.Sleep(PodStoreSyncedPollPeriod) - glog.Infof("Waiting for pods controller to sync, requeuing rc %v", key) - rm.queue.Add(key) - return nil - } - - obj, exists, err := rm.rcStore.Indexer.GetByKey(key) - if !exists { - glog.Infof("Replication Controller has been deleted %v", key) - rm.expectations.DeleteExpectations(key) - return nil - } - if err != nil { - return err - } - rc := *obj.(*api.ReplicationController) - - // Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in - // and update the expectations after we've retrieved active pods from the store. If a new pod enters - // the store after we've checked the expectation, the rc sync is just deferred till the next relist. - rcKey, err := controller.KeyFunc(&rc) - if err != nil { - glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) - return err - } - trace.Step("ReplicationController restored") - rcNeedsSync := rm.expectations.SatisfiedExpectations(rcKey) - trace.Step("Expectations restored") - - // NOTE: filteredPods are pointing to objects from cache - if you need to - // modify them, you need to copy it first. - // TODO: Do the List and Filter in a single pass, or use an index. - var filteredPods []*api.Pod - if rm.garbageCollectorEnabled { - // list all pods to include the pods that don't match the rc's selector - // anymore but has the stale controller ref. 
- pods, err := rm.podStore.Pods(rc.Namespace).List(labels.Everything()) - if err != nil { - glog.Errorf("Error getting pods for rc %q: %v", key, err) - rm.queue.Add(key) - return err - } - cm := controller.NewPodControllerRefManager(rm.podControl, rc.ObjectMeta, labels.Set(rc.Spec.Selector).AsSelectorPreValidated(), getRCKind()) - matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(pods) - for _, pod := range matchesNeedsController { - err := cm.AdoptPod(pod) - // continue to next pod if adoption fails. - if err != nil { - // If the pod no longer exists, don't even log the error. - if !errors.IsNotFound(err) { - utilruntime.HandleError(err) - } - } else { - matchesAndControlled = append(matchesAndControlled, pod) - } - } - filteredPods = matchesAndControlled - // remove the controllerRef for the pods that no longer have matching labels - var errlist []error - for _, pod := range controlledDoesNotMatch { - err := cm.ReleasePod(pod) - if err != nil { - errlist = append(errlist, err) - } - } - if len(errlist) != 0 { - aggregate := utilerrors.NewAggregate(errlist) - // push the RC into work queue again. We need to try to free the - // pods again otherwise they will stuck with the stale - // controllerRef. - rm.queue.Add(key) - return aggregate - } - } else { - pods, err := rm.podStore.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelectorPreValidated()) - if err != nil { - glog.Errorf("Error getting pods for rc %q: %v", key, err) - rm.queue.Add(key) - return err - } - filteredPods = controller.FilterActivePods(pods) - } - - var manageReplicasErr error - if rcNeedsSync && rc.DeletionTimestamp == nil { - manageReplicasErr = rm.manageReplicas(filteredPods, &rc) - } - trace.Step("manageReplicas done") - - // Count the number of pods that have labels matching the labels of the pod - // template of the replication controller, the matching pods may have more - // labels than are in the template. 
Because the label of podTemplateSpec is - // a superset of the selector of the replication controller, so the possible - // matching pods must be part of the filteredPods. - fullyLabeledReplicasCount := 0 - readyReplicasCount := 0 - templateLabel := labels.Set(rc.Spec.Template.Labels).AsSelectorPreValidated() - for _, pod := range filteredPods { - if templateLabel.Matches(labels.Set(pod.Labels)) { - fullyLabeledReplicasCount++ - } - if api.IsPodReady(pod) { - readyReplicasCount++ - } - } - - // Always updates status as pods come up or die. - if err := updateReplicaCount(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), rc, len(filteredPods), fullyLabeledReplicasCount, readyReplicasCount); err != nil { - // Multiple things could lead to this update failing. Returning an error causes a requeue without forcing a hotloop - return err - } - - return manageReplicasErr -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go deleted file mode 100644 index e7e22795a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// If you make changes to this file, you should also make the corresponding change in ReplicaSet. 
- -package replication - -import ( - "fmt" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" -) - -// updateReplicaCount attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry. -func updateReplicaCount(rcClient unversionedcore.ReplicationControllerInterface, controller api.ReplicationController, numReplicas, numFullyLabeledReplicas, numReadyReplicas int) (updateErr error) { - // This is the steady state. It happens when the rc doesn't have any expectations, since - // we do a periodic relist every 30s. If the generations differ but the replicas are - // the same, a caller might've resized to the same replica count. - if int(controller.Status.Replicas) == numReplicas && - int(controller.Status.FullyLabeledReplicas) == numFullyLabeledReplicas && - int(controller.Status.ReadyReplicas) == numReadyReplicas && - controller.Generation == controller.Status.ObservedGeneration { - return nil - } - // Save the generation number we acted on, otherwise we might wrongfully indicate - // that we've seen a spec update when we retry. - // TODO: This can clobber an update if we allow multiple agents to write to the - // same status. 
- generation := controller.Generation - - var getErr error - for i, rc := 0, &controller; ; i++ { - glog.V(4).Infof(fmt.Sprintf("Updating replica count for rc: %s/%s, ", controller.Namespace, controller.Name) + - fmt.Sprintf("replicas %d->%d (need %d), ", controller.Status.Replicas, numReplicas, controller.Spec.Replicas) + - fmt.Sprintf("fullyLabeledReplicas %d->%d, ", controller.Status.FullyLabeledReplicas, numFullyLabeledReplicas) + - fmt.Sprintf("readyReplicas %d->%d, ", controller.Status.ReadyReplicas, numReadyReplicas) + - fmt.Sprintf("sequence No: %v->%v", controller.Status.ObservedGeneration, generation)) - - rc.Status = api.ReplicationControllerStatus{ - Replicas: int32(numReplicas), - FullyLabeledReplicas: int32(numFullyLabeledReplicas), - ReadyReplicas: int32(numReadyReplicas), - ObservedGeneration: generation, - } - _, updateErr = rcClient.UpdateStatus(rc) - if updateErr == nil || i >= statusUpdateRetries { - return updateErr - } - // Update the controller with the latest resource version for the next poll - if rc, getErr = rcClient.Get(controller.Name); getErr != nil { - // If the GET fails we can't trust status.Replicas anymore. This error - // is bound to be more interesting than the update failure. - return getErr - } - } -} - -// OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker. 
-type OverlappingControllers []api.ReplicationController - -func (o OverlappingControllers) Len() int { return len(o) } -func (o OverlappingControllers) Swap(i, j int) { o[i], o[j] = o[j], o[i] } - -func (o OverlappingControllers) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) -} diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS b/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS deleted file mode 100644 index a046efc0c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - derekwaynecarr - - lavalamp - - smarterclayton - - wojtek-t diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/doc.go b/vendor/k8s.io/kubernetes/pkg/conversion/doc.go index 0c46ef2d1..16eda0075 100644 --- a/vendor/k8s.io/kubernetes/pkg/conversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/conversion/doc.go @@ -21,4 +21,4 @@ limitations under the License. // but for the fields which did not change, copying is automated. This makes it // easy to modify the structures you use in memory without affecting the format // you store on disk or respond to in your external API calls. -package conversion +package conversion // import "k8s.io/kubernetes/pkg/conversion" diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go index 4c1002a4c..519e1e4be 100644 --- a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// Package queryparams provides conversion from versioned // runtime objects to URL query values -package queryparams +package queryparams // import "k8s.io/kubernetes/pkg/conversion/queryparams" diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS b/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS deleted file mode 100644 index 766c481bd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -assignees: - - erictune - - liggitt diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go index 41c12410f..5acf6ef62 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package credentialprovider supplies interfaces and implementations for // docker registry providers to expose their authentication scheme. -package credentialprovider +package credentialprovider // import "k8s.io/kubernetes/pkg/credentialprovider" diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go index 83cbdce0c..400d001e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package fieldpath supplies methods for extracting fields from objects // given a path to a field. -package fieldpath +package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath" diff --git a/vendor/k8s.io/kubernetes/pkg/fields/doc.go b/vendor/k8s.io/kubernetes/pkg/fields/doc.go index 49059e263..3129b02a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/fields/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/fields/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package fields implements a simple field system, parsing and matching // selectors with sets of fields. 
-package fields +package fields // import "k8s.io/kubernetes/pkg/fields" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS deleted file mode 100644 index 26e3e7540..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -assignees: - - brendandburns - - deads2k - - janetkuo - - jlowdermilk - - pwittrock - - smarterclayton diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go index 49ffb4d95..e6ed544a8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -33,7 +33,6 @@ import ( "time" "github.com/emicklei/go-restful/swagger" - "github.com/imdario/mergo" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -135,6 +134,10 @@ type Factory struct { PauseObject func(object runtime.Object) (bool, error) // ResumeObject resumes a paused object ie. it will be reconciled by its controller. ResumeObject func(object runtime.Object) (bool, error) + // ResolveImage resolves the image names. For kubernetes this function is just + // passthrough but it allows to perform more sophisticated image name resolving for + // third-party vendors. + ResolveImage func(imageName string) (string, error) // Returns a schema that can validate objects stored on disk. Validator func(validate bool, cacheDir string) (validation.Schema, error) // SwaggerSchema returns the schema declaration for the provided group version kind. 
@@ -672,6 +675,9 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { return false, fmt.Errorf("cannot resume %v", gvks[0]) } }, + ResolveImage: func(imageName string) (string, error) { + return imageName, nil + }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) @@ -1223,11 +1229,13 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { // exists and is not a directory. func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + // use the standard defaults for this client command + // DEPRECATED: remove and replace with something more accurate + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.") - overrides := &clientcmd.ConfigOverrides{} - // use the standard defaults for this client config - mergo.Merge(&overrides.ClusterDefaults, clientcmd.DefaultCluster) + overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} flagNames := clientcmd.RecommendedConfigOverrideFlags("") // short flagnames are disabled by default. 
These are here for compatibility with existing scripts @@ -1239,6 +1247,29 @@ func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { return clientConfig } +func (f *Factory) DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *kubectl.PrintOptions { + columnLabel, err := cmd.Flags().GetStringSlice("label-columns") + if err != nil { + columnLabel = []string{} + } + opts := &kubectl.PrintOptions{ + NoHeaders: GetFlagBool(cmd, "no-headers"), + WithNamespace: withNamespace, + Wide: GetWideFlag(cmd), + ShowAll: GetFlagBool(cmd, "show-all"), + ShowLabels: GetFlagBool(cmd, "show-labels"), + AbsoluteTimestamps: isWatch(cmd), + ColumnLabels: columnLabel, + } + + return opts +} + +// DefaultResourceFilterFunc returns a collection of FilterFuncs suitable for filtering specific resource types. +func (f *Factory) DefaultResourceFilterFunc() kubectl.Filters { + return kubectl.NewResourceFilter() +} + // PrintObject prints an api object given command line flags to modify the output format func (f *Factory) PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error { gvks, _, err := api.Scheme.ObjectKinds(obj) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go index adc82b6b2..cc99d5218 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go @@ -28,6 +28,7 @@ import ( "strings" "time" + "k8s.io/kubernetes/pkg/api" kerrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" @@ -43,7 +44,7 @@ import ( "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/strategicpatch" - "github.com/evanphx/json-patch" + jsonpatch "github.com/evanphx/json-patch" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -60,7 +61,7 @@ type debugError interface { // AddSourceToErr 
adds handleResourcePrefix and source string to error message. // verb is the string like "creating", "deleting" etc. -// souce is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource. +// source is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource. func AddSourceToErr(verb string, source string, err error) error { if source != "" { if statusError, ok := err.(kerrors.APIStatus); ok { @@ -247,6 +248,26 @@ func MultilineError(prefix string, err error) string { return fmt.Sprintf("%s%s\n", prefix, err) } +// PrintErrorWithCauses prints an error's kind, name, and each of the error's causes in a new line. +// The returned string will end with a newline. +// Returns true if the error type can be handled, or false otherwise. +func PrintErrorWithCauses(err error, errOut io.Writer) bool { + switch t := err.(type) { + case *kerrors.StatusError: + errorDetails := t.Status().Details + if errorDetails != nil { + fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name) + for _, cause := range errorDetails.Causes { + fmt.Fprintf(errOut, "* %s: %s\n", cause.Field, cause.Message) + } + return true + } + } + + fmt.Fprintf(errOut, "error: %v\n", err) + return false +} + // MultipleErrors returns a newline delimited string containing // the prefix and referenced errors in standard form. 
func MultipleErrors(prefix string, errs []error) string { @@ -295,7 +316,7 @@ func getFlag(cmd *cobra.Command, flag string) *pflag.Flag { func GetFlagString(cmd *cobra.Command, flag string) string { s, err := cmd.Flags().GetString(flag) if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -304,7 +325,7 @@ func GetFlagString(cmd *cobra.Command, flag string) string { func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { s, err := cmd.Flags().GetStringSlice(flag) if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -321,7 +342,7 @@ func GetWideFlag(cmd *cobra.Command) bool { func GetFlagBool(cmd *cobra.Command, flag string) bool { b, err := cmd.Flags().GetBool(flag) if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return b } @@ -330,7 +351,7 @@ func GetFlagBool(cmd *cobra.Command, flag string) bool { func GetFlagInt(cmd *cobra.Command, flag string) int { i, err := cmd.Flags().GetInt(flag) if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -339,7 +360,7 @@ func GetFlagInt(cmd *cobra.Command, flag string) int { func GetFlagInt64(cmd *cobra.Command, flag string) int64 { i, err := cmd.Flags().GetInt64(flag) if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -347,7 +368,7 @@ func GetFlagInt64(cmd *cobra.Command, flag string) int64 { func GetFlagDuration(cmd 
*cobra.Command, flag string) time.Duration { d, err := cmd.Flags().GetDuration(flag) if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return d } @@ -600,7 +621,7 @@ func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPair for _, pairArg := range pairArgs { if strings.Index(pairArg, "=") != -1 { parts := strings.SplitN(pairArg, "=", 2) - if len(parts) != 2 || len(parts[1]) == 0 { + if len(parts) != 2 { if invalidBuf.Len() > 0 { invalidBuf.WriteString(", ") } @@ -636,3 +657,107 @@ func MaybeConvertObject(obj runtime.Object, gv unversioned.GroupVersion, convert return converter.ConvertToVersion(obj, gv) } } + +// MustPrintWithKinds determines if printer is dealing +// with multiple resource kinds, in which case it will +// return true, indicating resource kind will be +// included as part of printer output +func MustPrintWithKinds(objs []runtime.Object, infos []*resource.Info, sorter *kubectl.RuntimeSort, printAll bool) bool { + var lastMap *meta.RESTMapping + + if len(infos) == 1 && printAll { + return true + } + + for ix := range objs { + var mapping *meta.RESTMapping + if sorter != nil { + mapping = infos[sorter.OriginalPosition(ix)].Mapping + } else { + mapping = infos[ix].Mapping + } + + // display "kind" only if we have mixed resources + if lastMap != nil && mapping.Resource != lastMap.Resource { + return true + } + lastMap = mapping + } + + return false +} + +// FilterResourceList receives a list of runtime objects. +// If any objects are filtered, that number is returned along with a modified list. 
+func FilterResourceList(obj runtime.Object, filterFuncs kubectl.Filters, filterOpts *kubectl.PrintOptions) (int, []runtime.Object, error) { + items, err := meta.ExtractList(obj) + if err != nil { + return 0, []runtime.Object{obj}, utilerrors.NewAggregate([]error{err}) + } + if errs := runtime.DecodeList(items, api.Codecs.UniversalDecoder(), runtime.UnstructuredJSONScheme); len(errs) > 0 { + return 0, []runtime.Object{obj}, utilerrors.NewAggregate(errs) + } + + filterCount := 0 + list := make([]runtime.Object, 0, len(items)) + for _, obj := range items { + if isFiltered, err := filterFuncs.Filter(obj, filterOpts); !isFiltered { + if err != nil { + glog.V(2).Infof("Unable to filter resource: %v", err) + continue + } + list = append(list, obj) + } else if isFiltered { + filterCount++ + } + } + return filterCount, list, nil +} + +func PrintFilterCount(hiddenObjNum int, resource string, out io.Writer, options *kubectl.PrintOptions) error { + if !options.NoHeaders && !options.ShowAll && hiddenObjNum > 0 { + _, err := fmt.Fprintf(out, " info: %d completed object(s) was(were) not shown in %s list. Pass --show-all to see all objects.\n\n", hiddenObjNum, resource) + return err + } + return nil +} + +// ObjectListToVersionedObject receives a list of api objects and a group version +// and squashes the list's items into a single versioned runtime.Object. +func ObjectListToVersionedObject(objects []runtime.Object, version unversioned.GroupVersion) (runtime.Object, error) { + objectList := &api.List{Items: objects} + converted, err := resource.TryConvert(api.Scheme, objectList, version, registered.GroupOrDie(api.GroupName).GroupVersion) + if err != nil { + return nil, err + } + return converted, nil +} + +// IsSiblingCommandExists receives a pointer to a cobra command and a target string. +// Returns true if the target string is found in the list of sibling commands. 
+func IsSiblingCommandExists(cmd *cobra.Command, targetCmdName string) bool { + for _, c := range cmd.Parent().Commands() { + if c.Name() == targetCmdName { + return true + } + } + + return false +} + +// DefaultSubCommandRun prints a command's help string to the specified output if no +// arguments (sub-commands) are provided, or a usage error otherwise. +func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { + return func(c *cobra.Command, args []string) { + c.SetOutput(out) + RequireNoArguments(c, args) + c.Help() + } +} + +// RequireNoArguments exits with a usage error if extra arguments are provided. +func RequireNoArguments(c *cobra.Command, args []string) { + if len(args) > 0 { + CheckErr(UsageError(c, fmt.Sprintf(`unknown command "%s"`, strings.Join(args, " ")))) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go index c27cc29c2..60d724d7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go @@ -56,8 +56,12 @@ func AddNoHeadersFlags(cmd *cobra.Command) { } // PrintSuccess prints message after finishing mutating operations -func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, operation string) { +func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, dryRun bool, operation string) { resource, _ = mapper.ResourceSingularizer(resource) + dryRunMsg := "" + if dryRun { + dryRunMsg = " (dry run)" + } if shortOutput { // -o name: prints resource/name if len(resource) > 0 { @@ -68,9 +72,9 @@ func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resou } else { // understandable output by default if len(resource) > 0 { - fmt.Fprintf(out, "%s \"%s\" %s\n", resource, name, operation) + fmt.Fprintf(out, "%s \"%s\" %s%s\n", resource, name, operation, 
dryRunMsg) } else { - fmt.Fprintf(out, "\"%s\" %s\n", name, operation) + fmt.Fprintf(out, "\"%s\" %s%s\n", name, operation, dryRunMsg) } } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go index 88117776b..a7400a615 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go @@ -40,6 +40,9 @@ import ( "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/certificates" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/apis/storage" + storageutil "k8s.io/kubernetes/pkg/apis/storage/util" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" client "k8s.io/kubernetes/pkg/client/unversioned" adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" @@ -118,6 +121,8 @@ func describerMap(c *client.Client) map[unversioned.GroupKind]Describer { apps.Kind("PetSet"): &PetSetDescriber{c}, certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c}, api.Kind("SecurityContextConstraints"): &SecurityContextConstraintsDescriber{c}, + storage.Kind("StorageClass"): &StorageClassDescriber{c}, + policy.Kind("PodDisruptionBudget"): &PodDisruptionBudgetDescriber{c}, } return m @@ -174,13 +179,24 @@ func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings } resourceQuotaList, err := d.ResourceQuotas(name).List(api.ListOptions{}) if err != nil { - return "", err + if errors.IsNotFound(err) { + // Server does not support resource quotas. + // Not an error, will not show resource quotas information. + resourceQuotaList = nil + } else { + return "", err + } } limitRangeList, err := d.LimitRanges(name).List(api.ListOptions{}) if err != nil { - return "", err + if errors.IsNotFound(err) { + // Server does not support limit ranges. + // Not an error, will not show limit ranges information. 
+ limitRangeList = nil + } else { + return "", err + } } - return describeNamespace(ns, resourceQuotaList, limitRangeList) } @@ -885,6 +901,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pv.Name) printLabelsMultiline(out, "Labels", pv.Labels) + fmt.Fprintf(out, "StorageClass:\t%s\n", storageutil.GetStorageClassAnnotation(pv.ObjectMeta)) fmt.Fprintf(out, "Status:\t%s\n", pv.Status.Phase) if pv.Spec.ClaimRef != nil { fmt.Fprintf(out, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) @@ -950,6 +967,7 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pvc.Name) fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace) + fmt.Fprintf(out, "StorageClass:\t%s\n", storageutil.GetStorageClassAnnotation(pvc.ObjectMeta)) fmt.Fprintf(out, "Status:\t%v\n", pvc.Status.Phase) fmt.Fprintf(out, "Volume:\t%s\n", pvc.Spec.VolumeName) printLabelsMultiline(out, "Labels", pvc.Labels) @@ -2500,6 +2518,64 @@ func describeNetworkPolicy(networkPolicy *extensions.NetworkPolicy) (string, err }) } +type StorageClassDescriber struct { + client.Interface +} + +func (s *StorageClassDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + sc, err := s.Storage().StorageClasses().Get(name) + if err != nil { + return "", err + } + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", sc.Name) + fmt.Fprintf(out, "IsDefaultClass:\t%s\n", storageutil.IsDefaultAnnotationText(sc.ObjectMeta)) + fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations)) + fmt.Fprintf(out, "Provisioner:\t%s\n", sc.Provisioner) + fmt.Fprintf(out, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters)) + if describerSettings.ShowEvents { + events, err := s.Events(namespace).Search(sc) + if err != 
nil { + return err + } + if events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + +type PodDisruptionBudgetDescriber struct { + client *client.Client +} + +func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + pdb, err := p.client.Policy().PodDisruptionBudgets(namespace).Get(name) + if err != nil { + return "", err + } + return tabbedString(func(out io.Writer) error { + fmt.Fprintf(out, "Name:\t%s\n", pdb.Name) + fmt.Fprintf(out, "Min available:\t%s\n", pdb.Spec.MinAvailable.String()) + if pdb.Spec.Selector != nil { + fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(pdb.Spec.Selector)) + } else { + fmt.Fprintf(out, "Selector:\t\n") + } + if describerSettings.ShowEvents { + events, err := p.client.Events(namespace).Search(pdb) + if err != nil { + return err + } + if events != nil { + DescribeEvents(events, out) + } + } + return nil + }) +} + // newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. 
func newErrNoDescriber(types ...reflect.Type) error { names := make([]string, 0, len(types)) @@ -2694,23 +2770,19 @@ func printTaintsMultilineWithIndent(out io.Writer, initialIndent, title, innerIn // to print taints in the sorted order keys := make([]string, 0, len(taints)) for _, taint := range taints { - keys = append(keys, taint.Key) + keys = append(keys, string(taint.Effect)+","+taint.Key) } sort.Strings(keys) - effects := []api.TaintEffect{api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule} - for i, key := range keys { - for _, effect := range effects { - for _, taint := range taints { - if taint.Key == key && taint.Effect == effect { - if i != 0 { - fmt.Fprint(out, initialIndent) - fmt.Fprint(out, innerIndent) - } - fmt.Fprintf(out, "%s=%s:%s\n", taint.Key, taint.Value, taint.Effect) - i++ + for _, taint := range taints { + if string(taint.Effect)+","+taint.Key == key { + if i != 0 { + fmt.Fprint(out, initialIndent) + fmt.Fprint(out, innerIndent) } + fmt.Fprintf(out, "%s\n", taint.ToString()) + i++ } } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go index d1516ebb7..df45d4469 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Package kubectl is a set of libraries that are used by the kubectl command line tool. // They are separated out into a library to support unit testing. Most functionality should // be included in this package, and the main kubectl should really just be an entry point. -package kubectl +package kubectl // import "k8s.io/kubernetes/pkg/kubectl" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go index a0e22e7cf..94944dfe3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go @@ -21,4 +21,4 @@ limitations under the License. 
// standard command line arguments and parameters into a Visitor that can iterate // over all of the identified resources, whether on the server or on the local // filesystem. -package resource +package resource // import "k8s.io/kubernetes/pkg/kubectl/resource" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go index 892b30e94..05fba8af5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go @@ -20,6 +20,7 @@ import ( "fmt" "reflect" + "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/unversioned" @@ -222,12 +223,24 @@ func AsVersionedObject(infos []*Info, forceList bool, version unversioned.GroupV object = objects[0] } else { object = &api.List{Items: objects} - converted, err := tryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion) + converted, err := TryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion) if err != nil { return nil, err } object = converted } + + // validSpecifiedVersion resolves to true if the version passed to this function matches the + // version assigned to the converted object + actualVersion := object.GetObjectKind().GroupVersionKind() + if actualVersion.Version != version.Version { + defaultVersionInfo := "" + if len(actualVersion.Version) > 0 { + defaultVersionInfo = fmt.Sprintf("Defaulting to %q", actualVersion.Version) + } + glog.V(1).Infof(" info: the output version specified is invalid. 
%s\n", defaultVersionInfo) + } + return object, nil } @@ -263,7 +276,7 @@ func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder } } - converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) + converted, err := TryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) if err != nil { return nil, err } @@ -272,9 +285,9 @@ func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder return objects, nil } -// tryConvert attempts to convert the given object to the provided versions in order. This function assumes +// TryConvert attempts to convert the given object to the provided versions in order. This function assumes // the object is in internal version. -func tryConvert(converter runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) { +func TryConvert(converter runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) { var last error for _, version := range versions { if version.Empty() { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go new file mode 100644 index 000000000..8ca31d83a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/runtime" +) + +// FilterFunc is a function that knows how to filter a specific resource kind. +// It receives a generic runtime.Object which must be type-checked by the function. +// Returns a boolean value true if a resource is filtered, or false otherwise. +type FilterFunc func(runtime.Object, PrintOptions) bool + +// Filters is a collection of filter funcs +type Filters []FilterFunc + +func NewResourceFilter() Filters { + return []FilterFunc{ + filterPods, + } +} + +// filterPods returns true if a pod should be skipped. +// defaults to true for terminated pods +func filterPods(obj runtime.Object, options PrintOptions) bool { + switch p := obj.(type) { + case *v1.Pod: + reason := string(p.Status.Phase) + if p.Status.Reason != "" { + reason = p.Status.Reason + } + return !options.ShowAll && (reason == string(v1.PodSucceeded) || reason == string(v1.PodFailed)) + case *api.Pod: + reason := string(p.Status.Phase) + if p.Status.Reason != "" { + reason = p.Status.Reason + } + return !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) + } + return false +} + +// Filter loops through a collection of FilterFuncs until it finds one that can filter the given resource +func (f Filters) Filter(obj runtime.Object, opts *PrintOptions) (bool, error) { + for _, filter := range f { + if ok := filter(obj, *opts); ok { + return true, nil + } + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go index 5af291ed6..4af94bc06 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go @@ -41,8 +41,10 @@ import ( "k8s.io/kubernetes/pkg/apis/batch" 
"k8s.io/kubernetes/pkg/apis/certificates" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/storage" + storageutil "k8s.io/kubernetes/pkg/apis/storage/util" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" utilerrors "k8s.io/kubernetes/pkg/util/errors" @@ -352,10 +354,9 @@ type PrintOptions struct { // will only be printed if the object type changes. This makes it useful for printing items // received from watches. type HumanReadablePrinter struct { - handlerMap map[reflect.Type]*handlerEntry - options PrintOptions - lastType reflect.Type - hiddenObjNum int + handlerMap map[reflect.Type]*handlerEntry + options PrintOptions + lastType reflect.Type } // NewHumanReadablePrinter creates a HumanReadablePrinter. @@ -448,10 +449,6 @@ func (h *HumanReadablePrinter) HandledResources() []string { } func (h *HumanReadablePrinter) FinishPrint(output io.Writer, res string) error { - if !h.options.NoHeaders && !h.options.ShowAll && h.hiddenObjNum > 0 { - _, err := fmt.Fprintf(output, " info: %d completed object(s) was(were) not shown in %s list. Pass --show-all to see all objects.\n\n", h.hiddenObjNum, res) - return err - } return nil } @@ -459,6 +456,7 @@ func (h *HumanReadablePrinter) FinishPrint(output io.Writer, res string) error { // pkg/kubectl/cmd/get.go to reflect the new resource type. 
var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"} var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"} +var podDisruptionBudgetColumns = []string{"NAME", "MIN-AVAILABLE", "SELECTOR"} var replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"} var replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"} var jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"} @@ -498,12 +496,6 @@ var certificateSigningRequestColumns = []string{"NAME", "AGE", "REQUESTOR", "CON var securityContextConstraintsColumns = []string{"NAME", "PRIV", "CAPS", "SELINUX", "RUNASUSER", "FSGROUP", "SUPGROUP", "PRIORITY", "READONLYROOTFS", "VOLUMES"} func (h *HumanReadablePrinter) printPod(pod *api.Pod, w io.Writer, options PrintOptions) error { - reason := string(pod.Status.Phase) - // if not printing all pods, skip terminated pods (default) - if !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) { - h.hiddenObjNum++ - return nil - } if err := printPodBase(pod, w, options); err != nil { return err } @@ -513,13 +505,6 @@ func (h *HumanReadablePrinter) printPod(pod *api.Pod, w io.Writer, options Print func (h *HumanReadablePrinter) printPodList(podList *api.PodList, w io.Writer, options PrintOptions) error { for _, pod := range podList.Items { - reason := string(pod.Status.Phase) - // if not printing all pods, skip terminated pods (default) - if !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) { - h.hiddenObjNum++ - continue - } - if err := printPodBase(&pod, w, options); err != nil { return err } @@ -533,6 +518,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(podColumns, h.printPod) h.Handler(podTemplateColumns, printPodTemplate) h.Handler(podTemplateColumns, printPodTemplateList) + h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudget) + h.Handler(podDisruptionBudgetColumns, 
printPodDisruptionBudgetList) h.Handler(replicationControllerColumns, printReplicationController) h.Handler(replicationControllerColumns, printReplicationControllerList) h.Handler(replicaSetColumns, printReplicaSet) @@ -823,6 +810,41 @@ func printPodTemplateList(podList *api.PodTemplateList, w io.Writer, options Pri return nil } +func printPodDisruptionBudget(pdb *policy.PodDisruptionBudget, w io.Writer, options PrintOptions) error { + // name, minavailable, selector + name := formatResourceName(options.Kind, pdb.Name, options.WithKind) + namespace := pdb.Namespace + + if options.WithNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + selector := "" + if pdb.Spec.Selector != nil { + selector = unversioned.FormatLabelSelector(pdb.Spec.Selector) + } + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", + name, + pdb.Spec.MinAvailable.String(), + selector, + ); err != nil { + return err + } + + return nil +} + +func printPodDisruptionBudgetList(pdbList *policy.PodDisruptionBudgetList, w io.Writer, options PrintOptions) error { + for _, pdb := range pdbList.Items { + if err := printPodDisruptionBudget(&pdb, w, options); err != nil { + return err + } + } + return nil +} + // TODO(AdoHe): try to put wide output in a single method func printReplicationController(controller *api.ReplicationController, w io.Writer, options PrintOptions) error { name := formatResourceName(options.Kind, controller.Name, options.WithKind) @@ -2074,6 +2096,10 @@ func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, opt func printStorageClass(sc *storage.StorageClass, w io.Writer, options PrintOptions) error { name := sc.Name + + if storageutil.IsDefaultAnnotation(sc.ObjectMeta) { + name += " (default)" + } provtype := sc.Provisioner if _, err := fmt.Fprintf(w, "%s\t%s\t", name, provtype); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go index 
632a98f79..62507e84a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go @@ -22,11 +22,12 @@ import ( "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/apis/extensions" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller/deployment/util" ) // StatusViewer provides an interface for resources that provides rollout status. type StatusViewer interface { - Status(namespace, name string) (string, bool, error) + Status(namespace, name string, revision int64) (string, bool, error) } func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) { @@ -42,14 +43,23 @@ type DeploymentStatusViewer struct { } // Status returns a message describing deployment status, and a bool value indicating if the status is considered done -func (s *DeploymentStatusViewer) Status(namespace, name string) (string, bool, error) { +func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) (string, bool, error) { deployment, err := s.c.Deployments(namespace).Get(name) if err != nil { return "", false, err } + if revision > 0 { + deploymentRev, err := util.Revision(deployment) + if err != nil { + return "", false, fmt.Errorf("cannot get the revision of deployment %q: %v", deployment.Name, err) + } + if revision != deploymentRev { + return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev) + } + } if deployment.Generation <= deployment.Status.ObservedGeneration { if deployment.Status.UpdatedReplicas == deployment.Spec.Replicas { - return fmt.Sprintf("deployment %s successfully rolled out\n", name), true, nil + return fmt.Sprintf("deployment %q successfully rolled out\n", name), true, nil } return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil } 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go index ee080aae3..9f1c3d2b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go @@ -31,6 +31,7 @@ type ServiceCommonGeneratorV1 struct { TCP []string Type api.ServiceType ClusterIP string + NodePort int } type ServiceClusterIPGeneratorV1 struct { @@ -56,6 +57,7 @@ func (ServiceNodePortGeneratorV1) ParamNames() []GeneratorParam { return []GeneratorParam{ {"name", true}, {"tcp", true}, + {"nodeport", true}, } } func (ServiceLoadBalancerGeneratorV1) ParamNames() []GeneratorParam { @@ -174,12 +176,14 @@ func (s ServiceCommonGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err != nil { return nil, err } + portName := strings.Replace(tcpString, ":", "-", -1) ports = append(ports, api.ServicePort{ Name: portName, Port: port, TargetPort: targetPort, Protocol: api.Protocol("TCP"), + NodePort: int32(s.NodePort), }) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go index ebc1cc598..46c271afc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go @@ -22,4 +22,4 @@ limitations under the License. // when available. // Best-Effort containers, which don’t specify a request, can use resources only if not being used // by other pods. 
-package qos +package qos // import "k8s.io/kubernetes/pkg/kubelet/qos" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go index ad696f361..7013f712f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -21,8 +21,9 @@ import ( ) const ( - PodInfraOOMAdj int = -999 + PodInfraOOMAdj int = -998 KubeletOOMScoreAdj int = -999 + DockerOOMScoreAdj int = -999 KubeProxyOOMScoreAdj int = -999 guaranteedOOMScoreAdj int = -998 besteffortOOMScoreAdj int = 1000 @@ -53,10 +54,10 @@ func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCa // Note that this is a heuristic, it won't work if a container has many small processes. memoryRequest := container.Resources.Requests.Memory().Value() oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity - // A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure + // A guaranteed pod using 100% of memory can have an OOM score of 10. Ensure // that burstable pods have a higher OOM score adjustment. - if oomScoreAdjust < 2 { - return 2 + if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) { + return (1000 + guaranteedOOMScoreAdj) } // Give burstable pods a higher chance of survival over besteffort pods. if int(oomScoreAdjust) == besteffortOOMScoreAdj { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go index 0d9efe50f..88e345636 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Common types in the Kubelet. 
-package types +package types // import "k8s.io/kubernetes/pkg/kubelet/types" diff --git a/vendor/k8s.io/kubernetes/pkg/labels/doc.go b/vendor/k8s.io/kubernetes/pkg/labels/doc.go index 35ba78809..64a415556 100644 --- a/vendor/k8s.io/kubernetes/pkg/labels/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/labels/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package labels implements a simple label system, parsing and matching // selectors with sets of labels. -package labels +package labels // import "k8s.io/kubernetes/pkg/labels" diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go index a2a002101..5e14f82e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package ports defines ports used by various pieces of the kubernetes // infrastructure. -package ports +package ports // import "k8s.io/kubernetes/pkg/master/ports" diff --git a/vendor/k8s.io/kubernetes/pkg/master/thirdparty_controller.go b/vendor/k8s.io/kubernetes/pkg/master/thirdparty_controller.go new file mode 100644 index 000000000..defd84ab3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/thirdparty_controller.go @@ -0,0 +1,146 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package master + +import ( + "fmt" + "strings" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + expapi "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apiserver" + thirdpartyresourceetcd "k8s.io/kubernetes/pkg/registry/thirdpartyresource/etcd" + "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/sets" +) + +const thirdpartyprefix = "/apis" + +// dynamicLister is used to list resources for dynamic third party +// apis. It implements the apiserver.APIResourceLister interface +type dynamicLister struct { + m *Master + path string +} + +func (d dynamicLister) ListAPIResources() []unversioned.APIResource { + return d.m.getExistingThirdPartyResources(d.path) +} + +var _ apiserver.APIResourceLister = &dynamicLister{} + +func makeThirdPartyPath(group string) string { + if len(group) == 0 { + return thirdpartyprefix + } + return thirdpartyprefix + "/" + group +} + +func getThirdPartyGroupName(path string) string { + return strings.TrimPrefix(strings.TrimPrefix(path, thirdpartyprefix), "/") +} + +// resourceInterface is the interface for the parts of the master that know how to add/remove +// third party resources. Extracted into an interface for injection for testing. +type resourceInterface interface { + // Remove a third party resource based on the RESTful path for that resource, the path is / + RemoveThirdPartyResource(path string) error + // Install a third party resource described by 'rsrc' + InstallThirdPartyResource(rsrc *expapi.ThirdPartyResource) error + // Is a particular third party resource currently installed? 
+ HasThirdPartyResource(rsrc *expapi.ThirdPartyResource) (bool, error) + // List all currently installed third party resources, the returned + // names are of the form / + ListThirdPartyResources() []string +} + +// ThirdPartyController is a control loop that knows how to synchronize ThirdPartyResource objects with +// RESTful resources which are present in the API server. +type ThirdPartyController struct { + master resourceInterface + thirdPartyResourceRegistry *thirdpartyresourceetcd.REST +} + +// Synchronize a single resource with RESTful resources on the master +func (t *ThirdPartyController) SyncOneResource(rsrc *expapi.ThirdPartyResource) error { + // TODO: we also need to test if the existing installed resource matches the resource we are sync-ing. + // Currently, if there is an older, incompatible resource installed, we won't remove it. We should detect + // older, incompatible resources and remove them before testing if the resource exists. + hasResource, err := t.master.HasThirdPartyResource(rsrc) + if err != nil { + return err + } + if !hasResource { + return t.master.InstallThirdPartyResource(rsrc) + } + return nil +} + +// Synchronize all resources with RESTful resources on the master +func (t *ThirdPartyController) SyncResources() error { + list, err := t.thirdPartyResourceRegistry.List(api.NewDefaultContext(), nil) + if err != nil { + return err + } + return t.syncResourceList(list) +} + +func (t *ThirdPartyController) syncResourceList(list runtime.Object) error { + existing := sets.String{} + switch list := list.(type) { + case *expapi.ThirdPartyResourceList: + // Loop across all schema objects for third party resources + for ix := range list.Items { + item := &list.Items[ix] + // extract the api group and resource kind from the schema + _, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(item) + if err != nil { + return err + } + // place it in the set of resources that we expect, so that we don't delete it in the delete pass + 
existing.Insert(makeThirdPartyPath(group)) + // ensure a RESTful resource for this schema exists on the master + if err := t.SyncOneResource(item); err != nil { + return err + } + } + default: + return fmt.Errorf("expected a *ThirdPartyResourceList, got %#v", list) + } + // deletion phase, get all installed RESTful resources + installed := t.master.ListThirdPartyResources() + for _, installedAPI := range installed { + found := false + // search across the expected restful resources to see if this resource belongs to one of the expected ones + for _, apiPath := range existing.List() { + if installedAPI == apiPath || strings.HasPrefix(installedAPI, apiPath+"/") { + found = true + break + } + } + // not expected, delete the resource + if !found { + if err := t.master.RemoveThirdPartyResource(installedAPI); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go index 47bb95304..002e0fd42 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package generic provides a generic object store interface and a // generic label/field matching type. -package generic +package generic // import "k8s.io/kubernetes/pkg/registry/generic" diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go index d9988ccb0..22ce5edd5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package thirdpartyresourcedata provides Registry interface and its REST // implementation for storing ThirdPartyResourceData api objects. 
-package thirdpartyresourcedata +package thirdpartyresourcedata // import "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS b/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS deleted file mode 100644 index d038b5e9b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - caesarxuchao - - deads2k - - lavalamp - - smarterclayton diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/doc.go b/vendor/k8s.io/kubernetes/pkg/runtime/doc.go index a9d084d9f..83accf4f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/runtime/doc.go @@ -42,4 +42,4 @@ limitations under the License. // As a bonus, a few common types useful from all api objects and versions // are provided in types.go. -package runtime +package runtime // import "k8s.io/kubernetes/pkg/runtime" diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto b/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto deleted file mode 100644 index 0e602abe1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.runtime; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "runtime"; - -// RawExtension is used to hold extensions in external versions. -// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +k8s:deepcopy-gen=true -// +protobuf=true -message RawExtension { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. 
- optional bytes raw = 1; -} - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. -// -// +k8s:deepcopy-gen=true -// +protobuf=true -message TypeMeta { - optional string apiVersion = 1; - - optional string kind = 2; -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +k8s:deepcopy-gen=true -// +protobuf=true -message Unknown { - optional TypeMeta typeMeta = 1; - - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - optional bytes raw = 2; - - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - optional string contentEncoding = 3; - - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. 
- optional string contentType = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go index 381748d69..19e8a692c 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package protobuf provides a Kubernetes serializer for the protobuf format. -package protobuf +package protobuf // import "k8s.io/kubernetes/pkg/runtime/serializer/protobuf" diff --git a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS b/vendor/k8s.io/kubernetes/pkg/storage/OWNERS deleted file mode 100644 index a57ded7f6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -assignees: - - lavalamp - - liggitt - - timothysc - - wojtek-t - - xiang90 diff --git a/vendor/k8s.io/kubernetes/pkg/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/storage/doc.go index d2c5dbfc4..901ea6509 100644 --- a/vendor/k8s.io/kubernetes/pkg/storage/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/storage/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Interfaces for database-related operations. -package storage +package storage // import "k8s.io/kubernetes/pkg/storage" diff --git a/vendor/k8s.io/kubernetes/pkg/storage/etcd/doc.go b/vendor/k8s.io/kubernetes/pkg/storage/etcd/doc.go index 73e2f50c4..22b269d5a 100644 --- a/vendor/k8s.io/kubernetes/pkg/storage/etcd/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/storage/etcd/doc.go @@ -14,4 +14,4 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package etcd +package etcd // import "k8s.io/kubernetes/pkg/storage/etcd" diff --git a/vendor/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go b/vendor/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go index 48a70cb73..d10b9a992 100644 --- a/vendor/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/storage/etcd/util/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package util holds generic etcd-related utility functions that any user of ectd might want to // use, without pulling in kubernetes-specific code. -package util +package util // import "k8s.io/kubernetes/pkg/storage/etcd/util" diff --git a/vendor/k8s.io/kubernetes/pkg/types/doc.go b/vendor/k8s.io/kubernetes/pkg/types/doc.go index 783cbcdc8..b2f6f58b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/types/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/types/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package types implements various generic types used throughout kubernetes. -package types +package types // import "k8s.io/kubernetes/pkg/types" diff --git a/vendor/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go index 7e1bb65a8..9c87ba7d3 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/kubernetes/pkg/util/cache/lruexpirecache.go @@ -23,13 +23,32 @@ import ( "github.com/golang/groupcache/lru" ) +// Clock defines an interface for obtaining the current time +type Clock interface { + Now() time.Time +} + +// realClock implements the Clock interface by calling time.Now() +type realClock struct{} + +func (realClock) Now() time.Time { return time.Now() } + type LRUExpireCache struct { + // clock is used to obtain the current time + clock Clock + cache *lru.Cache - lock sync.RWMutex + lock sync.Mutex } +// NewLRUExpireCache creates an expiring cache with the given size func NewLRUExpireCache(maxSize int) *LRUExpireCache { - return &LRUExpireCache{cache: 
lru.New(maxSize)} + return &LRUExpireCache{clock: realClock{}, cache: lru.New(maxSize)} +} + +// NewLRUExpireCache creates an expiring cache with the given size, using the specified clock to obtain the current time +func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache { + return &LRUExpireCache{clock: clock, cache: lru.New(maxSize)} } type cacheEntry struct { @@ -40,19 +59,19 @@ type cacheEntry struct { func (c *LRUExpireCache) Add(key lru.Key, value interface{}, ttl time.Duration) { c.lock.Lock() defer c.lock.Unlock() - c.cache.Add(key, &cacheEntry{value, time.Now().Add(ttl)}) + c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)}) // Remove entry from cache after ttl. time.AfterFunc(ttl, func() { c.remove(key) }) } func (c *LRUExpireCache) Get(key lru.Key) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() + c.lock.Lock() + defer c.lock.Unlock() e, ok := c.cache.Get(key) if !ok { return nil, false } - if time.Now().After(e.(*cacheEntry).expireTime) { + if c.clock.Now().After(e.(*cacheEntry).expireTime) { go c.remove(key) return nil, false } diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/doc.go b/vendor/k8s.io/kubernetes/pkg/util/config/doc.go index 5dbb37d44..5e9a469df 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/config/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/config/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Package config provides utility objects for decoupling sources of configuration and the // actual configuration state. Consumers must implement the Merger interface to unify // the sources of change into an object. -package config +package config // import "k8s.io/kubernetes/pkg/util/config" diff --git a/vendor/k8s.io/kubernetes/pkg/util/doc.go b/vendor/k8s.io/kubernetes/pkg/util/doc.go index 1747db550..f7e214f31 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/doc.go @@ -17,4 +17,4 @@ limitations under the License. 
// Package util implements various utility functions used in both testing and implementation // of Kubernetes. Package util may not depend on any other package in the Kubernetes // package tree. -package util +package util // import "k8s.io/kubernetes/pkg/util" diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go b/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go index b3b39bc38..9590c0d21 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package errors implements various utility functions and types around errors. -package errors +package errors // import "k8s.io/kubernetes/pkg/util/errors" diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go b/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go index 42631f216..de62fe399 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go +++ b/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go @@ -147,6 +147,20 @@ func Flatten(agg Aggregate) Aggregate { return NewAggregate(result) } +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. +func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + // AggregateGoroutines runs the provided functions in parallel, stuffing all // non-nil errors into the returned Aggregate. // Returns nil if all the functions complete successfully. diff --git a/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go b/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go index 8b824d244..de7301c8d 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/exec/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package exec provides an injectable interface and implementations for running commands. 
-package exec +package exec // import "k8s.io/kubernetes/pkg/util/exec" diff --git a/vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go b/vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go deleted file mode 100644 index 0265b9fb1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/interrupt/interrupt.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package interrupt - -import ( - "os" - "os/signal" - "sync" - "syscall" -) - -// terminationSignals are signals that cause the program to exit in the -// supported platforms (linux, darwin, windows). -var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} - -// Handler guarantees execution of notifications after a critical section (the function passed -// to a Run method), even in the presence of process termination. It guarantees exactly once -// invocation of the provided notify functions. -type Handler struct { - notify []func() - final func(os.Signal) - once sync.Once -} - -// Chain creates a new handler that invokes all notify functions when the critical section exits -// and then invokes the optional handler's notifications. This allows critical sections to be -// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed -// but should not exit (which is the responsibility of the parent handler). 
-func Chain(handler *Handler, notify ...func()) *Handler { - if handler == nil { - return New(nil, notify...) - } - return New(handler.Signal, append(notify, handler.Close)...) -} - -// New creates a new handler that guarantees all notify functions are run after the critical -// section exits (or is interrupted by the OS), then invokes the final handler. If no final -// handler is specified, the default final is `os.Exit(1)`. A handler can only be used for -// one critical section. -func New(final func(os.Signal), notify ...func()) *Handler { - return &Handler{ - final: final, - notify: notify, - } -} - -// Close executes all the notification handlers if they have not yet been executed. -func (h *Handler) Close() { - h.once.Do(func() { - for _, fn := range h.notify { - fn() - } - }) -} - -// Signal is called when an os.Signal is received, and guarantees that all notifications -// are executed, then the final handler is executed. This function should only be called once -// per Handler instance. -func (h *Handler) Signal(s os.Signal) { - h.once.Do(func() { - for _, fn := range h.notify { - fn() - } - if h.final == nil { - os.Exit(1) - } - h.final(s) - }) -} - -// Run ensures that any notifications are invoked after the provided fn exits (even if the -// process is interrupted by an OS termination signal). Notifications are only invoked once -// per Handler instance, so calling Run more than once will not behave as the user expects. -func (h *Handler) Run(fn func() error) error { - ch := make(chan os.Signal, 1) - signal.Notify(ch, terminationSignals...) 
- defer func() { - signal.Stop(ch) - close(ch) - }() - go func() { - sig, ok := <-ch - if !ok { - return - } - h.Signal(sig) - }() - defer h.Close() - return fn() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto b/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto deleted file mode 100644 index dd508e1c8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.util.intstr; - -// Package-wide variables from generator "generated". -option go_package = "intstr"; - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. This allows you to have, for example, a JSON field that can -// accept a name or number. 
-// TODO: Rename to Int32OrString -// -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message IntOrString { - optional int64 type = 1; - - optional int32 intVal = 2; - - optional string strVal = 3; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go index 2a6e17061..0077f3035 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go @@ -17,4 +17,4 @@ limitations under the License. // package jsonpath is a template engine using jsonpath syntax, // which can be seen at http://goessner.net/articles/JsonPath/. // In addition, it has {range} {end} function to iterate list and slice. -package jsonpath +package jsonpath // import "k8s.io/kubernetes/pkg/util/jsonpath" diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go b/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go index c87305fb0..a5e83763e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package labels provides utilities to work with Kubernetes labels. -package labels +package labels // import "k8s.io/kubernetes/pkg/util/labels" diff --git a/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go b/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go deleted file mode 100644 index 0fbc3ae08..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "fmt" - "sync" - "time" - - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/util/wait" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" -) - -const ( - updatePeriod = 5 * time.Second -) - -var ( - metricsLock sync.Mutex - rateLimiterMetrics map[string]prometheus.Gauge = make(map[string]prometheus.Gauge) -) - -func registerRateLimiterMetric(ownerName string) error { - metricsLock.Lock() - defer metricsLock.Unlock() - - if _, ok := rateLimiterMetrics[ownerName]; ok { - glog.Errorf("Metric for %v already registered", ownerName) - return fmt.Errorf("Metric for %v already registered", ownerName) - } - metric := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "rate_limiter_use", - Subsystem: ownerName, - Help: fmt.Sprintf("A metric measuring the saturation of the rate limiter for %v", ownerName), - }) - rateLimiterMetrics[ownerName] = metric - prometheus.MustRegister(metric) - return nil -} - -// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track -// how much used rateLimiter is and starts a goroutine that updates this metric every updatePeriod -func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error { - err := registerRateLimiterMetric(ownerName) - if err != nil { - return err - } - go wait.Forever(func() { - metricsLock.Lock() - defer metricsLock.Unlock() - rateLimiterMetrics[ownerName].Set(rateLimiter.Saturation()) - }, updatePeriod) - return nil -} diff --git 
a/vendor/k8s.io/kubernetes/pkg/util/net/http.go b/vendor/k8s.io/kubernetes/pkg/util/net/http.go index 53f28dfca..15df07741 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/net/http.go +++ b/vendor/k8s.io/kubernetes/pkg/util/net/http.go @@ -108,6 +108,34 @@ func Dialer(transport http.RoundTripper) (DialFunc, error) { } } +// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied. +// This makes it safe to call CloneTLSConfig on a config in active use by a server. +// TODO: replace with tls.Config#Clone when we move to go1.8 +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} + func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { if transport == nil { return nil, nil diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go b/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go index d7a8ff226..ba38c1f55 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package pod provides utilities to work with Kubernetes pod and pod templates. 
-package pod +package pod // import "k8s.io/kubernetes/pkg/util/pod" diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/resize.go b/vendor/k8s.io/kubernetes/pkg/util/term/resize.go deleted file mode 100644 index 3d78c8664..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/term/resize.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package term - -import ( - "fmt" - - "github.com/docker/docker/pkg/term" - "k8s.io/kubernetes/pkg/util/runtime" -) - -// Size represents the width and height of a terminal. -type Size struct { - Width uint16 - Height uint16 -} - -// GetSize returns the current size of the user's terminal. If it isn't a terminal, -// nil is returned. -func (t TTY) GetSize() *Size { - outFd, isTerminal := term.GetFdInfo(t.Out) - if !isTerminal { - return nil - } - return GetSize(outFd) -} - -// GetSize returns the current size of the terminal associated with fd. -func GetSize(fd uintptr) *Size { - winsize, err := term.GetWinsize(fd) - if err != nil { - runtime.HandleError(fmt.Errorf("unable to get terminal size: %v", err)) - return nil - } - - return &Size{Width: winsize.Width, Height: winsize.Height} -} - -// MonitorSize monitors the terminal's size. It returns a TerminalSizeQueue primed with -// initialSizes, or nil if there's no TTY present. 
-func (t *TTY) MonitorSize(initialSizes ...*Size) TerminalSizeQueue { - outFd, isTerminal := term.GetFdInfo(t.Out) - if !isTerminal { - return nil - } - - t.sizeQueue = &sizeQueue{ - t: *t, - // make it buffered so we can send the initial terminal sizes without blocking, prior to starting - // the streaming below - resizeChan: make(chan Size, len(initialSizes)), - stopResizing: make(chan struct{}), - } - - t.sizeQueue.monitorSize(outFd, initialSizes...) - - return t.sizeQueue -} - -// TerminalSizeQueue is capable of returning terminal resize events as they occur. -type TerminalSizeQueue interface { - // Next returns the new terminal size after the terminal has been resized. It returns nil when - // monitoring has been stopped. - Next() *Size -} - -// sizeQueue implements TerminalSizeQueue -type sizeQueue struct { - t TTY - // resizeChan receives a Size each time the user's terminal is resized. - resizeChan chan Size - stopResizing chan struct{} -} - -// make sure sizeQueue implements the TerminalSizeQueue interface -var _ TerminalSizeQueue = &sizeQueue{} - -// monitorSize primes resizeChan with initialSizes and then monitors for resize events. With each -// new event, it sends the current terminal size to resizeChan. 
-func (s *sizeQueue) monitorSize(outFd uintptr, initialSizes ...*Size) { - // send the initial sizes - for i := range initialSizes { - if initialSizes[i] != nil { - s.resizeChan <- *initialSizes[i] - } - } - - resizeEvents := make(chan Size, 1) - - monitorResizeEvents(outFd, resizeEvents, s.stopResizing) - - // listen for resize events in the background - go func() { - defer runtime.HandleCrash() - - for { - select { - case size, ok := <-resizeEvents: - if !ok { - return - } - - select { - // try to send the size to resizeChan, but don't block - case s.resizeChan <- size: - // send successful - default: - // unable to send / no-op - } - case <-s.stopResizing: - return - } - } - }() -} - -// Next returns the new terminal size after the terminal has been resized. It returns nil when -// monitoring has been stopped. -func (s *sizeQueue) Next() *Size { - size, ok := <-s.resizeChan - if !ok { - return nil - } - return &size -} - -// stop stops the background goroutine that is monitoring for terminal resizes. -func (s *sizeQueue) stop() { - close(s.stopResizing) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/resizeevents.go b/vendor/k8s.io/kubernetes/pkg/util/term/resizeevents.go deleted file mode 100644 index 70858ed03..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/term/resizeevents.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package term - -import ( - "os" - "os/signal" - "syscall" - - "k8s.io/kubernetes/pkg/util/runtime" -) - -// monitorResizeEvents spawns a goroutine that waits for SIGWINCH signals (these indicate the -// terminal has resized). After receiving a SIGWINCH, this gets the terminal size and tries to send -// it to the resizeEvents channel. The goroutine stops when the stop channel is closed. -func monitorResizeEvents(fd uintptr, resizeEvents chan<- Size, stop chan struct{}) { - go func() { - defer runtime.HandleCrash() - - winch := make(chan os.Signal, 1) - signal.Notify(winch, syscall.SIGWINCH) - defer signal.Stop(winch) - - for { - select { - case <-winch: - size := GetSize(fd) - if size == nil { - return - } - - // try to send size - select { - case resizeEvents <- *size: - // success - default: - // not sent - } - case <-stop: - return - } - } - }() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/resizeevents_windows.go b/vendor/k8s.io/kubernetes/pkg/util/term/resizeevents_windows.go deleted file mode 100644 index e994d28da..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/term/resizeevents_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package term - -import ( - "time" - - "k8s.io/kubernetes/pkg/util/runtime" -) - -// monitorResizeEvents spawns a goroutine that periodically gets the terminal size and tries to send -// it to the resizeEvents channel if the size has changed. 
The goroutine stops when the stop channel -// is closed. -func monitorResizeEvents(fd uintptr, resizeEvents chan<- Size, stop chan struct{}) { - go func() { - defer runtime.HandleCrash() - - size := GetSize(fd) - if size == nil { - return - } - lastSize := *size - - for { - // see if we need to stop running - select { - case <-stop: - return - default: - } - - size := GetSize(fd) - if size == nil { - return - } - - if size.Height != lastSize.Height || size.Width != lastSize.Width { - lastSize.Height = size.Height - lastSize.Width = size.Width - resizeEvents <- *size - } - - // sleep to avoid hot looping - time.Sleep(250 * time.Millisecond) - } - }() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/setsize.go b/vendor/k8s.io/kubernetes/pkg/util/term/setsize.go deleted file mode 100644 index 944d4e5ab..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/term/setsize.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package term - -import ( - "github.com/docker/docker/pkg/term" -) - -// SetSize sets the terminal size associated with fd. 
-func SetSize(fd uintptr, size Size) error { - return term.SetWinsize(fd, &term.Winsize{Height: size.Height, Width: size.Width}) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/setsize_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/term/setsize_unsupported.go deleted file mode 100644 index dd3de3126..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/term/setsize_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build windows - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package term - -func SetSize(fd uintptr, size Size) error { - // NOP - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/term.go b/vendor/k8s.io/kubernetes/pkg/util/term/term.go deleted file mode 100644 index 58baee831..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/term/term.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package term - -import ( - "io" - "os" - - "github.com/docker/docker/pkg/term" - - "k8s.io/kubernetes/pkg/util/interrupt" -) - -// SafeFunc is a function to be invoked by TTY. -type SafeFunc func() error - -// TTY helps invoke a function and preserve the state of the terminal, even if the process is -// terminated during execution. It also provides support for terminal resizing for remote command -// execution/attachment. -type TTY struct { - // In is a reader representing stdin. It is a required field. - In io.Reader - // Out is a writer representing stdout. It must be set to support terminal resizing. It is an - // optional field. - Out io.Writer - // Raw is true if the terminal should be set raw. - Raw bool - // TryDev indicates the TTY should try to open /dev/tty if the provided input - // is not a file descriptor. - TryDev bool - // Parent is an optional interrupt handler provided to this function - if provided - // it will be invoked after the terminal state is restored. If it is not provided, - // a signal received during the TTY will result in os.Exit(0) being invoked. - Parent *interrupt.Handler - - // sizeQueue is set after a call to MonitorSize() and is used to monitor SIGWINCH signals when the - // user's terminal resizes. - sizeQueue *sizeQueue -} - -// IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty -// even if TryDev is set. -func (t TTY) IsTerminalIn() bool { - return IsTerminal(t.In) -} - -// IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty -// even if TryDev is set. 
-func (t TTY) IsTerminalOut() bool { - return IsTerminal(t.Out) -} - -// IsTerminal returns whether the passed object is a terminal or not -func IsTerminal(i interface{}) bool { - _, terminal := term.GetFdInfo(i) - return terminal -} - -// Safe invokes the provided function and will attempt to ensure that when the -// function returns (or a termination signal is sent) that the terminal state -// is reset to the condition it was in prior to the function being invoked. If -// t.Raw is true the terminal will be put into raw mode prior to calling the function. -// If the input file descriptor is not a TTY and TryDev is true, the /dev/tty file -// will be opened (if available). -func (t TTY) Safe(fn SafeFunc) error { - inFd, isTerminal := term.GetFdInfo(t.In) - - if !isTerminal && t.TryDev { - if f, err := os.Open("/dev/tty"); err == nil { - defer f.Close() - inFd = f.Fd() - isTerminal = term.IsTerminal(inFd) - } - } - if !isTerminal { - return fn() - } - - var state *term.State - var err error - if t.Raw { - state, err = term.MakeRaw(inFd) - } else { - state, err = term.SaveState(inFd) - } - if err != nil { - return err - } - return interrupt.Chain(t.Parent, func() { - if t.sizeQueue != nil { - t.sizeQueue.stop() - } - - term.RestoreTerminal(inFd, state) - }).Run(fn) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go b/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go index ff89dc170..590071398 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package wait provides tools for polling or listening for changes // to a condition. 
-package wait +package wait // import "k8s.io/kubernetes/pkg/util/wait" diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go b/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go index e2d6a4866..c949de426 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go +++ b/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go @@ -193,6 +193,11 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error { return WaitFor(poller(interval, 0), condition, done) } +// PollUntil is like Poll, but it takes a stop change instead of total duration +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return WaitFor(poller(interval, 0), condition, stopCh) +} + // WaitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. type WaitFunc func(done <-chan struct{}) <-chan struct{} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/default_rate_limiters.go deleted file mode 100644 index 35caed4fa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/default_rate_limiters.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workqueue - -import ( - "math" - "sync" - "time" - - "github.com/juju/ratelimit" -) - -type RateLimiter interface { - // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing - // or for success, we'll stop tracking it - Forget(item interface{}) - // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int -} - -// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has -// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential -func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), - // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))}, - ) -} - -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { - *ratelimit.Bucket -} - -var _ RateLimiter = &BucketRateLimiter{} - -func (r *BucketRateLimiter) When(item interface{}) time.Duration { - return r.Bucket.Take(1) -} - -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { - return 0 -} - -func (r *BucketRateLimiter) Forget(item interface{}) { -} - -// ItemExponentialFailureRateLimiter does a simple baseDelay*10^ limit -// dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { - failuresLock sync.Mutex - failures map[interface{}]int - - baseDelay time.Duration - maxDelay time.Duration -} - -var _ RateLimiter = &ItemExponentialFailureRateLimiter{} - -func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - 
return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, - baseDelay: baseDelay, - maxDelay: maxDelay, - } -} - -func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) -} - -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - exp := r.failures[item] - r.failures[item] = r.failures[item] + 1 - - // The backoff is capped such that 'calculated' value never overflows. - backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) - if backoff > math.MaxInt64 { - return r.maxDelay - } - - calculated := time.Duration(backoff) - if calculated > r.maxDelay { - return r.maxDelay - } - - return calculated -} - -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - return r.failures[item] -} - -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - delete(r.failures, item) -} - -// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { - failuresLock sync.Mutex - failures map[interface{}]int - - maxFastAttempts int - fastDelay time.Duration - slowDelay time.Duration -} - -var _ RateLimiter = &ItemFastSlowRateLimiter{} - -func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, - fastDelay: fastDelay, - slowDelay: slowDelay, - maxFastAttempts: maxFastAttempts, - } -} - -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - r.failures[item] = r.failures[item] + 1 - - if r.failures[item] <= r.maxFastAttempts { - return r.fastDelay - } - - 
return r.slowDelay -} - -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - return r.failures[item] -} - -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { - r.failuresLock.Lock() - defer r.failuresLock.Unlock() - - delete(r.failures, item) -} - -// MaxOfRateLimiter calls every RateLimiter and returns the worst case response -// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items -// were separately delayed a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter -} - -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { - ret := time.Duration(0) - for _, limiter := range r.limiters { - curr := limiter.When(item) - if curr > ret { - ret = curr - } - } - - return ret -} - -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} -} - -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { - ret := 0 - for _, limiter := range r.limiters { - curr := limiter.NumRequeues(item) - if curr > ret { - ret = curr - } - } - - return ret -} - -func (r *MaxOfRateLimiter) Forget(item interface{}) { - for _, limiter := range r.limiters { - limiter.Forget(item) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go deleted file mode 100644 index 5a71b8181..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "sort" - "time" - - "k8s.io/kubernetes/pkg/util/clock" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" -) - -// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to -// requeue items after failures without ending up in a hot-loop. -type DelayingInterface interface { - Interface - // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) -} - -// NewDelayingQueue constructs a new workqueue with delayed queuing ability -func NewDelayingQueue() DelayingInterface { - return newDelayingQueue(clock.RealClock{}, "") -} - -func NewNamedDelayingQueue(name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, name) -} - -func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { - ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.Tick(maxWait), - stopCh: make(chan struct{}), - waitingTimeByEntry: map[t]time.Time{}, - waitingForAddCh: make(chan waitFor, 1000), - metrics: newRetryMetrics(name), - } - - go ret.waitingLoop() - - return ret -} - -// delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface - - // clock tracks time for delayed firing - clock clock.Clock - - // stopCh lets us signal a shutdown to the waiting loop - stopCh chan struct{} - - // heartbeat ensures we wait no more than maxWait before firing - heartbeat <-chan time.Time - - // waitingForAdd is an ordered slice of items to be added to the 
contained work queue - waitingForAdd []waitFor - // waitingTimeByEntry holds wait time by entry, so we can lookup pre-existing indexes - waitingTimeByEntry map[t]time.Time - // waitingForAddCh is a buffered channel that feeds waitingForAdd - waitingForAddCh chan waitFor - - // metrics counts the number of retries - metrics retryMetrics -} - -// waitFor holds the data to add and the time it should be added -type waitFor struct { - data t - readyAt time.Time -} - -// ShutDown gives a way to shut off this queue -func (q *delayingType) ShutDown() { - q.Interface.ShutDown() - close(q.stopCh) -} - -// AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { - // don't add if we're already shutting down - if q.ShuttingDown() { - return - } - - q.metrics.retry() - - // immediately add things with no delay - if duration <= 0 { - q.Add(item) - return - } - - select { - case <-q.stopCh: - // unblock if ShutDown() is called - case q.waitingForAddCh <- waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: - } -} - -// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. -// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an -// expired item sitting for more than 10 seconds. -const maxWait = 10 * time.Second - -// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
-func (q *delayingType) waitingLoop() { - defer utilruntime.HandleCrash() - - // Make a placeholder channel to use when there are no items in our list - never := make(<-chan time.Time) - - for { - if q.Interface.ShuttingDown() { - // discard waiting entries - q.waitingForAdd = nil - q.waitingTimeByEntry = nil - return - } - - now := q.clock.Now() - - // Add ready entries - readyEntries := 0 - for _, entry := range q.waitingForAdd { - if entry.readyAt.After(now) { - break - } - q.Add(entry.data) - delete(q.waitingTimeByEntry, entry.data) - readyEntries++ - } - q.waitingForAdd = q.waitingForAdd[readyEntries:] - - // Set up a wait for the first item's readyAt (if one exists) - nextReadyAt := never - if len(q.waitingForAdd) > 0 { - nextReadyAt = q.clock.After(q.waitingForAdd[0].readyAt.Sub(now)) - } - - select { - case <-q.stopCh: - return - - case <-q.heartbeat: - // continue the loop, which will add ready items - - case <-nextReadyAt: - // continue the loop, which will add ready items - - case waitEntry := <-q.waitingForAddCh: - if waitEntry.readyAt.After(q.clock.Now()) { - q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry) - } else { - q.Add(waitEntry.data) - } - - drained := false - for !drained { - select { - case waitEntry := <-q.waitingForAddCh: - if waitEntry.readyAt.After(q.clock.Now()) { - q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry) - } else { - q.Add(waitEntry.data) - } - default: - drained = true - } - } - } - } -} - -// inserts the given entry into the sorted entries list -// same semantics as append()... 
the given slice may be modified, -// and the returned value should be used -func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor { - // if the entry is already in our retry list and the existing time is before the new one, just skip it - existingTime, exists := knownEntries[entry.data] - if exists && existingTime.Before(entry.readyAt) { - return entries - } - - // if the entry exists and is scheduled for later, go ahead and remove the entry - if exists { - if existingIndex := findEntryIndex(entries, existingTime, entry.data); existingIndex >= 0 && existingIndex < len(entries) { - entries = append(entries[:existingIndex], entries[existingIndex+1:]...) - } - } - - insertionIndex := sort.Search(len(entries), func(i int) bool { - return entry.readyAt.Before(entries[i].readyAt) - }) - - // grow by 1 - entries = append(entries, waitFor{}) - // shift items from the insertion point to the end - copy(entries[insertionIndex+1:], entries[insertionIndex:]) - // insert the record - entries[insertionIndex] = entry - - knownEntries[entry.data] = entry.readyAt - - return entries -} - -// findEntryIndex returns the index for an existing entry -func findEntryIndex(entries []waitFor, existingTime time.Time, data t) int { - index := sort.Search(len(entries), func(i int) bool { - return entries[i].readyAt.After(existingTime) || existingTime == entries[i].readyAt - }) - - // we know this is the earliest possible index, but there could be multiple with the same time - // iterate from here to find the dupe - for ; index < len(entries); index++ { - if entries[index].data == data { - break - } - } - - return index -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/doc.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/doc.go deleted file mode 100644 index 2a00c74ac..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package workqueue provides a simple queue that supports the following -// features: -// * Fair: items processed in the order in which they are added. -// * Stingy: a single item will not be processed multiple times concurrently, -// and if an item is added multiple times before it can be processed, it -// will only be processed once. -// * Multiple consumers and producers. In particular, it is allowed for an -// item to be reenqueued while it is being processed. -// * Shutdown notifications. -package workqueue diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/metrics.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/metrics.go deleted file mode 100644 index 8a37d2e70..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/metrics.go +++ /dev/null @@ -1,153 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workqueue - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -type queueMetrics interface { - add(item t) - get(item t) - done(item t) -} - -type defaultQueueMetrics struct { - depth prometheus.Gauge - adds prometheus.Counter - latency prometheus.Summary - workDuration prometheus.Summary - addTimes map[t]time.Time - processingStartTimes map[t]time.Time -} - -func newQueueMetrics(name string) queueMetrics { - var ret *defaultQueueMetrics - if len(name) == 0 { - return ret - } - - ret = &defaultQueueMetrics{ - depth: prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "depth", - Help: "Current depth of workqueue: " + name, - }), - adds: prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: name, - Name: "adds", - Help: "Total number of adds handled by workqueue: " + name, - }), - latency: prometheus.NewSummary(prometheus.SummaryOpts{ - Subsystem: name, - Name: "queue_latency", - Help: "How long an item stays in workqueue" + name + " before being requested.", - }), - workDuration: prometheus.NewSummary(prometheus.SummaryOpts{ - Subsystem: name, - Name: "work_duration", - Help: "How long processing an item from workqueue" + name + " takes.", - }), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, - } - - prometheus.Register(ret.depth) - prometheus.Register(ret.adds) - prometheus.Register(ret.latency) - prometheus.Register(ret.workDuration) - - return ret -} - -func (m *defaultQueueMetrics) add(item t) { - if m == nil { - return - } - - m.adds.Inc() - m.depth.Inc() - if _, exists := m.addTimes[item]; !exists { - m.addTimes[item] = time.Now() - } -} - -func (m *defaultQueueMetrics) get(item t) { - if m == nil { - return - } - - m.depth.Dec() - m.processingStartTimes[item] = time.Now() - if startTime, exists := m.addTimes[item]; exists { - m.latency.Observe(sinceInMicroseconds(startTime)) - delete(m.addTimes, item) - } -} - -func (m *defaultQueueMetrics) done(item t) { - if m == 
nil { - return - } - - if startTime, exists := m.processingStartTimes[item]; exists { - m.workDuration.Observe(sinceInMicroseconds(startTime)) - delete(m.processingStartTimes, item) - } -} - -// Gets the time since the specified start in microseconds. -func sinceInMicroseconds(start time.Time) float64 { - return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) -} - -type retryMetrics interface { - retry() -} - -type defaultRetryMetrics struct { - retries prometheus.Counter -} - -func newRetryMetrics(name string) retryMetrics { - var ret *defaultRetryMetrics - if len(name) == 0 { - return ret - } - - ret = &defaultRetryMetrics{ - retries: prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: name, - Name: "retries", - Help: "Total number of retries handled by workqueue: " + name, - }), - } - - prometheus.Register(ret.retries) - - return ret -} - -func (m *defaultRetryMetrics) retry() { - if m == nil { - return - } - - m.retries.Inc() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go deleted file mode 100644 index a9305935b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workqueue - -import ( - "sync" - - utilruntime "k8s.io/kubernetes/pkg/util/runtime" -) - -type DoWorkPieceFunc func(piece int) - -// Parallelize is a very simple framework that allow for parallelizing -// N independent pieces of work. -func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { - toProcess := make(chan int, pieces) - for i := 0; i < pieces; i++ { - toProcess <- i - } - close(toProcess) - - wg := sync.WaitGroup{} - wg.Add(workers) - for i := 0; i < workers; i++ { - go func() { - defer utilruntime.HandleCrash() - defer wg.Done() - for piece := range toProcess { - doWorkPiece(piece) - } - }() - } - wg.Wait() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go deleted file mode 100644 index 9a2ecad38..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -import ( - "sync" -) - -type Interface interface { - Add(item interface{}) - Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) - ShutDown() - ShuttingDown() bool -} - -// New constructs a new workqueue (see the package comment). 
-func New() *Type { - return NewNamed("") -} - -func NewNamed(name string) *Type { - return &Type{ - dirty: set{}, - processing: set{}, - cond: sync.NewCond(&sync.Mutex{}), - metrics: newQueueMetrics(name), - } -} - -// Type is a work queue (see the package comment). -type Type struct { - // queue defines the order in which we will work on items. Every - // element of queue should be in the dirty set and not in the - // processing set. - queue []t - - // dirty defines all of the items that need to be processed. - dirty set - - // Things that are currently being processed are in the processing set. - // These things may be simultaneously in the dirty set. When we finish - // processing something and remove it from this set, we'll check if - // it's in the dirty set, and if so, add it to the queue. - processing set - - cond *sync.Cond - - shuttingDown bool - - metrics queueMetrics -} - -type empty struct{} -type t interface{} -type set map[t]empty - -func (s set) has(item t) bool { - _, exists := s[item] - return exists -} - -func (s set) insert(item t) { - s[item] = empty{} -} - -func (s set) delete(item t) { - delete(s, item) -} - -// Add marks item as needing processing. -func (q *Type) Add(item interface{}) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - if q.shuttingDown { - return - } - if q.dirty.has(item) { - return - } - - q.metrics.add(item) - - q.dirty.insert(item) - if q.processing.has(item) { - return - } - - q.queue = append(q.queue, item) - q.cond.Signal() -} - -// Len returns the current queue length, for informational purposes only. You -// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular -// value, that can't be synchronized properly. -func (q *Type) Len() int { - q.cond.L.Lock() - defer q.cond.L.Unlock() - return len(q.queue) -} - -// Get blocks until it can return an item to be processed. If shutdown = true, -// the caller should end their goroutine. You must call Done with item when you -// have finished processing it. 
-func (q *Type) Get() (item interface{}, shutdown bool) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { - q.cond.Wait() - } - if len(q.queue) == 0 { - // We must be shutting down. - return nil, true - } - - item, q.queue = q.queue[0], q.queue[1:] - - q.metrics.get(item) - - q.processing.insert(item) - q.dirty.delete(item) - - return item, false -} - -// Done marks item as done processing, and if it has been marked as dirty again -// while it was being processed, it will be re-added to the queue for -// re-processing. -func (q *Type) Done(item interface{}) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - - q.metrics.done(item) - - q.processing.delete(item) - if q.dirty.has(item) { - q.queue = append(q.queue, item) - q.cond.Signal() - } -} - -// Shutdown will cause q to ignore all new items added to it. As soon as the -// worker goroutines have drained the existing items in the queue, they will be -// instructed to exit. -func (q *Type) ShutDown() { - q.cond.L.Lock() - defer q.cond.L.Unlock() - q.shuttingDown = true - q.cond.Broadcast() -} - -func (q *Type) ShuttingDown() bool { - q.cond.L.Lock() - defer q.cond.L.Unlock() - - return q.shuttingDown -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go deleted file mode 100644 index 9a2bfbb56..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workqueue - -// RateLimitingInterface is an Interface that can Add an item at a later time. This makes it easier to -// requeue items after failures without ending up in a hot-loop. -type RateLimitingInterface interface { - DelayingInterface - // AddRateLimited adds an item to the workqueue after the rate limiter says its ok - AddRateLimited(item interface{}) - - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing - // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you - // still have to call `Done` on the queue. - Forget(item interface{}) - // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int -} - -// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability -// Remember to call Forget! If you don't, you may end up tracking failures forever. 
-func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), - rateLimiter: rateLimiter, - } -} - -func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } -} - -// rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface - - rateLimiter RateLimiter -} - -// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) -} - -func (q *rateLimitingType) NumRequeues(item interface{}) int { - return q.rateLimiter.NumRequeues(item) -} - -func (q *rateLimitingType) Forget(item interface{}) { - q.rateLimiter.Forget(item) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/timed_queue.go b/vendor/k8s.io/kubernetes/pkg/util/workqueue/timed_queue.go deleted file mode 100644 index 2ad90bfdf..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/timed_queue.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workqueue - -import "time" - -type TimedWorkQueue struct { - *Type -} - -type TimedWorkQueueItem struct { - StartTime time.Time - Object interface{} -} - -func NewTimedWorkQueue() *TimedWorkQueue { - return &TimedWorkQueue{New()} -} - -// Add adds the obj along with the current timestamp to the queue. -func (q TimedWorkQueue) Add(timedItem *TimedWorkQueueItem) { - q.Type.Add(timedItem) -} - -// Get gets the obj along with its timestamp from the queue. -func (q TimedWorkQueue) Get() (timedItem *TimedWorkQueueItem, shutdown bool) { - origin, shutdown := q.Type.Get() - if origin == nil { - return nil, shutdown - } - timedItem, _ = origin.(*TimedWorkQueueItem) - return timedItem, shutdown -} - -func (q TimedWorkQueue) Done(timedItem *TimedWorkQueueItem) error { - q.Type.Done(timedItem) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/version/.gitattributes b/vendor/k8s.io/kubernetes/pkg/version/.gitattributes deleted file mode 100644 index 7e349eff6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -base.go export-subst diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go index 4f53bd244..75dcdcea2 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -39,8 +39,8 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) 
- gitMajor string = "1" // major version, always numeric - gitMinor string = "4+" // minor version, numeric possibly followed by "+" + gitMajor string = "1" // major version, always numeric + gitMinor string = "4" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.4.0-beta.3+$Format:%h$" + gitVersion string = "v1.4.0+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/kubernetes/pkg/version/doc.go b/vendor/k8s.io/kubernetes/pkg/version/doc.go index ccedec76f..3cb84338d 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/version/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package version supplies version information collected at build time to // kubernetes components. -package version +package version // import "k8s.io/kubernetes/pkg/version" diff --git a/vendor/k8s.io/kubernetes/pkg/watch/doc.go b/vendor/k8s.io/kubernetes/pkg/watch/doc.go index 5fde5e742..cb866d873 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/watch/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package watch contains a generic watchable interface, and a fake for // testing code that uses the watch interface. 
-package watch +package watch // import "k8s.io/kubernetes/pkg/watch" diff --git a/vendor/k8s.io/kubernetes/pkg/watch/until.go b/vendor/k8s.io/kubernetes/pkg/watch/until.go index 4259f51bb..01d649f2d 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/until.go +++ b/vendor/k8s.io/kubernetes/pkg/watch/until.go @@ -40,7 +40,7 @@ func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc after = time.After(timeout) } else { ch := make(chan time.Time) - close(ch) + defer close(ch) after = ch } var lastEvent *Event diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto b/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto deleted file mode 100644 index 8d5506552..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.watch.versioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "versioned"; - -// Event represents a single event to a watched resource. -// -// +protobuf=true -message Event { - optional string type = 1; - - // Object is: - // * If Type is Added or Modified: the new state of the object. 
- // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. - optional k8s.io.kubernetes.pkg.runtime.RawExtension object = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/watch/watch.go b/vendor/k8s.io/kubernetes/pkg/watch/watch.go index 96b2fe3de..5c2e4b915 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/watch.go +++ b/vendor/k8s.io/kubernetes/pkg/watch/watch.go @@ -20,6 +20,8 @@ import ( "sync" "k8s.io/kubernetes/pkg/runtime" + + "github.com/golang/glog" ) // Interface can be implemented by anything that knows how to watch and report changes. @@ -89,16 +91,29 @@ func NewFake() *FakeWatcher { } } +func NewFakeWithChanSize(size int) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + // Stop implements Interface.Stop(). func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } } +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + // Reset prepares the watcher to be reused. func (f *FakeWatcher) Reset() { f.Lock() diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS deleted file mode 100644 index ecf334993..000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -assignees: - - bobbyrullo diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS b/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. From 5c5e0d16d5e7b209f0900f9a26f2e9e242f6ec30 Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Mon, 5 Dec 2016 14:03:01 +0100 Subject: [PATCH 2/3] Update Developer Guide - switch to glide --- docs/development.md | 60 +++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/docs/development.md b/docs/development.md index 923a8b74c..637d3043e 100644 --- a/docs/development.md +++ b/docs/development.md @@ -78,60 +78,66 @@ git push -f origin myfeature 2. Click the "Compare and pull request" button next to your "myfeature" branch. 3. Check out the pull request process for more details -## Godep and dependency management +## Glide, glide-vc and dependency management -Kompose uses `godep` to manage dependencies. 
It is not strictly required for building Kompose but it is required when managing dependencies under the vendor/ tree, and is required by a number of the build and test scripts. Please make sure that godep is installed and in your `$PATH`, and that `godep version` says it is at least v63. +Kompose uses `glide` to manage dependencies and `glide-vc` to clean vendor directory. +They are not strictly required for building Kompose but they are required when managing dependencies under the vendor/ tree. +If you want to make changes to dependencies please make sure that `glide` and `glide-vc` are installed and in your `$PATH`. -### Installing godep +### Installing glide -There are many ways to build and host Go binaries. Here is an easy way to get utilities like `godep` installed: +There are many ways to build and host Go binaries. Here is an easy way to get utilities like `glide` and `glide-vc` installed: -1) Ensure that mercurial is installed on your system. (some of godep's dependencies use the mercurial source control system). Use `apt-get install mercurial` or `yum install mercurial` on Linux, or brew.sh on OS X, or download directly from mercurial. +1. Ensure that Mercurial and Git are installed on your system. (some of the dependencies use the mercurial source control system). + Use `apt-get install mercurial git` or `yum install mercurial git` on Linux, or brew.sh on OS X, or download them directly. -2) Create a new GOPATH for your tools and install godep: +2. Create a new GOPATH for your tools and install `glide` and `glide-vc`: ```console export GOPATH=$HOME/go-tools mkdir -p $GOPATH -go get -u github.com/tools/godep +go get -u github.com/Masterminds/glide +go get github.com/sgotti/glide-vc ``` -3) Add this $GOPATH/bin to your path. Typically you'd add this to your ~/.profile: +3. Add this $GOPATH/bin to your path.
Typically you'd add this to your ~/.profile: ```console export GOPATH=$HOME/go-tools export PATH=$PATH:$GOPATH/bin ``` -To check your version of godep: +Check that `glide` and `glide-vc` commands are working. ```console -godep version -godep v74 (linux/amd64/go1.6.2) +glide --version + +glide-vc -h ``` -Note: At this time, godep version >= v64 is known to work in the Kompose project. +### Using glide -If it is not a valid version try, make sure you have updated the godep repo with `go get -u github.com/tools/godep`. +#### Adding new dependency +1. Update `glide.yaml` file. + Add new packages or subpackages to `glide.yaml` depending on whether you added a whole new package as a dependency or + just a new subpackage. -### Using godep +2. Run `glide update --strip-vendor` to get new dependencies. + Then run `glide-vc --only-code --no-tests` to delete all unnecessary files from vendor. -1. Populate dependencies for your Kompose. +3. Commit updated `glide.yaml`, `glide.lock` and `vendor` to git. -```console -cd $GOPATH/src/github.com/kubernetes-incubator/kompose -script/godep-restore.sh -``` -Reason for calling `script/godep-restore.sh` instead of just `godep restore` is that Kompose is using `github.com/openshift/kubernetes` instead of `github.com/kubernetes/kubernetes`. +#### Updating dependencies +1. Set new package version in `glide.yaml` file. -2. Add a new dependency. +2. Run `glide update --strip-vendor` to update dependencies. + Then run `glide-vc --only-code --no-tests` to delete all unnecessary files from vendor. -To add a new package, do this: -```console -go get foo/bar -# edit your code to import foo/bar -godep save ./... -``` +##### Updating Kubernetes and OpenShift +Kubernetes version depends on what version OpenShift is using. +OpenShift is using forked Kubernetes to carry some patches. +Currently it is not possible to use a different Kubernetes version from the version that OpenShift uses.
+(for more see comments in `glide.yaml`) From a0ba435efb6686e945682bd5cae049c26cc0169a Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Wed, 7 Dec 2016 13:08:19 +0100 Subject: [PATCH 3/3] Add check-vendor script This checks vendor dir for nested vendors and if vendor has been cleaned by glide-vc --- .travis.yml | 3 ++ Makefile | 3 ++ script/check-vendor | 70 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100755 script/check-vendor diff --git a/.travis.yml b/.travis.yml index 3da98a633..b9134a523 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,11 +11,14 @@ go: before_install: - go get github.com/mattn/goveralls - go get github.com/modocache/gover + - go get github.com/Masterminds/glide + - go get github.com/sgotti/glide-vc install: - true script: + - make check-vendor - make validate - make test-unit-cover # gover collects all .coverprofile files and saves it to one file gover.coverprofile diff --git a/Makefile b/Makefile index 61cdd4c35..c5b7bab8f 100644 --- a/Makefile +++ b/Makefile @@ -54,3 +54,6 @@ lint: ./script/make.sh validate-lint gofmt: ./script/make.sh validate-gofmt + +check-vendor: + ./script/make.sh check-vendor diff --git a/script/check-vendor b/script/check-vendor new file mode 100755 index 000000000..f4e4c6a0a --- /dev/null +++ b/script/check-vendor @@ -0,0 +1,70 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ + + +# Check if there are nested vendor dirs inside Kompose vendor. +# All dependencies should be flattened and there shouldn't be a vendor dir inside vendor. +function check_nested_vendor() { + echo "Checking if there are nested vendor dirs" + + # count all vendor directories inside Kompose vendor + NO_NESTED_VENDORS=$(find vendor/ -type d | sed 's/^[^/]*.//g' | grep -E "vendor$" | grep -v _vendor | wc -l) + + if [ $NO_NESTED_VENDORS -ne 0 ]; then + echo "ERROR" + echo " There are $NO_NESTED_VENDORS nested vendors in Kompose vendor directory" + echo " Please run 'glide update --strip-vendor'" + return 1 + else + echo "OK" + return 0 + fi +} + + +# Check if Kompose vendor directory was cleaned by glide-vc +function check_glide-vc() { + echo "Checking if vendor was cleaned using glide-vc." + + # dry run glide-vc and count how many files could be deleted. + NO_DELETED_FILES=$($GOPATH/bin/glide-vc --only-code --no-tests --dryrun | wc -l) + + if [ $NO_DELETED_FILES -ne 0 ]; then + echo "ERROR" + echo " There are $NO_DELETED_FILES files that can be deleted by glide-vc." + echo " Please run 'glide-vc --only-code --no-tests'" + return 1 + else + echo "OK" + return 0 + fi +} + + +# Run both checks and report a failing exit code if either of them failed. +check_nested_vendor +VENDOR_CHECK=$? + +check_glide-vc +VC_CHECK=$? + +if [ $VENDOR_CHECK -eq 0 ] && [ $VC_CHECK -eq 0 ]; then + exit 0 +else + exit 1 +fi